ArmNN 20.11 - Serializer.cpp
1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 #include "Serializer.hpp"
6 
7 #include <armnn/Descriptors.hpp>
8 #include <armnn/LstmParams.hpp>
9 #include <armnn/QuantizedLstmParams.hpp>
10 #include <armnn/utility/IgnoreUnused.hpp>
11 #include <armnn/utility/NumericCast.hpp>
12 
13 #include <iostream>
14 
15 #include <flatbuffers/util.h>
16 
17 #include "SerializerUtils.hpp"
18 
19 using namespace armnn;
20 namespace fb = flatbuffers;
21 namespace serializer = armnnSerializer;
22 
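// Illustrative usage sketch (not from this file; assumes an already-built
// armnn::INetwork pointed to by 'network'): the visitor below is normally
// driven through the public ISerializer interface, roughly:
//
//     armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
//     serializer->Serialize(*network);                        // walks the graph with SerializerVisitor
//     std::ofstream stream("model.armnn", std::ios::binary);  // hypothetical output path
//     serializer->SaveSerializedToStream(stream);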
23 namespace armnnSerializer
24 {
25 
26 serializer::ActivationFunction GetFlatBufferActivationFunction(armnn::ActivationFunction function)
27 {
28  switch (function)
29  {
30  case armnn::ActivationFunction::Sigmoid:
31  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
32  case armnn::ActivationFunction::TanH:
33  return serializer::ActivationFunction::ActivationFunction_TanH;
34  case armnn::ActivationFunction::Linear:
35  return serializer::ActivationFunction::ActivationFunction_Linear;
36  case armnn::ActivationFunction::ReLu:
37  return serializer::ActivationFunction::ActivationFunction_ReLu;
38  case armnn::ActivationFunction::BoundedReLu:
39  return serializer::ActivationFunction::ActivationFunction_BoundedReLu;
40  case armnn::ActivationFunction::LeakyReLu:
41  return serializer::ActivationFunction::ActivationFunction_LeakyReLu;
42  case armnn::ActivationFunction::Abs:
43  return serializer::ActivationFunction::ActivationFunction_Abs;
44  case armnn::ActivationFunction::Sqrt:
45  return serializer::ActivationFunction::ActivationFunction_Sqrt;
46  case armnn::ActivationFunction::Square:
47  return serializer::ActivationFunction::ActivationFunction_Square;
48  case armnn::ActivationFunction::Elu:
49  return serializer::ActivationFunction::ActivationFunction_Elu;
50  case armnn::ActivationFunction::HardSwish:
51  return serializer::ActivationFunction::ActivationFunction_HardSwish;
52  default:
53  return serializer::ActivationFunction::ActivationFunction_Sigmoid;
54  }
55 }
56 
57 serializer::ArgMinMaxFunction GetFlatBufferArgMinMaxFunction(armnn::ArgMinMaxFunction function)
58 {
59  switch (function)
60  {
61  case armnn::ArgMinMaxFunction::Max:
62  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Max;
63  case armnn::ArgMinMaxFunction::Min:
64  default:
65  return serializer::ArgMinMaxFunction::ArgMinMaxFunction_Min;
66  }
67 }
68 
69 uint32_t SerializerVisitor::GetSerializedId(armnn::LayerGuid guid)
70 {
71  if (m_guidMap.empty())
72  {
73  m_guidMap.insert(std::make_pair(guid, m_layerId));
74  }
75  else if (m_guidMap.find(guid) == m_guidMap.end())
76  {
77  ++m_layerId;
78  m_guidMap.insert(std::make_pair(guid, m_layerId));
79 
80  return m_layerId;
81  }
82  return m_guidMap[guid];
83 }
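// Behaviour note: the first GUID seen is mapped to the current m_layerId without
// incrementing it, each subsequent unseen GUID is assigned the next id, and a GUID
// that is already in m_guidMap simply returns its cached id.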
84 
85 // Build FlatBuffer for Input Layer
86 void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
87 {
88  IgnoreUnused(name);
89 
90  // Create FlatBuffer BaseLayer
91  auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
92 
93  // Create FlatBuffer BindableBaseLayer
94  auto flatBufferInputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
95  flatBufferInputBaseLayer,
96  id);
97  // Push layer binding id to inputIds.
98  m_inputIds.push_back(id);
99 
100  // Create the FlatBuffer InputLayer
101  auto flatBufferInputLayer = serializer::CreateInputLayer(m_flatBufferBuilder, flatBufferInputBindableBaseLayer);
102 
103  // Add the AnyLayer to the FlatBufferLayers
104  CreateAnyLayer(flatBufferInputLayer.o, serializer::Layer::Layer_InputLayer);
105 }
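// The Visit*Layer methods below all follow the same three-step pattern:
// (1) CreateLayerBase records the common LayerBase fields (index, name, type, slots),
// (2) a layer-specific FlatBuffer table is built from the descriptor and any constant
//     tensors, and (3) CreateAnyLayer wraps the result in the Layer union and appends
//     it to the list of serialized layers.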
106 
107 // Build FlatBuffer for Output Layer
108 void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
109 {
110  IgnoreUnused(name);
111 
112  // Create FlatBuffer BaseLayer
113  auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
114 
115  // Create FlatBuffer BindableBaseLayer
116  auto flatBufferOutputBindableBaseLayer = serializer::CreateBindableLayerBase(m_flatBufferBuilder,
117  flatBufferOutputBaseLayer,
118  id);
119  // Push layer binding id to outputIds.
120  m_outputIds.push_back(id);
121 
122  // Create the FlatBuffer OutputLayer
123  auto flatBufferOutputLayer = serializer::CreateOutputLayer(m_flatBufferBuilder, flatBufferOutputBindableBaseLayer);
124  // Add the AnyLayer to the FlatBufferLayers
125  CreateAnyLayer(flatBufferOutputLayer.o, serializer::Layer::Layer_OutputLayer);
126 }
127 
128 void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
129 {
130  IgnoreUnused(name);
131  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
132  auto flatBufferAbsLayer = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
133 
134  CreateAnyLayer(flatBufferAbsLayer.o, serializer::Layer::Layer_AbsLayer);
135 }
136 
137 // Build FlatBuffer for Activation Layer
138 void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* layer,
139  const armnn::ActivationDescriptor& descriptor,
140  const char* name)
141 {
142  IgnoreUnused(name);
143 
144  // Create FlatBuffer BaseLayer
145  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
146 
147  // Create the FlatBuffer ActivationDescriptor
148  auto flatBufferDescriptor = CreateActivationDescriptor(m_flatBufferBuilder,
149  GetFlatBufferActivationFunction(descriptor.m_Function),
150  descriptor.m_A,
151  descriptor.m_B);
152 
153  // Create the FlatBuffer ActivationLayer
154  auto flatBufferActivationLayer = CreateActivationLayer(m_flatBufferBuilder,
155  flatBufferBaseLayer,
156  flatBufferDescriptor);
157 
158  // Add the AnyLayer to the FlatBufferLayers
159  CreateAnyLayer(flatBufferActivationLayer.o, serializer::Layer::Layer_ActivationLayer);
160 }
161 
162 // Build FlatBuffer for Addition Layer
163 void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
164 {
165  IgnoreUnused(name);
166 
167  // Create FlatBuffer BaseLayer
168  auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
169 
170  // Create the FlatBuffer AdditionLayer
171  auto flatBufferAdditionLayer = serializer::CreateAdditionLayer(m_flatBufferBuilder, flatBufferAdditionBaseLayer);
172 
173  // Add the AnyLayer to the FlatBufferLayers
174  CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
175 }
176 
177 // Build FlatBuffer for ArgMinMax Layer
178 void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *layer,
179  const armnn::ArgMinMaxDescriptor& descriptor,
180  const char *name)
181 {
182  IgnoreUnused(name);
183 
184  // Create FlatBuffer BaseLayer
185  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
186 
187  // Create FlatBuffer Descriptor
188  auto flatBufferDescriptor = CreateArgMinMaxDescriptor(m_flatBufferBuilder,
189  GetFlatBufferArgMinMaxFunction(descriptor.m_Function),
190  descriptor.m_Axis);
191 
192  // Create FlatBuffer ArgMinMaxLayer
193  auto flatBufferLayer = CreateArgMinMaxLayer(m_flatBufferBuilder,
194  flatBufferBaseLayer,
195  flatBufferDescriptor);
196 
197  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ArgMinMaxLayer);
198 }
199 
200 // Build FlatBuffer for BatchToSpaceNd Layer
201 void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
202  const armnn::BatchToSpaceNdDescriptor& descriptor,
203  const char* name)
204 {
205  IgnoreUnused(name);
206 
207  // Create FlatBuffer BaseLayer
208  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
209 
210  std::vector<unsigned int> crops;
211  crops.reserve(descriptor.m_Crops.size() * 2);
212  for (auto& crop : descriptor.m_Crops)
213  {
214  crops.push_back(crop.first);
215  crops.push_back(crop.second);
216  }
217 
218  auto flatBufferDescriptor =
219  CreateBatchToSpaceNdDescriptor(m_flatBufferBuilder,
220  m_flatBufferBuilder.CreateVector(descriptor.m_BlockShape),
221  m_flatBufferBuilder.CreateVector(crops),
222  GetFlatBufferDataLayout(descriptor.m_DataLayout));
223 
224  auto flatBufferLayer = serializer::CreateBatchToSpaceNdLayer(m_flatBufferBuilder,
225  flatBufferBaseLayer,
226  flatBufferDescriptor);
227 
228  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_BatchToSpaceNdLayer);
229 }
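// Note: the (before, after) crop pairs are flattened into a single uint vector
// (before0, after0, before1, after1, ...) to match the flat 'crops' array in the
// schema; the pad lists handled further down are flattened the same way.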
230 
231 void SerializerVisitor::VisitBatchNormalizationLayer(const armnn::IConnectableLayer* layer,
232  const armnn::BatchNormalizationDescriptor& batchNormDescriptor,
233  const armnn::ConstTensor& mean,
234  const armnn::ConstTensor& variance,
235  const armnn::ConstTensor& beta,
236  const armnn::ConstTensor& gamma,
237  const char* name)
238 {
239  IgnoreUnused(name);
240 
241  auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
242  auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
243  m_flatBufferBuilder,
244  batchNormDescriptor.m_Eps,
245  GetFlatBufferDataLayout(batchNormDescriptor.m_DataLayout));
246 
247  auto fbMeanConstTensorInfo = CreateConstTensorInfo(mean);
248  auto fbVarianceConstTensorInfo = CreateConstTensorInfo(variance);
249  auto fbBetaConstTensorInfo = CreateConstTensorInfo(beta);
250  auto fbGammaConstTensorInfo = CreateConstTensorInfo(gamma);
251  auto fbBatchNormalizationLayer = serializer::CreateBatchNormalizationLayer(m_flatBufferBuilder,
252  fbBatchNormalizationBaseLayer,
253  fbBatchNormalizationDescriptor,
254  fbMeanConstTensorInfo,
255  fbVarianceConstTensorInfo,
256  fbBetaConstTensorInfo,
257  fbGammaConstTensorInfo);
258 
259  CreateAnyLayer(fbBatchNormalizationLayer.o, serializer::Layer::Layer_BatchNormalizationLayer);
260 }
261 
262 void SerializerVisitor::VisitComparisonLayer(const armnn::IConnectableLayer* layer,
263  const armnn::ComparisonDescriptor& descriptor,
264  const char* name)
265 {
266  IgnoreUnused(name);
267 
268  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
269  auto fbDescriptor = serializer::CreateComparisonDescriptor(
270  m_flatBufferBuilder,
271  GetFlatBufferComparisonOperation(descriptor.m_Operation));
272 
273  auto fbLayer = serializer::CreateComparisonLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
274  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ComparisonLayer);
275 }
276 
277 // Build FlatBuffer for Constant Layer
278 void SerializerVisitor::VisitConstantLayer(const armnn::IConnectableLayer* layer,
279  const armnn::ConstTensor& input,
280  const char* name)
281 {
282  IgnoreUnused(name);
283 
284  // Create FlatBuffer BaseLayer
285  auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
286 
287  auto flatBufferConstTensorInfo = CreateConstTensorInfo(input);
288 
289  // Create the FlatBuffer ConstantLayer
290  auto flatBufferLayer = CreateConstantLayer(m_flatBufferBuilder,
291  flatBufferConstantBaseLayer,
292  flatBufferConstTensorInfo);
293 
294  // Add the AnyLayer to the FlatBufferLayers
295  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConstantLayer);
296 }
297 
298 // Build FlatBuffer for Convolution2dLayer
299 void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
300  const armnn::Convolution2dDescriptor& descriptor,
301  const armnn::ConstTensor& weights,
302  const armnn::Optional<armnn::ConstTensor>& biases,
303  const char* name)
304 {
305  IgnoreUnused(name);
306 
307  // Create FlatBuffer BaseLayer
308  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
309 
310  auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
311  descriptor.m_PadLeft,
312  descriptor.m_PadRight,
313  descriptor.m_PadTop,
314  descriptor.m_PadBottom,
315  descriptor.m_StrideX,
316  descriptor.m_StrideY,
317  descriptor.m_DilationX,
318  descriptor.m_DilationY,
319  descriptor.m_BiasEnabled,
320  GetFlatBufferDataLayout(descriptor.m_DataLayout));
321  auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
322  flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
323 
324  if (biases.has_value())
325  {
326  flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
327  }
328 
329  // Create the FlatBuffer Convolution2dLayer
330  auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
331  flatBufferBaseLayer,
332  flatBufferDescriptor,
333  flatBufferWeightsConstTensorInfo,
334  flatBufferBiasesConstTensorInfo);
335 
336  // Add the AnyLayer to the FlatBufferLayers
337  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
338 }
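// Illustrative caller-side sketch (hypothetical names, not part of this file):
// the weights and optional biases serialized above originate from the network
// building API, e.g.
//
//     armnn::Convolution2dDescriptor convDesc;
//     convDesc.m_StrideX     = 1;
//     convDesc.m_StrideY     = 1;
//     convDesc.m_BiasEnabled = true;
//     armnn::IConnectableLayer* conv = network->AddConvolution2dLayer(
//         convDesc, weights, armnn::Optional<armnn::ConstTensor>(bias), "conv1");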
339 
340 void SerializerVisitor::VisitDepthToSpaceLayer(const armnn::IConnectableLayer* layer,
341  const armnn::DepthToSpaceDescriptor& descriptor,
342  const char* name)
343 {
344  IgnoreUnused(name);
345 
346  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
347  auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
348  descriptor.m_BlockSize,
349  GetFlatBufferDataLayout(descriptor.m_DataLayout));
350 
351  auto fbLayer = serializer::CreateDepthToSpaceLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
352 
353  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_DepthToSpaceLayer);
354 }
355 
356 void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectableLayer* layer,
357  const armnn::DepthwiseConvolution2dDescriptor& descriptor,
358  const armnn::ConstTensor& weights,
359  const armnn::Optional<armnn::ConstTensor>& biases,
360  const char* name)
361 {
362  IgnoreUnused(name);
363 
364  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
365  auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
366  descriptor.m_PadLeft,
367  descriptor.m_PadRight,
368  descriptor.m_PadTop,
369  descriptor.m_PadBottom,
370  descriptor.m_StrideX,
371  descriptor.m_StrideY,
372  descriptor.m_DilationX,
373  descriptor.m_DilationY,
374  descriptor.m_BiasEnabled,
375  GetFlatBufferDataLayout(descriptor.m_DataLayout));
376 
377  flatbuffers::Offset<serializer::ConstTensor> fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
378  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
379  if (biases.has_value())
380  {
381  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
382  }
383 
384  auto flatBufferLayer = CreateDepthwiseConvolution2dLayer(m_flatBufferBuilder,
385  fbBaseLayer,
386  fbDescriptor,
387  fbWeightsConstTensorInfo,
388  fbBiasesConstTensorInfo);
389 
390  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DepthwiseConvolution2dLayer);
391 }
392 
393 void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
394  const char* name)
395 {
396  IgnoreUnused(name);
397 
398  auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
399  auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
400 
401  CreateAnyLayer(fbDequantizeLayer.o, serializer::Layer::Layer_DequantizeLayer);
402 }
403 
404 void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectableLayer* layer,
405  const armnn::DetectionPostProcessDescriptor& descriptor,
406  const armnn::ConstTensor& anchors,
407  const char* name)
408 {
409  IgnoreUnused(name);
410 
411  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
412  auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
413  descriptor.m_MaxDetections,
414  descriptor.m_MaxClassesPerDetection,
415  descriptor.m_DetectionsPerClass,
416  descriptor.m_NmsScoreThreshold,
417  descriptor.m_NmsIouThreshold,
418  descriptor.m_NumClasses,
419  descriptor.m_UseRegularNms,
420  descriptor.m_ScaleX,
421  descriptor.m_ScaleY,
422  descriptor.m_ScaleW,
423  descriptor.m_ScaleH);
424 
425  flatbuffers::Offset<serializer::ConstTensor> fbAnchorsConstTensorInfo = CreateConstTensorInfo(anchors);
426 
427  auto flatBufferLayer = CreateDetectionPostProcessLayer(m_flatBufferBuilder,
428  fbBaseLayer,
429  fbDescriptor,
430  fbAnchorsConstTensorInfo);
431 
432  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_DetectionPostProcessLayer);
433 }
434 
435 void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
436 {
437  IgnoreUnused(name);
438 
439  auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
440  auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
441 
442  CreateAnyLayer(fbDivisionLayer.o, serializer::Layer::Layer_DivisionLayer);
443 }
444 
445 void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLayer* layer,
446  const armnn::ElementwiseUnaryDescriptor& descriptor,
447  const char* name)
448 {
449  IgnoreUnused(name);
450 
451  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
452  auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
453  m_flatBufferBuilder,
454  GetFlatBufferUnaryOperation(descriptor.m_Operation));
455 
456  auto fbLayer = serializer::CreateElementwiseUnaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
457  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_ElementwiseUnaryLayer);
458 }
459 
460 void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
461 {
462  IgnoreUnused(name);
463 
464  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
465  auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
466 
467  CreateAnyLayer(fbEqualLayer.o, serializer::Layer::Layer_EqualLayer);
468 }
469 
470 void SerializerVisitor::VisitFillLayer(const armnn::IConnectableLayer* layer,
471  const armnn::FillDescriptor& fillDescriptor,
472  const char* name)
473 {
474  IgnoreUnused(name);
475 
476  auto fbFillBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Fill);
477 
478  auto fbDescriptor = serializer::CreateFillDescriptor(m_flatBufferBuilder, fillDescriptor.m_Value);
479 
480  auto fbFillLayer = serializer::CreateFillLayer(m_flatBufferBuilder, fbFillBaseLayer, fbDescriptor);
481 
482  CreateAnyLayer(fbFillLayer.o, serializer::Layer::Layer_FillLayer);
483 }
484 
485 void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
486 {
487  IgnoreUnused(name);
488 
489  auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
490  auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
491 
492  CreateAnyLayer(flatBufferFloorLayer.o, serializer::Layer::Layer_FloorLayer);
493 }
494 
495 void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer,
496  const char* name)
497 {
498  armnn::GatherDescriptor gatherDescriptor{};
499  VisitGatherLayer(layer, gatherDescriptor, name);
500 }
501 
502 void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer,
503  const armnn::GatherDescriptor& gatherDescriptor,
504  const char* name)
505 {
506  IgnoreUnused(name);
507 
508  auto fbGatherDescriptor = CreateGatherDescriptor(m_flatBufferBuilder,
509  gatherDescriptor.m_Axis);
510  auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
511  auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer, fbGatherDescriptor);
512 
513  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_GatherLayer);
514 }
515 
516 void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
517 {
518  IgnoreUnused(name);
519 
520  auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
521  auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
522 
523  CreateAnyLayer(fbGreaterLayer.o, serializer::Layer::Layer_GreaterLayer);
524 }
525 
526 void SerializerVisitor::VisitInstanceNormalizationLayer(
527  const armnn::IConnectableLayer* layer,
528  const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
529  const char* name)
530 {
531  IgnoreUnused(name);
532 
533  auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
534  m_flatBufferBuilder,
535  instanceNormalizationDescriptor.m_Gamma,
536  instanceNormalizationDescriptor.m_Beta,
537  instanceNormalizationDescriptor.m_Eps,
538  GetFlatBufferDataLayout(instanceNormalizationDescriptor.m_DataLayout));
539 
540  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_InstanceNormalization);
541  auto fbLayer = serializer::CreateInstanceNormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
542 
543  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_InstanceNormalizationLayer);
544 }
545 
546 void SerializerVisitor::VisitL2NormalizationLayer(const armnn::IConnectableLayer* layer,
547  const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
548  const char* name)
549 {
550  IgnoreUnused(name);
551 
552  // Create FlatBuffer BaseLayer
553  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
554 
555  // Create the FlatBuffer L2Normalization Descriptor
556  auto fbDescriptor = serializer::CreateL2NormalizationDescriptor(
557  m_flatBufferBuilder,
558  GetFlatBufferDataLayout(l2NormalizationDescriptor.m_DataLayout),
559  l2NormalizationDescriptor.m_Eps);
560 
561  // Create FlatBuffer layer
562  auto fbLayer = serializer::CreateL2NormalizationLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
563 
564  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_L2NormalizationLayer);
565 }
566 
567 void SerializerVisitor::VisitLogicalBinaryLayer(const armnn::IConnectableLayer* layer,
568  const armnn::LogicalBinaryDescriptor& descriptor,
569  const char* name)
570 {
571  IgnoreUnused(name);
572 
573  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogicalBinary);
574  auto fbDescriptor = serializer::CreateLogicalBinaryDescriptor(
575  m_flatBufferBuilder,
576  GetFlatBufferLogicalBinaryOperation(descriptor.m_Operation));
577 
578  auto fbLayer = serializer::CreateLogicalBinaryLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
579  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_LogicalBinaryLayer);
580 }
581 
582 void SerializerVisitor::VisitLogSoftmaxLayer(const armnn::IConnectableLayer* layer,
583  const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
584  const char* name)
585 {
586  IgnoreUnused(name);
587 
588  // Create FlatBuffer BaseLayer
589  auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
590 
591  // Create the FlatBuffer LogSoftmaxDescriptor
592  auto flatBufferLogSoftmaxDesc =
593  serializer::CreateLogSoftmaxDescriptor(m_flatBufferBuilder,
594  logSoftmaxDescriptor.m_Beta,
595  logSoftmaxDescriptor.m_Axis);
596 
597  // Create the FlatBuffer LogSoftmaxLayer
598  auto flatBufferLogSoftmaxLayer =
599  serializer::CreateLogSoftmaxLayer(m_flatBufferBuilder,
600  flatBufferLogSoftmaxBaseLayer,
601  flatBufferLogSoftmaxDesc);
602 
603  CreateAnyLayer(flatBufferLogSoftmaxLayer.o, serializer::Layer::Layer_LogSoftmaxLayer);
604 }
605 
606 void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer,
607  const armnn::LstmDescriptor& descriptor,
608  const armnn::LstmInputParams& params,
609  const char* name)
610 {
611  IgnoreUnused(name);
612 
613  auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
614 
615  auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
616  m_flatBufferBuilder,
617  descriptor.m_ActivationFunc,
618  descriptor.m_ClippingThresCell,
619  descriptor.m_ClippingThresProj,
620  descriptor.m_CifgEnabled,
621  descriptor.m_PeepholeEnabled,
622  descriptor.m_ProjectionEnabled,
623  descriptor.m_LayerNormEnabled);
624 
625  // Get mandatory input parameters
626  auto inputToForgetWeights = CreateConstTensorInfo(*params.m_InputToForgetWeights);
627  auto inputToCellWeights = CreateConstTensorInfo(*params.m_InputToCellWeights);
628  auto inputToOutputWeights = CreateConstTensorInfo(*params.m_InputToOutputWeights);
629  auto recurrentToForgetWeights = CreateConstTensorInfo(*params.m_RecurrentToForgetWeights);
630  auto recurrentToCellWeights = CreateConstTensorInfo(*params.m_RecurrentToCellWeights);
631  auto recurrentToOutputWeights = CreateConstTensorInfo(*params.m_RecurrentToOutputWeights);
632  auto forgetGateBias = CreateConstTensorInfo(*params.m_ForgetGateBias);
633  auto cellBias = CreateConstTensorInfo(*params.m_CellBias);
634  auto outputGateBias = CreateConstTensorInfo(*params.m_OutputGateBias);
635 
636  // Define optional parameters; these are set depending on the configuration in the LSTM descriptor
637  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
638  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
639  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
640  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
641  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
642  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
643  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
644  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
645  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
646  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
647  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
648  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
649 
650  if (!descriptor.m_CifgEnabled)
651  {
652  inputToInputWeights = CreateConstTensorInfo(*params.m_InputToInputWeights);
653  recurrentToInputWeights = CreateConstTensorInfo(*params.m_RecurrentToInputWeights);
654  cellToInputWeights = CreateConstTensorInfo(*params.m_CellToInputWeights);
655  inputGateBias = CreateConstTensorInfo(*params.m_InputGateBias);
656  }
657 
658  if (descriptor.m_ProjectionEnabled)
659  {
660  projectionWeights = CreateConstTensorInfo(*params.m_ProjectionWeights);
661  projectionBias = CreateConstTensorInfo(*params.m_ProjectionBias);
662  }
663 
664  if (descriptor.m_PeepholeEnabled)
665  {
666  cellToForgetWeights = CreateConstTensorInfo(*params.m_CellToForgetWeights);
667  cellToOutputWeights = CreateConstTensorInfo(*params.m_CellToOutputWeights);
668  }
669 
670  if (descriptor.m_LayerNormEnabled)
671  {
672  if (!descriptor.m_CifgEnabled)
673  {
674  inputLayerNormWeights = CreateConstTensorInfo((*params.m_InputLayerNormWeights));
675  }
676  forgetLayerNormWeights = CreateConstTensorInfo(*params.m_ForgetLayerNormWeights);
677  cellLayerNormWeights = CreateConstTensorInfo(*params.m_CellLayerNormWeights);
678  outputLayerNormWeights = CreateConstTensorInfo(*params.m_OutputLayerNormWeights);
679  }
680 
681  auto fbLstmParams = serializer::CreateLstmInputParams(
682  m_flatBufferBuilder,
683  inputToForgetWeights,
684  inputToCellWeights,
685  inputToOutputWeights,
686  recurrentToForgetWeights,
687  recurrentToCellWeights,
688  recurrentToOutputWeights,
689  forgetGateBias,
690  cellBias,
691  outputGateBias,
692  inputToInputWeights,
693  recurrentToInputWeights,
694  cellToInputWeights,
695  inputGateBias,
696  projectionWeights,
697  projectionBias,
698  cellToForgetWeights,
699  cellToOutputWeights,
700  inputLayerNormWeights,
701  forgetLayerNormWeights,
702  cellLayerNormWeights,
703  outputLayerNormWeights);
704 
705  auto fbLstmLayer = serializer::CreateLstmLayer(
706  m_flatBufferBuilder,
707  fbLstmBaseLayer,
708  fbLstmDescriptor,
709  fbLstmParams);
710 
711  CreateAnyLayer(fbLstmLayer.o, serializer::Layer::Layer_LstmLayer);
712 }
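// Note: optional tensors that are left unset above keep a null flatbuffers offset,
// so the corresponding fields are simply omitted from the LstmInputParams table;
// the deserializer presumably re-derives which tensors are present from the
// descriptor flags (CIFG, peephole, projection, layer normalization).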
713 
714 void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
715 {
716  IgnoreUnused(name);
717 
718  auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
719  auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
720 
721  CreateAnyLayer(fbMaximumLayer.o, serializer::Layer::Layer_MaximumLayer);
722 }
723 
724 void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer,
725  const armnn::MeanDescriptor& descriptor,
726  const char* name)
727 {
728  IgnoreUnused(name);
729 
730  auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
731  auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
732  m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
733  descriptor.m_KeepDims);
734 
735  auto fbMeanLayer = serializer::CreateMeanLayer(m_flatBufferBuilder,
736  fbMeanBaseLayer,
737  fbMeanDescriptor);
738 
739  CreateAnyLayer(fbMeanLayer.o, serializer::Layer::Layer_MeanLayer);
740 }
741 
742 void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
743 {
744  IgnoreUnused(name);
745 
746  auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
747  auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
748 
749  CreateAnyLayer(fbMinimumLayer.o, serializer::Layer::Layer_MinimumLayer);
750 }
751 
752 void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
753 {
754  IgnoreUnused(name);
755 
756  auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
757  auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
758 
759  CreateAnyLayer(fbMergeLayer.o, serializer::Layer::Layer_MergeLayer);
760 }
761 
762 void SerializerVisitor::VisitMergerLayer(const armnn::IConnectableLayer* layer,
763  const armnn::MergerDescriptor& mergerDescriptor,
764  const char* name)
765 {
766  VisitConcatLayer(layer, mergerDescriptor, name);
767 }
768 
769 void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer,
770  const armnn::ConcatDescriptor& concatDescriptor,
771  const char* name)
772 {
773  IgnoreUnused(name);
774 
775  auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
776 
777  std::vector<flatbuffers::Offset<UintVector>> views;
778  for (unsigned int v = 0; v < concatDescriptor.GetNumViews(); ++v)
779  {
780  const uint32_t* origin = concatDescriptor.GetViewOrigin(v);
781  std::vector<uint32_t> origins;
782  for (unsigned int d = 0; d < concatDescriptor.GetNumDimensions(); ++d)
783  {
784  origins.push_back(origin[d]);
785  }
786  auto view = m_flatBufferBuilder.CreateVector(origins);
787  auto uintVector = CreateUintVector(m_flatBufferBuilder, view);
788  views.push_back(uintVector);
789  }
790 
791  auto flatBufferConcatDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
792  concatDescriptor.GetConcatAxis(),
793  concatDescriptor.GetNumViews(),
794  concatDescriptor.GetNumDimensions(),
795  m_flatBufferBuilder.CreateVector(views));
796 
797  auto flatBufferLayer = CreateConcatLayer(m_flatBufferBuilder,
798  flatBufferConcatBaseLayer,
799  flatBufferConcatDescriptor);
800 
801  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ConcatLayer);
802 }
803 
804 void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
805 {
806  IgnoreUnused(name);
807 
808  auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
809  auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
810  fbMultiplicationBaseLayer);
811 
812  CreateAnyLayer(fbMultiplicationLayer.o, serializer::Layer::Layer_MultiplicationLayer);
813 }
814 
815 void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer,
816  const armnn::PadDescriptor& padDescriptor,
817  const char* name)
818 {
819  IgnoreUnused(name);
820 
821  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
822 
823  std::vector<unsigned int> padList;
824  for (auto& p: padDescriptor.m_PadList)
825  {
826  padList.push_back(p.first);
827  padList.push_back(p.second);
828  }
829 
830  auto flatBufferPadDesc = serializer::CreatePadDescriptor(m_flatBufferBuilder,
831  m_flatBufferBuilder.CreateVector(padList),
832  padDescriptor.m_PadValue);
833 
834  auto flatBufferPadLayer = serializer::CreatePadLayer(m_flatBufferBuilder,
835  flatBufferBaseLayer,
836  flatBufferPadDesc);
837 
838  CreateAnyLayer(flatBufferPadLayer.o, serializer::Layer::Layer_PadLayer);
839 }
840 
841 void SerializerVisitor::VisitPermuteLayer(const armnn::IConnectableLayer* layer,
842  const armnn::PermuteDescriptor& permuteDescriptor,
843  const char* name)
844 {
845  IgnoreUnused(name);
846 
847  // Create FlatBuffer BaseLayer
848  auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
849 
850  std::vector<unsigned int> dimMappings;
851  for (unsigned int i=0; i<permuteDescriptor.m_DimMappings.GetSize(); ++i)
852  {
853  dimMappings.push_back(permuteDescriptor.m_DimMappings[i]);
854  }
855 
856  auto flatBufferPermuteDesc = serializer::CreatePermuteDescriptor(m_flatBufferBuilder,
857  m_flatBufferBuilder.CreateVector(dimMappings));
858 
859  // Create the FlatBuffer PermuteLayer
860  auto flatBufferPermuteLayer = serializer::CreatePermuteLayer(m_flatBufferBuilder,
861  flatBufferPermuteBaseLayer,
862  flatBufferPermuteDesc);
863 
864  // Add the AnyLayer to the FlatBufferLayers
865  CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
866 }
867 
868 // Build FlatBuffer for Rank Layer
869 void SerializerVisitor::VisitRankLayer(const armnn::IConnectableLayer* layer,
870  const char* name)
871 {
872  IgnoreUnused(name);
873  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
874  auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
875 
876  CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
877 }
878 // Build FlatBuffer for Reshape Layer
879 void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
880  const armnn::ReshapeDescriptor& reshapeDescriptor,
881  const char* name)
882 {
883  IgnoreUnused(name);
884 
885  // Create FlatBuffer BaseLayer
886  auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
887 
888  std::vector<unsigned int> targetShape;
889  for (unsigned int i =0; i < reshapeDescriptor.m_TargetShape.GetNumDimensions(); i++)
890  {
891  targetShape.push_back(reshapeDescriptor.m_TargetShape[i]);
892  }
893 
894  auto flatBufferReshapeDesc = serializer::CreateReshapeDescriptor(m_flatBufferBuilder,
895  m_flatBufferBuilder.CreateVector(targetShape));
896 
897  // Create the FlatBuffer ReshapeLayer
898  auto flatBufferReshapeLayer = serializer::CreateReshapeLayer(m_flatBufferBuilder, flatBufferReshapeBaseLayer,
899  flatBufferReshapeDesc);
900 
901  // Add the AnyLayer to the FlatBufferLayers
902  CreateAnyLayer(flatBufferReshapeLayer.o, serializer::Layer::Layer_ReshapeLayer);
903 }
904 
905 void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer* layer,
906  const armnn::ResizeBilinearDescriptor& resizeDescriptor,
907  const char* name)
908 {
909  IgnoreUnused(name);
910 
911  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
912 
913  auto flatBufferDescriptor =
914  CreateResizeBilinearDescriptor(m_flatBufferBuilder,
915  resizeDescriptor.m_TargetWidth,
916  resizeDescriptor.m_TargetHeight,
917  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
918  resizeDescriptor.m_AlignCorners,
919  resizeDescriptor.m_HalfPixelCenters);
920 
921  auto flatBufferLayer = serializer::CreateResizeBilinearLayer(m_flatBufferBuilder,
922  flatBufferBaseLayer,
923  flatBufferDescriptor);
924 
925  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeBilinearLayer);
926 }
927 
928 void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
929  const armnn::ResizeDescriptor& resizeDescriptor,
930  const char* name)
931 {
932  IgnoreUnused(name);
933 
934  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
935 
936  auto flatBufferDescriptor =
937  CreateResizeDescriptor(m_flatBufferBuilder,
938  resizeDescriptor.m_TargetHeight,
939  resizeDescriptor.m_TargetWidth,
940  GetFlatBufferResizeMethod(resizeDescriptor.m_Method),
941  GetFlatBufferDataLayout(resizeDescriptor.m_DataLayout),
942  resizeDescriptor.m_AlignCorners,
943  resizeDescriptor.m_HalfPixelCenters);
944 
945  auto flatBufferLayer = serializer::CreateResizeLayer(m_flatBufferBuilder,
946  flatBufferBaseLayer,
947  flatBufferDescriptor);
948 
949  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_ResizeLayer);
950 }
951 
952 void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
953 {
954  IgnoreUnused(name);
955 
956  auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
957  auto fbRsqrtLayer = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
958 
959  CreateAnyLayer(fbRsqrtLayer.o, serializer::Layer::Layer_RsqrtLayer);
960 }
961 
962 void SerializerVisitor::VisitSliceLayer(const armnn::IConnectableLayer* layer,
963  const armnn::SliceDescriptor& sliceDescriptor,
964  const char* name)
965 {
966  IgnoreUnused(name);
967 
968  auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
969  auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
970  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
971  m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Size));
972 
973  auto fbSliceLayer = serializer::CreateSliceLayer(m_flatBufferBuilder, fbSliceBaseLayer, fbSliceDescriptor);
974 
975  CreateAnyLayer(fbSliceLayer.o, serializer::Layer::Layer_SliceLayer);
976 }
977 
978 // Build FlatBuffer for Softmax Layer
979 void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
980  const armnn::SoftmaxDescriptor& softmaxDescriptor,
981  const char* name)
982 {
983  IgnoreUnused(name);
984 
985  // Create FlatBuffer BaseLayer
986  auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
987 
988  // Create the FlatBuffer SoftmaxDescriptor
989  auto flatBufferSoftmaxDesc =
990  serializer::CreateSoftmaxDescriptor(m_flatBufferBuilder, softmaxDescriptor.m_Beta);
991 
992  // Create the FlatBuffer SoftmaxLayer
993  auto flatBufferSoftmaxLayer =
994  serializer::CreateSoftmaxLayer(m_flatBufferBuilder,
995  flatBufferSoftmaxBaseLayer,
996  flatBufferSoftmaxDesc);
997 
998  CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
999 }
1000 
1001 void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* layer,
1002  const armnn::Pooling2dDescriptor& pooling2dDescriptor,
1003  const char* name)
1004 {
1005  IgnoreUnused(name);
1006 
1007  auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
1008  auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
1009  m_flatBufferBuilder,
1010  GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
1011  pooling2dDescriptor.m_PadLeft,
1012  pooling2dDescriptor.m_PadRight,
1013  pooling2dDescriptor.m_PadTop,
1014  pooling2dDescriptor.m_PadBottom,
1015  pooling2dDescriptor.m_PoolWidth,
1016  pooling2dDescriptor.m_PoolHeight,
1017  pooling2dDescriptor.m_StrideX,
1018  pooling2dDescriptor.m_StrideY,
1019  GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
1020  GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
1021  GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
1022 
1023  auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
1024  fbPooling2dBaseLayer,
1025  fbPooling2dDescriptor);
1026 
1027  CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
1028 }
1029 
1030 void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
1031  const char* name)
1032 {
1033  IgnoreUnused(name);
1034 
1035  // Create FlatBuffer BaseLayer
1036  auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
1037 
1038  // Create the FlatBuffer PreluLayer
1039  auto flatBufferPreluLayer = serializer::CreatePreluLayer(m_flatBufferBuilder, flatBufferPreluBaseLayer);
1040 
1041  // Add the AnyLayer to the FlatBufferLayers
1042  CreateAnyLayer(flatBufferPreluLayer.o, serializer::Layer::Layer_PreluLayer);
1043 }
1044 
1045 void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
1046 {
1047  IgnoreUnused(name);
1048 
1049  auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
1050  auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
1051  fbQuantizeBaseLayer);
1052  CreateAnyLayer(fbQuantizeLayer.o, serializer::Layer::Layer_QuantizeLayer);
1053 }
1054 
1055 // Build FlatBuffer for FullyConnected Layer
1056 void SerializerVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer* layer,
1057  const armnn::FullyConnectedDescriptor& fullyConnectedDescriptor,
1058  const armnn::ConstTensor& weights,
1059  const armnn::Optional<armnn::ConstTensor>& biases,
1060  const char* name)
1061 {
1062  IgnoreUnused(name);
1063 
1064  // Create FlatBuffer BaseLayer
1065  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
1066 
1067  // Create FlatBuffer FullyConnectedDescriptor
1068  auto flatBufferDescriptor =
1069  serializer::CreateFullyConnectedDescriptor(m_flatBufferBuilder,
1070  fullyConnectedDescriptor.m_BiasEnabled,
1071  fullyConnectedDescriptor.m_TransposeWeightMatrix);
1072 
1073  // Create FlatBuffer weights data
1074  auto flatBufferWeights = CreateConstTensorInfo(weights);
1075 
1076  // Create FlatBuffer bias data
1077  flatbuffers::Offset<serializer::ConstTensor> flatBufferBiases;
1078  if (fullyConnectedDescriptor.m_BiasEnabled)
1079  {
1080  flatBufferBiases = CreateConstTensorInfo(biases.value());
1081  }
1082 
1083  // Create FlatBuffer FullyConnectedLayer
1084  auto flatBufferLayer = serializer::CreateFullyConnectedLayer(m_flatBufferBuilder,
1085  flatBufferBaseLayer,
1086  flatBufferDescriptor,
1087  flatBufferWeights,
1088  flatBufferBiases);
1089 
1090  // Add created FullyConnectedLayer to the FlatBufferLayers
1091  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_FullyConnectedLayer);
1092 }
1093 
1094 // Build FlatBuffer for SpaceToBatchNd Layer
1095 void SerializerVisitor::VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer* layer,
1096  const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
1097  const char* name)
1098 {
1099  IgnoreUnused(name);
1100 
1101  // Create FlatBuffer BaseLayer
1102  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
1103 
1104  std::vector<unsigned int> padList;
1105  padList.reserve(spaceToBatchNdDescriptor.m_PadList.size()*2);
1106  for (auto& pad : spaceToBatchNdDescriptor.m_PadList)
1107  {
1108  padList.push_back(pad.first);
1109  padList.push_back(pad.second);
1110  }
1111 
1112  auto flatBufferDescriptor =
1113  CreateSpaceToBatchNdDescriptor(m_flatBufferBuilder,
1114  m_flatBufferBuilder.CreateVector(spaceToBatchNdDescriptor.m_BlockShape),
1115  m_flatBufferBuilder.CreateVector(padList),
1116  GetFlatBufferDataLayout(spaceToBatchNdDescriptor.m_DataLayout));
1117 
1118  auto flatBufferLayer = serializer::CreateSpaceToBatchNdLayer(m_flatBufferBuilder,
1119  flatBufferBaseLayer,
1120  flatBufferDescriptor);
1121 
1122  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToBatchNdLayer);
1123 }
1124 
1125 // Build FlatBuffer for SpaceToDepthLayer
1126 void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* layer,
1127  const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
1128  const char* name)
1129 {
1130  IgnoreUnused(name);
1131 
1132  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
1133  auto flatBufferDescriptor =
1134  CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
1135  spaceToDepthDescriptor.m_BlockSize,
1136  GetFlatBufferDataLayout(spaceToDepthDescriptor.m_DataLayout));
1137 
1138  auto flatBufferLayer = serializer::CreateSpaceToDepthLayer(m_flatBufferBuilder,
1139  flatBufferBaseLayer,
1140  flatBufferDescriptor);
1141 
1142  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_SpaceToDepthLayer);
1143 }
1144 
1145 // Build FlatBuffer for Splitter Layer
1146 void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer,
1147  const armnn::ViewsDescriptor& viewsDescriptor,
1148  const char* name)
1149 {
1150  IgnoreUnused(name);
1151 
1152  // Create FlatBuffer ViewOrigins
1153  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
1154  flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
1155 
1156  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1157  {
1158  std::vector<uint32_t> viewOrigin;
1159  viewOrigin.reserve(viewsDescriptor.GetNumDimensions());
1160 
1161  // Copy vector
1162  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1163  {
1164  viewOrigin.push_back(viewsDescriptor.GetViewOrigin(vIdx)[dIdx]);
1165  }
1166 
1167  flatBufferViewOrigins.push_back(CreateUintVector(m_flatBufferBuilder,
1168  m_flatBufferBuilder.CreateVector(viewOrigin)));
1169  }
1170 
1171  // Create FlatBuffer OriginsDescriptor
1172  auto flatBufferOriginDescriptor = CreateOriginsDescriptor(m_flatBufferBuilder,
1173  viewsDescriptor.GetOrigins().GetConcatAxis(),
1174  viewsDescriptor.GetOrigins().GetNumViews(),
1175  viewsDescriptor.GetOrigins().GetNumDimensions(),
1176  m_flatBufferBuilder.CreateVector(flatBufferViewOrigins));
1177 
1178  // Create FlatBuffer ViewSizes
1179  std::vector<flatbuffers::Offset<UintVector>> flatBufferViewSizes;
1180  flatBufferViewSizes.reserve(viewsDescriptor.GetNumViews());
1181 
1182  for(unsigned int vIdx = 0; vIdx < viewsDescriptor.GetNumViews(); ++vIdx)
1183  {
1184  std::vector<uint32_t> viewSize;
1185  viewSize.reserve(viewsDescriptor.GetNumDimensions());
1186 
1187  // Copy vector
1188  for(unsigned int dIdx = 0; dIdx < viewsDescriptor.GetNumDimensions(); ++dIdx)
1189  {
1190  viewSize.push_back(viewsDescriptor.GetViewSizes(vIdx)[dIdx]);
1191  }
1192 
1193  flatBufferViewSizes.push_back(CreateUintVector(m_flatBufferBuilder,
1194  m_flatBufferBuilder.CreateVector(viewSize)));
1195  }
1196 
1197  // Create FlatBuffer ViewsDescriptor
1198  auto flatBufferViewsDescriptor = CreateViewsDescriptor(m_flatBufferBuilder,
1199  flatBufferOriginDescriptor,
1200  m_flatBufferBuilder.CreateVector(flatBufferViewSizes));
1201 
1202  // Create FlatBuffer BaseLayer
1203  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Splitter);
1204 
1205  auto flatBufferSplitterLayer = serializer::CreateSplitterLayer(m_flatBufferBuilder,
1206  flatBufferBaseLayer,
1207  flatBufferViewsDescriptor);
1208 
1209  CreateAnyLayer(flatBufferSplitterLayer.o, serializer::Layer::Layer_SplitterLayer);
1210 }
1211 
1212 void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer* layer,
1213  const armnn::NormalizationDescriptor& descriptor,
1214  const char* name)
1215 {
1216  IgnoreUnused(name);
1217 
1218  auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
1219 
1220  auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
1221  m_flatBufferBuilder,
1222  GetFlatBufferNormalizationAlgorithmChannel(descriptor.m_NormChannelType),
1223  GetFlatBufferNormalizationAlgorithmMethod(descriptor.m_NormMethodType),
1224  descriptor.m_NormSize,
1225  descriptor.m_Alpha,
1226  descriptor.m_Beta,
1227  descriptor.m_K,
1228  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1229 
1230  auto flatBufferLayer = serializer::CreateNormalizationLayer(m_flatBufferBuilder,
1231  fbNormalizationBaseLayer,
1232  fbNormalizationDescriptor);
1233 
1234  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_NormalizationLayer);
1235 }
1236 
1237 void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
1238  const armnn::StackDescriptor& stackDescriptor,
1239  const char* name)
1240 {
1241  IgnoreUnused(name);
1242 
1243  auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
1244 
1245  std::vector<unsigned int> inputShape;
1246  for (unsigned int i =0; i < stackDescriptor.m_InputShape.GetNumDimensions(); i++)
1247  {
1248  inputShape.push_back(stackDescriptor.m_InputShape[i]);
1249  }
1250 
1251  auto flatBufferStackDescriptor = CreateStackDescriptor(m_flatBufferBuilder,
1252  stackDescriptor.m_Axis,
1253  stackDescriptor.m_NumInputs,
1254  m_flatBufferBuilder.CreateVector(inputShape));
1255 
1256  auto stackLayer = serializer::CreateStackLayer(m_flatBufferBuilder, stackBaseLayer, flatBufferStackDescriptor);
1257  CreateAnyLayer(stackLayer.o, serializer::Layer::Layer_StackLayer);
1258 }
1259 
1260 void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer *layer,
1261  const armnn::StandInDescriptor& standInDescriptor,
1262  const char *name)
1263 {
1264  IgnoreUnused(name);
1265 
1266  auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
1267  standInDescriptor.m_NumInputs,
1268  standInDescriptor.m_NumOutputs);
1269 
1270  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StandIn);
1271  auto fbLayer = serializer::CreateStandInLayer(m_flatBufferBuilder, fbBaseLayer, fbDescriptor);
1272 
1273  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_StandInLayer);
1274 }
1275 
1276 void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* layer,
1277  const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
1278  const char* name)
1279 {
1280  IgnoreUnused(name);
1281 
1282  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
1283 
1284  auto flatBufferDescriptor =
1285  CreateStridedSliceDescriptor(m_flatBufferBuilder,
1286  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Begin),
1287  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_End),
1288  m_flatBufferBuilder.CreateVector(stridedSliceDescriptor.m_Stride),
1289  stridedSliceDescriptor.m_BeginMask,
1290  stridedSliceDescriptor.m_EndMask,
1291  stridedSliceDescriptor.m_ShrinkAxisMask,
1292  stridedSliceDescriptor.m_EllipsisMask,
1293  stridedSliceDescriptor.m_NewAxisMask,
1294  GetFlatBufferDataLayout(stridedSliceDescriptor.m_DataLayout));
1295 
1296  auto flatBufferLayer = serializer::CreateStridedSliceLayer(m_flatBufferBuilder,
1297  flatBufferBaseLayer,
1298  flatBufferDescriptor);
1299 
1300  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_StridedSliceLayer);
1301 }
1302 
1303 void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
1304 {
1305  IgnoreUnused(name);
1306 
1307  auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
1308  auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
1309 
1310  CreateAnyLayer(fbSubtractionLayer.o, serializer::Layer::Layer_SubtractionLayer);
1311 }
1312 
1313 void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
1314 {
1315  IgnoreUnused(name);
1316 
1317  auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
1318  auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
1319 
1320  CreateAnyLayer(fbSwitchLayer.o, serializer::Layer::Layer_SwitchLayer);
1321 }
1322 
1323 void SerializerVisitor::VisitTransposeConvolution2dLayer(
1324  const armnn::IConnectableLayer* layer,
1325  const armnn::TransposeConvolution2dDescriptor& descriptor,
1326  const armnn::ConstTensor& weights,
1327  const armnn::Optional<armnn::ConstTensor>& biases,
1328  const char* name)
1329 {
1330  IgnoreUnused(name);
1331 
1332  auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_TransposeConvolution2d);
1333  auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
1334  descriptor.m_PadLeft,
1335  descriptor.m_PadRight,
1336  descriptor.m_PadTop,
1337  descriptor.m_PadBottom,
1338  descriptor.m_StrideX,
1339  descriptor.m_StrideY,
1340  descriptor.m_BiasEnabled,
1341  GetFlatBufferDataLayout(descriptor.m_DataLayout));
1342 
1343  // weights & biases
1344  auto fbWeightsConstTensorInfo = CreateConstTensorInfo(weights);
1345  flatbuffers::Offset<serializer::ConstTensor> fbBiasesConstTensorInfo;
1346  if (biases.has_value())
1347  {
1348  fbBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
1349  }
1350 
1351  auto fbLayer = CreateTransposeConvolution2dLayer(m_flatBufferBuilder,
1352  fbBaseLayer,
1353  fbDescriptor,
1354  fbWeightsConstTensorInfo,
1355  fbBiasesConstTensorInfo);
1356 
1357  CreateAnyLayer(fbLayer.o, serializer::Layer::Layer_TransposeConvolution2dLayer);
1358 }
1359 
1360 void SerializerVisitor::VisitTransposeLayer(const armnn::IConnectableLayer* layer,
1361  const armnn::TransposeDescriptor& descriptor,
1362  const char* name)
1363 {
1364  IgnoreUnused(name);
1365 
1366  // Create FlatBuffer BaseLayer
1367  auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
1368 
1369  std::vector<unsigned int> dimMappings;
1370  for (unsigned int i=0; i<descriptor.m_DimMappings.GetSize(); ++i)
1371  {
1372  dimMappings.push_back(descriptor.m_DimMappings[i]);
1373  }
1374 
1375  auto flatBufferDesc = serializer::CreateTransposeDescriptor(m_flatBufferBuilder,
1376  m_flatBufferBuilder.CreateVector(dimMappings));
1377 
1378  // Create the FlatBuffer TransposeLayer
1379  auto flatBufferLayer = serializer::CreateTransposeLayer(m_flatBufferBuilder,
1380  flatBufferBaseLayer,
1381  flatBufferDesc);
1382 
1383  // Add the AnyLayer to the FlatBufferLayers
1384  CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_TransposeLayer);
1385 }
1386 
1387 void SerializerVisitor::VisitQLstmLayer(const armnn::IConnectableLayer* layer,
1388  const armnn::QLstmDescriptor& descriptor,
1389  const armnn::LstmInputParams& params,
1390  const char* name)
1391 {
1392  IgnoreUnused(name);
1393 
1394  auto fbQLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QLstm);
1395 
1396  auto fbQLstmDescriptor = serializer::CreateQLstmDescriptor(
1397  m_flatBufferBuilder,
1398  descriptor.m_CifgEnabled,
1399  descriptor.m_PeepholeEnabled,
1400  descriptor.m_ProjectionEnabled,
1401  descriptor.m_LayerNormEnabled,
1402  descriptor.m_CellClip,
1403  descriptor.m_ProjectionClip,
1404  descriptor.m_InputIntermediateScale,
1405  descriptor.m_ForgetIntermediateScale,
1406  descriptor.m_CellIntermediateScale,
1407  descriptor.m_OutputIntermediateScale,
1408  descriptor.m_HiddenStateZeroPoint,
1409  descriptor.m_HiddenStateScale
1410  );
1411 
1412  // Mandatory params
1413  auto inputToForgetWeights = CreateConstTensorInfo(*params.m_InputToForgetWeights);
1414  auto inputToCellWeights = CreateConstTensorInfo(*params.m_InputToCellWeights);
1415  auto inputToOutputWeights = CreateConstTensorInfo(*params.m_InputToOutputWeights);
1416  auto recurrentToForgetWeights = CreateConstTensorInfo(*params.m_RecurrentToForgetWeights);
1417  auto recurrentToCellWeights = CreateConstTensorInfo(*params.m_RecurrentToCellWeights);
1418  auto recurrentToOutputWeights = CreateConstTensorInfo(*params.m_RecurrentToOutputWeights);
1419  auto forgetGateBias = CreateConstTensorInfo(*params.m_ForgetGateBias);
1420  auto cellBias = CreateConstTensorInfo(*params.m_CellBias);
1421  auto outputGateBias = CreateConstTensorInfo(*params.m_OutputGateBias);
1422 
1423  // CIFG
1424  flatbuffers::Offset<serializer::ConstTensor> inputToInputWeights;
1425  flatbuffers::Offset<serializer::ConstTensor> recurrentToInputWeights;
1426  flatbuffers::Offset<serializer::ConstTensor> inputGateBias;
1427 
1428  if (!descriptor.m_CifgEnabled)
1429  {
1430  inputToInputWeights = CreateConstTensorInfo(*params.m_InputToInputWeights);
1431  recurrentToInputWeights = CreateConstTensorInfo(*params.m_RecurrentToInputWeights);
1432  inputGateBias = CreateConstTensorInfo(*params.m_InputGateBias);
1433  }
1434 
1435  // Projection
1436  flatbuffers::Offset<serializer::ConstTensor> projectionWeights;
1437  flatbuffers::Offset<serializer::ConstTensor> projectionBias;
1438 
1439  if (descriptor.m_ProjectionEnabled)
1440  {
1441  projectionWeights = CreateConstTensorInfo(*params.m_ProjectionWeights);
1442  projectionBias = CreateConstTensorInfo(*params.m_ProjectionBias);
1443  }
1444 
1445  // Peephole
1446  flatbuffers::Offset<serializer::ConstTensor> cellToInputWeights;
1447  flatbuffers::Offset<serializer::ConstTensor> cellToForgetWeights;
1448  flatbuffers::Offset<serializer::ConstTensor> cellToOutputWeights;
1449 
1450  if (descriptor.m_PeepholeEnabled)
1451  {
1452  if (!descriptor.m_CifgEnabled)
1453  {
1454  cellToInputWeights = CreateConstTensorInfo(*params.m_CellToInputWeights);
1455  }
1456 
1457  cellToForgetWeights = CreateConstTensorInfo(*params.m_CellToForgetWeights);
1458  cellToOutputWeights = CreateConstTensorInfo(*params.m_CellToOutputWeights);
1459  }
1460 
1461  // Layer norm
1462  flatbuffers::Offset<serializer::ConstTensor> inputLayerNormWeights;
1463  flatbuffers::Offset<serializer::ConstTensor> forgetLayerNormWeights;
1464  flatbuffers::Offset<serializer::ConstTensor> cellLayerNormWeights;
1465  flatbuffers::Offset<serializer::ConstTensor> outputLayerNormWeights;
1466 
1467  if (descriptor.m_LayerNormEnabled)
1468  {
1469  if (!descriptor.m_CifgEnabled)
1470  {
1471  inputLayerNormWeights = CreateConstTensorInfo((*params.m_InputLayerNormWeights));
1472  }
1473 
1474  forgetLayerNormWeights = CreateConstTensorInfo(*params.m_ForgetLayerNormWeights);
1475  cellLayerNormWeights = CreateConstTensorInfo(*params.m_CellLayerNormWeights);
1476  outputLayerNormWeights = CreateConstTensorInfo(*params.m_OutputLayerNormWeights);
1477  }
1478 
1479  auto fbQLstmParams = serializer::CreateQLstmInputParams(
1480  m_flatBufferBuilder,
1481  inputToForgetWeights,
1482  inputToCellWeights,
1483  inputToOutputWeights,
1484  recurrentToForgetWeights,
1485  recurrentToCellWeights,
1486  recurrentToOutputWeights,
1487  forgetGateBias,
1488  cellBias,
1489  outputGateBias,
1490  inputToInputWeights,
1491  recurrentToInputWeights,
1492  inputGateBias,
1493  projectionWeights,
1494  projectionBias,
1495  cellToInputWeights,
1496  cellToForgetWeights,
1497  cellToOutputWeights,
1498  inputLayerNormWeights,
1499  forgetLayerNormWeights,
1500  cellLayerNormWeights,
1501  outputLayerNormWeights);
1502 
1503  auto fbQLstmLayer = serializer::CreateQLstmLayer(
1504  m_flatBufferBuilder,
1505  fbQLstmBaseLayer,
1506  fbQLstmDescriptor,
1507  fbQLstmParams);
1508 
1509  CreateAnyLayer(fbQLstmLayer.o, serializer::Layer::Layer_QLstmLayer);
1510 }
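// Note (descriptive comment): the optional CIFG, projection, peephole and
// layer-norm offsets above stay default-constructed (value 0) when their
// feature is disabled; FlatBuffers treats a zero offset as "field not set",
// so the deserializer can detect which tensors were actually serialized.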
1511 
1512 void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer* layer,
1513  const armnn::QuantizedLstmInputParams& params,
1514  const char* name)
1515 {
1516  IgnoreUnused(name);
1517 
1518  auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
1519 
1520  // Get input parameters
1521  auto inputToInputWeights = CreateConstTensorInfo(params.GetInputToInputWeights());
1522  auto inputToForgetWeights = CreateConstTensorInfo(params.GetInputToForgetWeights());
1523  auto inputToCellWeights = CreateConstTensorInfo(params.GetInputToCellWeights());
1524  auto inputToOutputWeights = CreateConstTensorInfo(params.GetInputToOutputWeights());
1525 
1526  auto recurrentToInputWeights = CreateConstTensorInfo(params.GetRecurrentToInputWeights());
1527  auto recurrentToForgetWeights = CreateConstTensorInfo(params.GetRecurrentToForgetWeights());
1528  auto recurrentToCellWeights = CreateConstTensorInfo(params.GetRecurrentToCellWeights());
1529  auto recurrentToOutputWeights = CreateConstTensorInfo(params.GetRecurrentToOutputWeights());
1530 
1531  auto inputGateBias = CreateConstTensorInfo(params.GetInputGateBias());
1532  auto forgetGateBias = CreateConstTensorInfo(params.GetForgetGateBias());
1533  auto cellBias = CreateConstTensorInfo(params.GetCellBias());
1534  auto outputGateBias = CreateConstTensorInfo(params.GetOutputGateBias());
1535 
1536  auto fbQuantizedLstmParams = serializer::CreateQuantizedLstmInputParams(
1537  m_flatBufferBuilder,
1538  inputToInputWeights,
1539  inputToForgetWeights,
1540  inputToCellWeights,
1541  inputToOutputWeights,
1542  recurrentToInputWeights,
1543  recurrentToForgetWeights,
1544  recurrentToCellWeights,
1545  recurrentToOutputWeights,
1546  inputGateBias,
1547  forgetGateBias,
1548  cellBias,
1549  outputGateBias);
1550 
1551  auto fbQuantizedLstmLayer = serializer::CreateQuantizedLstmLayer(
1552  m_flatBufferBuilder,
1553  fbQuantizedLstmBaseLayer,
1554  fbQuantizedLstmParams);
1555 
1556  CreateAnyLayer(fbQuantizedLstmLayer.o, serializer::Layer::Layer_QuantizedLstmLayer);
1557 }
1558 
1559 fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
1560  const serializer::LayerType layerType)
1561 {
1562 
1563  uint32_t fbIndex = GetSerializedId(layer->GetGuid());
1564 
1565  std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
1566  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots = CreateOutputSlots(layer);
1567 
1568  return serializer::CreateLayerBase(m_flatBufferBuilder,
1569  fbIndex,
1570  m_flatBufferBuilder.CreateString(layer->GetName()),
1571  layerType,
1572  m_flatBufferBuilder.CreateVector(inputSlots),
1573  m_flatBufferBuilder.CreateVector(outputSlots));
1574 }
1575 
1576 void SerializerVisitor::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
1577 {
1578 
1579  auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
1580  m_serializedLayers.push_back(anyLayer);
1581 }
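// CreateAnyLayer wraps a concrete layer table in the AnyLayer union so that
// the SerializedGraph can store a single heterogeneous vector of layers; the
// serializer::Layer enum value records which union member the offset refers to.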
1582 
1583 template <typename T>
1584 flatbuffers::Offset<flatbuffers::Vector<T>> SerializerVisitor::CreateDataVector(const void* memory, unsigned int size)
1585 {
1586  const T* buffer = reinterpret_cast<const T*>(memory);
1587  std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
1588  auto fbVector = m_flatBufferBuilder.CreateVector(vector);
1589  return fbVector;
1590 }
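// Worked example: for a Signed32 ConstTensor of 16 bytes,
// CreateDataVector<int32_t>(memory, 16) copies 16 / sizeof(int32_t) == 4
// elements into the FlatBuffer vector; callers pick T to match the element
// width of the tensor's data type.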
1591 
1592 flatbuffers::Offset<TensorInfo> SerializerVisitor::CreateTensorInfo(const armnn::TensorInfo& tensorInfo)
1593 {
1594  // Get the dimensions
1595  std::vector<unsigned int> shape;
1596  for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
1597  {
1598  shape.push_back(tensorInfo.GetShape()[dim]);
1599  }
1600 
1601  if (tensorInfo.HasPerAxisQuantization())
1602  {
1603  // Create FlatBuffer TensorInfo
1604  auto flatBufferTensorInfo =
1605  serializer::CreateTensorInfo(m_flatBufferBuilder,
1606  m_flatBufferBuilder.CreateVector(shape),
1607  GetFlatBufferDataType(tensorInfo.GetDataType()),
1608  tensorInfo.GetQuantizationScales()[0],
1609  tensorInfo.GetQuantizationOffset(),
1610  m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
1611  tensorInfo.GetQuantizationDim().value(),
1612  static_cast<unsigned int>
1613  (tensorInfo.GetShape().GetDimensionality()));
1614  return flatBufferTensorInfo;
1615  }
1616 
1617  // Create FlatBuffer TensorInfo
1618  auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
1619  m_flatBufferBuilder.CreateVector(shape),
1620  GetFlatBufferDataType(tensorInfo.GetDataType()),
1621  tensorInfo.GetQuantizationScale(),
1622  tensorInfo.GetQuantizationOffset(),
1623  0,
1624  0,
1625  static_cast<unsigned int>
1626  (tensorInfo.GetShape().GetDimensionality()));
1627  return flatBufferTensorInfo;
1628 }
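// In the per-axis branch above, the first element of GetQuantizationScales()
// is also written to the scalar scale field, together with the full scale
// vector and the quantization dimension; the per-tensor branch passes 0 for
// the per-axis fields instead.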
1629 
1630 flatbuffers::Offset<serializer::ConstTensor>
1631  SerializerVisitor::CreateConstTensorInfo(const armnn::ConstTensor& constTensor)
1632 {
1633  armnn::TensorInfo tensorInfo = constTensor.GetInfo();
1634 
1635  flatbuffers::Offset<void> fbPayload;
1636 
1637  switch (tensorInfo.GetDataType())
 1638  {
 1639  case armnn::DataType::Float32:
 1640  case armnn::DataType::Signed32:
 1641  {
1642  auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1643  flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
1644  m_flatBufferBuilder,
1645  fbVector);
1646  fbPayload = flatBuffersData.o;
1647  break;
 1648  }
 1649  case armnn::DataType::Float16:
 1650  case armnn::DataType::BFloat16:
 1651  case armnn::DataType::QSymmS16:
 1652  {
1653  auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1654  flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
1655  m_flatBufferBuilder,
1656  fbVector);
1657  fbPayload = flatBuffersData.o;
1658  break;
 1659  }
 1660  case armnn::DataType::QSymmS8:
 1661  case armnn::DataType::QAsymmS8:
 1662  case armnn::DataType::QAsymmU8:
 1663  case armnn::DataType::Boolean:
 1664  default:
1665  {
1666  auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
1667  flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
1668  m_flatBufferBuilder,
1669  fbVector);
1670  fbPayload = flatBuffersData.o;
1671  }
1672  }
1673  flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
1674  m_flatBufferBuilder,
 1675  CreateTensorInfo(tensorInfo),
 1676  GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
 1677  fbPayload);
1678  return flatBufferConstTensor;
1679 }
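// The payload union is chosen by element width rather than exact type:
// 4-byte element types go into IntData, 2-byte types into ShortData, and
// 1-byte types (plus the default case) into ByteData, while the original
// armnn::DataType is preserved separately via GetFlatBufferConstTensorData().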
1680 
1681 flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> SerializerVisitor::GetVersionTable()
1682 {
1683  flatbuffers::Offset<armnnSerializer::FeatureCompatibilityVersions> versionsTable =
1684  serializer::CreateFeatureCompatibilityVersions(
1685  m_flatBufferBuilder,
1686  1 // Binding ids scheme version
1687  );
1688  return versionsTable;
1689 }
1690 
1691 std::vector<fb::Offset<serializer::InputSlot>>
1692  SerializerVisitor::CreateInputSlots(const armnn::IConnectableLayer* layer)
1693 {
1694  std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
1695 
1696  // Get the InputSlots
1697  for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
1698  {
1699  const IInputSlot& inputSlot = layer->GetInputSlot(slotIndex);
1700 
1701  // Get the Connection for the InputSlot
1702  const IOutputSlot* connection = inputSlot.GetConnection();
1703 
1704  // Create FlatBuffer Connection
1705  serializer::Connection conn(GetSerializedId(inputSlot.GetConnection()->GetOwningLayerGuid()),
1706  connection->CalculateIndexOnOwner());
1707  // Create FlatBuffer InputSlot
1708  inputSlots.push_back(serializer::CreateInputSlot(m_flatBufferBuilder, slotIndex, &conn));
1709  }
1710  return inputSlots;
1711 }
1712 
1713 std::vector<fb::Offset<serializer::OutputSlot>>
1714  SerializerVisitor::CreateOutputSlots(const armnn::IConnectableLayer* layer)
1715 {
1716  std::vector<fb::Offset<serializer::OutputSlot>> outputSlots;
1717 
1718  // Get the OutputSlots
1719  for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
1720  {
1721  const IOutputSlot& outputSlot = layer->GetOutputSlot(slotIndex);
1722  const armnn::TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
1723 
 1724  // Create FlatBuffer OutputSlot
1725  outputSlots.push_back(serializer::CreateOutputSlot(m_flatBufferBuilder,
1726  slotIndex,
1727  CreateTensorInfo(tensorInfo)));
1728  }
1729  return outputSlots;
1730 }
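// Input slots only record a Connection (source layer id, output slot index),
// whereas output slots carry the full TensorInfo; between them this gives the
// deserializer both the graph topology and the tensor metadata.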
1731 
1732 
1733 ISerializer* ISerializer::CreateRaw()
1734 {
1735  return new Serializer();
1736 }
1737 
1738 ISerializerPtr ISerializer::Create()
1739 {
1740  return ISerializerPtr(CreateRaw(), &ISerializer::Destroy);
1741 }
1742 
1743 void ISerializer::Destroy(ISerializer* serializer)
1744 {
1745  delete serializer;
1746 }
1747 
1748 void Serializer::Serialize(const INetwork& inNetwork)
1749 {
 1750  // Iterate through the network
1751  inNetwork.Accept(m_SerializerVisitor);
1752  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerVisitor.GetFlatBufferBuilder();
1753 
1754  // Create FlatBuffer SerializedGraph
1755  auto serializedGraph = serializer::CreateSerializedGraph(
1756  fbBuilder,
1757  fbBuilder.CreateVector(m_SerializerVisitor.GetSerializedLayers()),
1758  fbBuilder.CreateVector(m_SerializerVisitor.GetInputIds()),
1759  fbBuilder.CreateVector(m_SerializerVisitor.GetOutputIds()),
1760  m_SerializerVisitor.GetVersionTable());
1761 
1762  // Serialize the graph
1763  fbBuilder.Finish(serializedGraph);
1764 }
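// Finish() seals the buffer with the SerializedGraph root offset, so the
// GetBufferPointer()/GetSize() calls used below return a complete,
// self-contained FlatBuffer.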
1765 
1766 bool Serializer::SaveSerializedToStream(std::ostream& stream)
1767 {
1768  flatbuffers::FlatBufferBuilder& fbBuilder = m_SerializerVisitor.GetFlatBufferBuilder();
1769 
1770  auto bytesToWrite = armnn::numeric_cast<std::streamsize>(fbBuilder.GetSize());
1771  stream.write(reinterpret_cast<const char*>(fbBuilder.GetBufferPointer()), bytesToWrite);
1772  return !stream.bad();
1773 }
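// Typical usage of the interface above (a minimal sketch, not part of this
// file; the file name "model.armnn" is an arbitrary example and assumes
// <fstream> is included):
//
//   armnn::INetworkPtr network = armnn::INetwork::Create();
//   // ... add and connect layers ...
//   armnnSerializer::ISerializerPtr serializer = armnnSerializer::ISerializer::Create();
//   serializer->Serialize(*network);
//   std::ofstream out("model.armnn", std::ios::binary);
//   serializer->SaveSerializedToStream(out);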
1774 
1775 } // namespace armnnSerializer