ArmNN
 23.05
Converter.cpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Converter.hpp"
#include <half/half.hpp>
// Assumed include (dropped in extraction): declares armnnUtils::ExpandDims and
// armnnUtils::TransposeTensorShape, which are used below.
#include <armnnUtils/TensorUtils.hpp>

namespace armnn_driver
{

using namespace android::nn;
using Half = half_float::half;

namespace
{

} // anonymous namespace

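// Dispatches a single NNAPI operation to the matching Convert* helper below.
// Returns false, rather than throwing, for operation types the driver does not support.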
bool Converter::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case OperationType::ABS:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
        case OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case OperationType::ARGMAX:
            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
        case OperationType::ARGMIN:
            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
        case OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case OperationType::BATCH_MATMUL:
            return ConvertBatchMatMul(operation, model, data);
        case OperationType::BATCH_TO_SPACE_ND:
            return ConvertBatchToSpaceNd(operation, model, data);
        case OperationType::CAST:
            return ConvertCast(operation, model, data);
        case OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case OperationType::DEPTH_TO_SPACE:
            return ConvertDepthToSpace(operation, model, data);
        case OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case OperationType::DIV:
            return ConvertDiv(operation, model, data);
        case OperationType::ELU:
            return ConvertElu(operation, model, data);
        case OperationType::EQUAL:
            return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
        case OperationType::EXP:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Exp);
        case OperationType::EXPAND_DIMS:
            return ConvertExpandDims(operation, model, data);
        case OperationType::FILL:
            return ConvertFill(operation, model, data);
        case OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case OperationType::GATHER:
            return ConvertGather(operation, model, data);
        case OperationType::GREATER:
            return ConvertComparison(operation, model, data, ComparisonOperation::Greater);
        case OperationType::GREATER_EQUAL:
            return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
        case OperationType::GROUPED_CONV_2D:
            return ConvertGroupedConv2d(operation, model, data);
        case OperationType::HARD_SWISH:
            return ConvertHardSwish(operation, model, data);
        case OperationType::INSTANCE_NORMALIZATION:
            return ConvertInstanceNormalization(operation, model, data);
        case OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case OperationType::LESS:
            return ConvertComparison(operation, model, data, ComparisonOperation::Less);
        case OperationType::LESS_EQUAL:
            return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
        case OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case OperationType::LOG:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Log);
        case OperationType::LOGICAL_AND:
            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
        case OperationType::LOGICAL_NOT:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
        case OperationType::LOGICAL_OR:
            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
        case OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case OperationType::LOG_SOFTMAX:
            return ConvertLogSoftmax(operation, model, data);
        case OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case OperationType::MEAN:
            return ConvertMean(operation, model, data);
        case OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case OperationType::MUL:
            return ConvertMul(operation, model, data);
        case OperationType::NEG:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
        case OperationType::NOT_EQUAL:
            return ConvertComparison(operation, model, data, ComparisonOperation::NotEqual);
        case OperationType::PAD:
            return ConvertPad(operation, model, data);
        case OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case OperationType::QUANTIZE:
            return ConvertQuantize(operation, model, data);
        case OperationType::QUANTIZED_LSTM:
            return ConvertQuantizedLstm(operation, model, data);
        case OperationType::QUANTIZED_16BIT_LSTM:
            return ConvertQuantized16BitLstm(operation, model, data);
        case OperationType::RANK:
            return ConvertRank(operation, model, data);
        case OperationType::REDUCE_MAX:
            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Max);
        case OperationType::REDUCE_MIN:
            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Min);
        case OperationType::REDUCE_SUM:
            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Sum);
        case OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
        case OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
        case OperationType::RSQRT:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
        case OperationType::SIN:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Sin);
        case OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case OperationType::SPACE_TO_BATCH_ND:
            return ConvertSpaceToBatchNd(operation, model, data);
        case OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case OperationType::SQRT:
            return ConvertSqrt(operation, model, data);
        case OperationType::SQUEEZE:
            return ConvertSqueeze(operation, model, data);
        case OperationType::STRIDED_SLICE:
            return ConvertStridedSlice(operation, model, data);
        case OperationType::SUB:
            return ConvertSub(operation, model, data);
        case OperationType::TRANSPOSE:
            return ConvertTranspose(operation, model, data);
        case OperationType::TRANSPOSE_CONV_2D:
            return ConvertTransposeConv2d(operation, model, data);
        case OperationType::TANH:
            return ConvertTanH(operation, model, data);
        default:
            VLOG(DRIVER) << "Operation type: " << operation.type << " is not supported in ArmnnDriver";
            return false;
    }
}

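// Converts ANEURALNETWORKS_ADD. Like the other binary arithmetic conversions below, it
// validates both inputs, reads the optional fused activation (input 2), checks backend
// support (immediately for static output shapes, deferred for dynamic ones), then adds
// the layer and broadcasts the inputs to a common shape.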
bool Converter::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertAdd()";
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsAdditionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo);
        ARMNN_NO_DEPRECATE_WARN_END
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    ARMNN_NO_DEPRECATE_WARN_END
    startLayer->SetBackendId(setBackend);

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
                                        data, nullptr, validateFunc, activationFunction);
}

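// Shared conversion for ANEURALNETWORKS_ARGMAX / ANEURALNETWORKS_ARGMIN; the direction
// is selected by the argMinMaxFunction parameter. The axis scalar (input 1) may be
// negative, TensorFlow-style, and must lie in [-rank, rank).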
bool Converter::ConvertArgMinMax(const Operation& operation,
                                 const Model& model,
                                 ConversionData& data,
                                 armnn::ArgMinMaxFunction argMinMaxFunction)
{
    VLOG(DRIVER) << "Converter::ConvertArgMinMax()";
    VLOG(DRIVER) << "argMinMaxFunction = " << GetArgMinMaxFunctionAsCString(argMinMaxFunction);

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    int32_t axis;
    if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
    }

    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
    int rank = static_cast<int>(inputInfo.GetNumDimensions());

    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. a rank 4 tensor can have axis in range [-4, 4)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        return Fail("%s: Axis must be in range [-n, n)", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = argMinMaxFunction;
    descriptor.m_Axis = axis;

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsArgMinMaxSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);

    input0.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertAveragePool2d()";
    return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Average, model, data);
}

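// Converts ANEURALNETWORKS_BATCH_MATMUL. Both inputs must have the same operand type
// and rank 2..4; the NNAPI "adjoint" flags (inputs 2 and 3) are mapped to the
// descriptor's transpose parameters.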
bool Converter::ConvertBatchMatMul(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertBatchMatMul()";
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int rankInput0 = inputInfo0.GetNumDimensions();
    if (rankInput0 > 4 || rankInput0 < 2)
    {
        return Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
    }

    unsigned int rankInput1 = inputInfo1.GetNumDimensions();
    if (rankInput1 > 4 || rankInput1 < 2)
    {
        return Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
    }

    // Determine data type of input tensor 0
    OperandType input0Type;
    if (!GetOperandType(operation, 0, model, input0Type))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Determine data type of input tensor 1
    OperandType input1Type;
    if (!GetOperandType(operation, 1, model, input1Type))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (input0Type != input1Type)
    {
        return Fail("%s: Operation has invalid inputs (Inputs must have same OperandCode)", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::BatchMatMulDescriptor batchMatMulDesc;

    // Inputs 2 and 3 are called "adjoint" in Android NeuralNetworks, but they actually perform
    // a transpose. This is why they are mapped to the transpose parameters of the descriptor.
    batchMatMulDesc.m_TransposeX = GetOptionalBool(operation, 2, model, data);
    batchMatMulDesc.m_TransposeY = GetOptionalBool(operation, 3, model, data);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchMatMulSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo,
                                   batchMatMulDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchMatMulLayer(batchMatMulDesc);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input0.Connect(layer->GetInputSlot(0));
    input1.Connect(layer->GetInputSlot(1));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

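// Converts ANEURALNETWORKS_BATCH_TO_SPACE_ND for rank-4 inputs. NNAPI has no crops
// argument, so the descriptor's crops are fixed to zero.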
bool Converter::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertBatchToSpaceNd()";
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const Operand* blockOperand = GetInputOperand(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
    }
    // Setting crops to 0,0 0,0 as it is not supported in the Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

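// Converts ANEURALNETWORKS_CAST: a single-input, single-output type conversion with
// no descriptor to populate.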
bool Converter::ConvertCast(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertCast()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo = input.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsCastSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* layer = data.m_Network->AddCastLayer();
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

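// Shared conversion for the comparison operations (EQUAL, GREATER, GREATER_EQUAL,
// LESS, LESS_EQUAL, NOT_EQUAL); the comparisonOperation parameter selects the
// predicate placed in the ComparisonDescriptor.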
bool Converter::ConvertComparison(const Operation& operation,
                                  const Model& model,
                                  ConversionData& data,
                                  ComparisonOperation comparisonOperation)
{
    VLOG(DRIVER) << "Converter::ConvertComparison()";
    VLOG(DRIVER) << "comparisonOperation = " << GetComparisonOperationAsCString(comparisonOperation);

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!(input0.IsValid() && input1.IsValid()))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const TensorInfo& inputInfo1 = input1.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    ComparisonDescriptor descriptor(comparisonOperation);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsComparisonSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);

    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    if (IsDynamicTensor(outputInfo))
    {
        input0.Connect(layer->GetInputSlot(0));
        input1.Connect(layer->GetInputSlot(1));
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

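// Converts ANEURALNETWORKS_CONCATENATION. The last input is the concat axis; all
// preceding inputs are the tensors to join. Rank-1/2 inputs are first reshaped to
// rank 3, and when the concat dimension is 2 for a 4-D tensor (or 1 for a 3-D tensor)
// the inputs are swizzled before the concat and the result deswizzled afterwards.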
bool Converter::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertConcatenation()";

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();
    const bool isDynamicTensor = IsDynamicTensor(outputInfo);
    //
    // Handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* operand = GetInputOperand(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            bool isSupported = false;
            armnn::BackendId setBackendReshape;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackendReshape,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);

            if (!isSupported)
            {
                return false;
            }
            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
            newReshape.SetBackendId(setBackendReshape);

            // Point to the reshape operation rather than the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
            }
        }
        else if (tensorDimensionsAdded == 2)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
            }
        }
    }

    // Check if permutation is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
                                                         concatDim,
                                                         permutationPair);

    // Only relevant to static tensors as dynamic output tensors will be transposed as a result of
    // inferring from input
    if (!isDynamicTensor)
    {
        if (needPermute)
        {
            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
        }

        outputInfo.SetShape(outputShape);
    }
    // This is a no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
                                                                   inputShapes.end(),
                                                                   concatDim);
    }
    catch (const std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!isDynamicTensor)
    {
        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
        {
            return Fail("%s: Error validating the output shape for concat", __func__);
        }
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    armnn::BackendId setBackendConcat;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConcatSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackendConcat,
                                   inputTensorInfos,
                                   outputInfo,
                                   concatDescriptor);
    };

    if (!isDynamicTensor)
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackendConcat);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    // Connect inputs to the layer
    const unsigned int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (unsigned int i = 0; i < numInputSlots; ++i)
    {
        // Connect the input directly to the merge (concat) layer
        inputHandles[i].Connect(layer->GetInputSlot(i));
    }

    // Transpose the output shape
    auto transposeOutputShape = [&]()
    {
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);
        isSupported = false;
        armnn::BackendId setBackendTranspose;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackendTranspose,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        deswizzleLayer.SetBackendId(setBackendTranspose);
        layer = &deswizzleLayer;

        return true;
    };

    if (needPermute && !isDynamicTensor)
    {
        transposeOutputShape();
    }

    if (inputsHaveBeenReshaped)
    {
        if (isDynamicTensor)
        {
            // Infer the output shapes of concat if outputs are type 1 dynamic
            ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
            if (!ValidateConcatOutputShape(inputShapes,
                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                           concatDim))
            {
                return Fail("%s: Error validating the output shape for concat", __func__);
            }
            transposeOutputShape();
        }

        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(
                armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        isSupported = false;
        armnn::BackendId setBackendReshape2;
        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported)
        {
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackendReshape2,
                                       concatInfo,
                                       afterConcatInfo,
                                       reshapeDescriptor);
        };

        if (!IsDynamicTensor(afterConcatInfo))
        {
            validateReshapeFunc(afterConcatInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
        layer->SetBackendId(setBackendReshape2);
        return SetupAndTrackLayerOutputSlot(operation,
                                            0,
                                            *layer,
                                            model,
                                            data,
                                            nullptr,
                                            validateReshapeFunc);
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

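// Converts ANEURALNETWORKS_CONV_2D, handling both the implicit- and explicit-padding
// signatures. NNAPI weights are OHWI; for NCHW data layout they are permuted to OIHW.
// When weights or bias arrive through a Dequantize layer, backend validation is
// deferred to the ConvertConstDequantisationLayersToConstLayers optimization step.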
bool Converter::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertConv2d()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo = input.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    Convolution2dDescriptor desc;
    desc.m_DataLayout = DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 7
                           || (operation.inputs.size() >= 8
                               && GetInputOperand(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
    }

    const PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    if (!IsWeightsValid(operation, 1, model) && desc.m_DataLayout == DataLayout::NCHW)
    {
        return Fail("%s: Operation has unsupported weights OperandLifeTime", __func__);
    }

    LayerInputHandle weightsInput = (desc.m_DataLayout == DataLayout::NCHW)
                                    ? ConvertToLayerInputHandle(operation, 1, model, data, OHWIToOIHW, &input)
                                    : ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);

    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    biasInput.SanitizeQuantizationScale(weightsInput, input);
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();

    ActivationFn activation;
    if (implicitPadding)
    {
        ::android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputActivationFunction(operation, 6, activation, model, data)
            || !GetOptionalConvolutionDilationParams(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weightsInfo.GetShape()[widthIndex];
        const uint32_t kernelY = weightsInfo.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputActivationFunction(operation, 9, activation, model, data)
            || !GetOptionalConvolutionDilationParams(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    Optional<TensorInfo> biases(biasInfo);

    bool requiresValidation = true;
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);
    const Operand* biasOperand = GetInputOperand(operation, 2, model);
    if (IsConnectedToDequantize(weightsInput.GetOutputSlot())
        || IsConnectedToDequantize(biasInput.GetOutputSlot()))
    {
        // Do not require validation for now. An optimization step
        // [ConvertConstDequantisationLayersToConstLayers] will convert these layers to Constant
        // layers, and layer support validation then happens at the end of the optimization.
        requiresValidation = false;
        VLOG(DRIVER) << "Converter::ConvertConv2d(): Weights and Biases are as INPUTS.";
    }

    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weightsInfo,
                                   biases);
    };

    if (requiresValidation)
    {
        VLOG(DRIVER) << "Converter::ConvertConv2d(): Requires Validation!";
        bool isSupported = false;
        if (!IsDynamicTensor(outputInfo))
        {
            validateFunc(outputInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    startLayer->SetBackendId(setBackend);

    input.Connect(startLayer->GetInputSlot(0));
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
}

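// Converts ANEURALNETWORKS_DEPTH_TO_SPACE for rank-4 inputs; input 1 is the block
// size and input 2 optionally selects the data layout.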
bool Converter::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertDepthToSpace()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::DepthToSpaceDescriptor descriptor;

    if (!GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read block size.", __func__);
    }
    if (descriptor.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthToSpaceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

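// Converts ANEURALNETWORKS_DEPTHWISE_CONV_2D. The NNAPI filter shape is
// [1, filter_height, filter_width, depth_out], and both the implicit- and
// explicit-padding signatures are handled, mirroring ConvertConv2d.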
bool Converter::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);

    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }
    // Basic sanity check on the weights shape.
    // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
    // [1, filter_height, filter_width, depth_out]
    if (weightsOperand->dimensions[0] != 1)
    {
        return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
    }

    DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 8
                           || (operation.inputs.size() >= 9
                               && GetInputOperand(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    LayerInputHandle weightsInput = ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* biasOperand = GetInputOperand(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    biasInput.SanitizeQuantizationScale(weightsInput, input);
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();

    ActivationFn activation;
    if (implicitPadding)
    {
        ::android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputActivationFunction(operation, 7, activation, model, data)
            || !GetOptionalConvolutionDilationParams(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        const uint32_t kernelX = weightsInfo.GetShape()[2];
        const uint32_t kernelY = weightsInfo.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputActivationFunction(operation, 10, activation, model, data)
            || !GetOptionalConvolutionDilationParams(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    Optional<TensorInfo> biases(biasInfo);

    bool requiresValidation = true;
    if (IsConnectedToDequantize(weightsInput.GetOutputSlot()) || IsConnectedToDequantize(biasInput.GetOutputSlot()))
    {
        // Do not require validation for now. An optimization step
        // [ConvertConstDequantisationLayersToConstLayers] will convert these layers to Constant
        // layers, and layer support validation then happens at the end of the optimization.
        requiresValidation = false;
        VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Weights and Biases are as INPUTS.";
    }

    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weightsInfo,
                                   biases);
    };

    if (requiresValidation)
    {
        VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Requires Validation!";
        bool isSupported = false;
        if (!IsDynamicTensor(outputInfo))
        {
            validateFunc(outputInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    startLayer->SetBackendId(setBackend);

    input.Connect(startLayer->GetInputSlot(0));

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
}

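// Converts ANEURALNETWORKS_DEQUANTIZE. Per-channel quantization is only accepted
// along dimension 0.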
bool Converter::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertDequantize()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
    if (quantizationDim.has_value() && quantizationDim.value() != 0)
    {
        return Fail("%s: Operation has quantization dimension different than 0", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDequantizeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

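// Converts ANEURALNETWORKS_DIV following the same pattern as ConvertAdd, including
// the optional fused activation at input index 2 and input broadcasting.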
bool Converter::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertDiv()";

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        ARMNN_NO_DEPRECATE_WARN_BEGIN
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDivisionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
        ARMNN_NO_DEPRECATE_WARN_END
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    ARMNN_NO_DEPRECATE_WARN_END
    startLayer->SetBackendId(setBackend);

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
                                        data, nullptr, validateFunc, activationFunction);
}

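// Shared conversion for the unary element-wise operations (ABS, EXP, LOG,
// LOGICAL_NOT, NEG, RSQRT, SIN); unaryOperation selects the function placed in
// the ElementwiseUnaryDescriptor.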
bool Converter::ConvertElementwiseUnary(const Operation& operation,
                                        const Model& model,
                                        ConversionData& data,
                                        UnaryOperation unaryOperation)
{
    VLOG(DRIVER) << "Converter::ConvertElementwiseUnary()";
    VLOG(DRIVER) << "unaryOperation = " << GetUnaryOperationAsCString(unaryOperation);

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo = input.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    ElementwiseUnaryDescriptor descriptor(unaryOperation);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsElementwiseUnarySupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

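// Converts ANEURALNETWORKS_ELU to an Elu activation; the alpha scalar (input 1)
// is read as FLOAT16 or FLOAT32 to match the input tensor type.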
1646 bool Converter::ConvertElu(const Operation& operation, const Model& model, ConversionData& data)
1647 {
1648  VLOG(DRIVER) << "Converter::ConvertElu()";
1649 
1650  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1651  if (!input0.IsValid())
1652  {
1653  return Fail("%s: Operation has invalid inputs", __func__);
1654  }
1655 
1656  // Determine data type of input tensor
1657  OperandType inputType;
1658  if (!GetOperandType(operation, 0, model, inputType))
1659  {
1660  return Fail("%s: Operation has invalid inputs", __func__);
1661  }
1662 
1663  ActivationDescriptor desc;
1664  desc.m_Function = ActivationFunction::Elu;
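    // ELU computes f(x) = x for x >= 0 and f(x) = alpha * (exp(x) - 1) for x < 0;
    // the alpha scalar read below is stored in desc.m_A.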
1665 
1666  // Read alpha
1667  if (inputType == OperandType::TENSOR_FLOAT16)
1668  {
1669  Half alpha;
1670 
1671  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, alpha, model, data))
1672  {
1673  return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
1674  }
1675 
1676  desc.m_A = static_cast<float>(alpha);
1677  }
1678  else if (inputType == OperandType::TENSOR_FLOAT32)
1679  {
1680  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_A, model, data))
1681  {
1682  return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
1683  }
1684  }
1685  else
1686  {
1687  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
1688  }
1689 
1690  return ::ConvertToActivation(operation, __func__, desc, model, data);
1691 }
1692 
1693 bool Converter::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
1694 {
1695  VLOG(DRIVER) << "Converter::ConvertExpandDims()";
1696 
1697  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1698 
1699  if (!input.IsValid())
1700  {
1701  return Fail("%s: Operation has invalid input", __func__);
1702  }
1703 
1704  const Operand* output = GetOutputOperand(operation, 0, model);
1705  if (!output)
1706  {
1707  return Fail("%s: Operation has invalid output", __func__);
1708  }
1709 
1710  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1711 
1712  int32_t axis;
1713  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
1714  {
1715  return Fail("%s: failed to get axis input value", __func__);
1716  }
1717 
1718  TensorShape targetShape;
1719 
1720  try
1721  {
1722  targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
1723  }
1724  catch (const std::exception& e)
1725  {
1726  return Fail("%s: %s", __func__, e.what());
1727  }
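    // For example, an input of shape [2, 3] with axis 1 expands to a target shape of
    // [2, 1, 3]; the reshape below changes only the shape metadata, not the data.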
1728 
1729  ReshapeDescriptor reshapeDescriptor;
1730  reshapeDescriptor.m_TargetShape = targetShape;
1731 
1732  bool isSupported = false;
1733  armnn::BackendId setBackend;
1734  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1735  {
1736         FORWARD_LAYER_SUPPORT_FUNC(__func__,
1737                                    IsReshapeSupported,
1738  data.m_Backends,
1739  isSupported,
1740  setBackend,
1741  input.GetTensorInfo(),
1742  outputInfo,
1743  reshapeDescriptor);
1744  };
1745 
1746  if(!IsDynamicTensor(outputInfo))
1747  {
1748  if (targetShape != outputInfo.GetShape())
1749  {
1750  return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
1751  }
1752  validateFunc(outputInfo, isSupported);
1753  }
1754  else
1755  {
1756  isSupported = AreDynamicTensorsSupported();
1757  }
1758 
1759  if (!isSupported)
1760  {
1761  return false;
1762  }
1763 
1764     IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1765     assert(layer != nullptr);
1766     layer->SetBackendId(setBackend);
1767  input.Connect(layer->GetInputSlot(0));
1768 
1769  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1770 }
1771 
1772 bool Converter::ConvertFill(const Operation& operation, const Model& model, ConversionData& data)
1773 {
1774  VLOG(DRIVER) << "Converter::ConvertFill()";
1775  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1776  if (!input.IsValid())
1777  {
1778  return Fail("%s: Operation has invalid inputs", __func__);
1779  }
1780 
1781  const Operand* output = GetOutputOperand(operation, 0, model);
1782  if (!output)
1783  {
1784  return Fail("%s: Could not read output", __func__);
1785  }
1786 
1787  const TensorInfo& inputInfo = input.GetTensorInfo();
1788  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1789  if (IsDynamicTensor(outputInfo))
1790  {
1791  return Fail("%s: Dynamic output tensors are not supported", __func__);
1792  }
1793 
1794  // Determine data type of output tensor
1795  OperandType outputType = output->type;
1796  FillDescriptor descriptor;
1797  // Read the scalar fill value
1798  if (outputType == OperandType::TENSOR_FLOAT16)
1799  {
1800  Half value;
1801 
1802  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
1803  {
1804  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1805  }
1806 
1807  descriptor.m_Value = static_cast<float>(value);
1808  }
1809  else if (outputType == OperandType::TENSOR_FLOAT32)
1810  {
1811  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Value, model, data))
1812  {
1813  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1814  }
1815  }
1816  else if (outputType == OperandType::TENSOR_INT32)
1817  {
1818  int32_t value;
1819 
1820  if (!GetInputScalar(operation, 1, OperandType::INT32, value, model, data))
1821  {
1822  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1823  }
1824 
1825  descriptor.m_Value = static_cast<float>(value);
1826  }
1827  else
1828  {
1829  return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
1830  }
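    // Note that FillDescriptor::m_Value always holds the fill value as a float: the
    // conversions above widen FLOAT16 and INT32 scalars, and the output operand's own
    // data type determines how the value is ultimately represented.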
1831 
1832  bool isSupported = false;
1833  armnn::BackendId setBackend;
1834  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1835  IsFillSupported,
1836  data.m_Backends,
1837  isSupported,
1838  setBackend,
1839  inputInfo,
1840  outputInfo,
1841  descriptor);
1842  if (!isSupported)
1843  {
1844  return false;
1845  }
1846 
1847     IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
1848     assert(layer != nullptr);
1849     layer->SetBackendId(setBackend);
1850  input.Connect(layer->GetInputSlot(0));
1851 
1852  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1853 }
1854 
1855 bool Converter::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
1856 {
1857  VLOG(DRIVER) << "Converter::ConvertFloor()";
1858  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1859  if (!input.IsValid())
1860  {
1861  return Fail("%s: Operation has invalid inputs", __func__);
1862  }
1863 
1864  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
1865  if (!outputOperand)
1866  {
1867  return Fail("%s: Operation has invalid outputs", __func__);
1868  }
1869 
1870  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1871 
1872  bool isSupported = false;
1873  armnn::BackendId setBackend;
1874  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1875  {
1876         FORWARD_LAYER_SUPPORT_FUNC(__func__,
1877                                    IsFloorSupported,
1878  data.m_Backends,
1879  isSupported,
1880  setBackend,
1881  input.GetTensorInfo(),
1882  outputInfo);
1883  };
1884 
1885  if(!IsDynamicTensor(outputInfo))
1886  {
1887  validateFunc(outputInfo, isSupported);
1888  }
1889  else
1890  {
1891  isSupported = AreDynamicTensorsSupported();
1892  }
1893 
1894  if (!isSupported)
1895  {
1896  return false;
1897  }
1898 
1899     armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
1900     assert(layer != nullptr);
1901     layer->SetBackendId(setBackend);
1902  input.Connect(layer->GetInputSlot(0));
1903 
1904  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1905 }
1906 
1907 bool Converter::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
1908 {
1909  VLOG(DRIVER) << "Converter::ConvertFullyConnected()";
1910  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1911  if (!input.IsValid())
1912  {
1913  return Fail("%s: Operation has invalid inputs", __func__);
1914  }
1915 
1916  const Operand* output = GetOutputOperand(operation, 0, model);
1917  if (!output)
1918  {
1919  return Fail("%s: Could not read output 0", __func__);
1920  }
1921 
1922  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1923  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1924 
1925  LayerInputHandle weightsInput = LayerInputHandle();
1926  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1927  if (!weightsOperand)
1928  {
1929  return Fail("%s: Could not read weights", __func__);
1930  }
1931 
1932     // If the weights are constant, a separate constant layer will be created to store their data.
1933     // Otherwise, non-constant weights are handled as layer inputs.
1934  weightsInput = ConvertToLayerInputHandle(operation, 1, model, data);
1935  if (!weightsInput.IsValid())
1936  {
1937  return Fail("%s: Operation has invalid inputs", __func__);
1938  }
1939 
1940  LayerInputHandle biasInput = LayerInputHandle();
1941  const Operand* biasOperand = GetInputOperand(operation, 2, model);
1942  if (!biasOperand)
1943  {
1944  return Fail("%s: Could not read bias", __func__);
1945  }
1946 
1947     // If the bias is constant, a separate constant layer will be created to store its data.
1948     // Otherwise, a non-constant bias is handled as a layer input.
1949  biasInput = ConvertToLayerInputHandle(operation, 2, model, data); // 1D
1950  if (!biasInput.IsValid())
1951  {
1952  return Fail("%s: Operation has invalid inputs", __func__);
1953  }
1954 
1955  armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1956  armnn::TensorInfo reshapedInfo = inputInfo;
1957  try
1958  {
1959  reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
1960  }
1961  catch (const std::exception& e)
1962  {
1963  return Fail("%s: %s", __func__, e.what());
1964  }
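    // FlattenFullyConnectedInput collapses the input to a 2-D shape [batchSize, inputSize],
    // where inputSize is taken from the weights shape. For example, an input of shape
    // [1, 2, 2, 10] with weights of shape [numUnits, 20] is reshaped to [2, 20].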
1965 
1966     // Ensure the bias quantization scale is within 1% of inputScale * weightsScale (small float differences can exist)
1967  armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1968  SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
1969 
1970  ActivationFn activationFunction;
1971  if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
1972  {
1973  return Fail("%s: Operation has invalid inputs", __func__);
1974  }
1975 
1976     armnn::FullyConnectedDescriptor desc;
1977     desc.m_TransposeWeightMatrix = true;
1978  desc.m_BiasEnabled = true;
1979  desc.m_ConstantWeights = IsOperandConstant(*weightsOperand);
1980 
1981  bool isSupported = false;
1982  armnn::BackendId setBackend;
1983  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1984  {
1985  if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
1986  weightsInfo.GetShape(),
1987  outputInfo.GetShape(),
1988                                         desc.m_TransposeWeightMatrix))
1989         {
1990  isSupported = false;
1991  Fail("%s: Expected outputShape does not match actual outputShape", __func__);
1992  return;
1993  }
1994 
1995         FORWARD_LAYER_SUPPORT_FUNC(__func__,
1996                                    IsFullyConnectedSupported,
1997  data.m_Backends,
1998  isSupported,
1999  setBackend,
2000  reshapedInfo,
2001  outputInfo,
2002  weightsInfo,
2003  biasInfo,
2004  desc);
2005  };
2006 
2007  if(!IsDynamicTensor(outputInfo))
2008  {
2009  validateFunc(outputInfo, isSupported);
2010  }
2011  else
2012  {
2013  isSupported = AreDynamicTensorsSupported();
2014  }
2015 
2016  if (!isSupported)
2017  {
2018  return false;
2019  }
2020 
2021  // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
2022  armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
2023  startLayer->SetBackendId(setBackend);
2024 
2025  if (inputInfo.GetNumDimensions() > 2U)
2026  {
2027  armnn::ReshapeDescriptor reshapeDescriptor;
2028  reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
2029 
2030  armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2031  assert(reshapeLayer != nullptr);
2032  input.Connect(reshapeLayer->GetInputSlot(0));
2033  reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
2034  reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
2035  }
2036  else
2037  {
2038  input.Connect(startLayer->GetInputSlot(0));
2039  }
2040 
2041  // Connect weights and bias inputs
2042  weightsInput.Connect(startLayer->GetInputSlot(1));
2043  biasInput.Connect(startLayer->GetInputSlot(2));
2044 
2045  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
2046  data, nullptr, validateFunc, activationFunction);
2047 }
2048 
2049 bool Converter::ConvertGather(const Operation& operation, const Model& model, ConversionData& data)
2050 {
2051  VLOG(DRIVER) << "Converter::ConvertGather()";
2052 
2053  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2054  if (!input.IsValid())
2055  {
2056  return Fail("%s: Operation has invalid input", __func__);
2057  }
2058  auto inputDimensions = input.GetTensorInfo().GetNumDimensions();
2059 
2060  LayerInputHandle indices = ConvertToLayerInputHandle(operation, 2, model, data);
2061  if (!indices.IsValid())
2062  {
2063  return Fail("%s: Operation has invalid indices", __func__);
2064  }
2065  auto indicesDimensions = indices.GetTensorInfo().GetNumDimensions();
2066 
2067  const Operand* output = GetOutputOperand(operation, 0, model);
2068  if (!output)
2069  {
2070  return Fail("%s: Operation has invalid output", __func__);
2071  }
2072  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2073  auto outputDimensions = outputInfo.GetNumDimensions();
2074  if (outputDimensions != inputDimensions + indicesDimensions - 1)
2075  {
2076  return Fail("%s: Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
2077  __func__, outputDimensions, inputDimensions, indicesDimensions);
2078  }
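    // GATHER output rank is inputRank + indicesRank - 1: e.g. a [3, 4, 5] input gathered
    // with [2, 2] indices must produce a 4-D output.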
2079 
2080  int32_t axis;
2081  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
2082  {
2083  return Fail("%s: Operation has invalid or unsupported axis operand", __func__);
2084  }
2085  if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2086  {
2087         return Fail("%s: Operation has invalid axis: %d. It is out of bounds [-%d, %d)", __func__, axis,
2088  inputDimensions, inputDimensions);
2089  }
2090 
2091  GatherDescriptor desc;
2092  desc.m_Axis = axis;
2093 
2094  bool isSupported = false;
2095  armnn::BackendId setBackend;
2096  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2097  {
2098  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2099  IsGatherSupported,
2100  data.m_Backends,
2101  isSupported,
2102  setBackend,
2103  input.GetTensorInfo(),
2104  indices.GetTensorInfo(),
2105  outputInfo,
2106  desc);
2107  };
2108 
2109  if(!IsDynamicTensor(outputInfo))
2110  {
2111  validateFunc(outputInfo, isSupported);
2112  }
2113  else
2114  {
2115  isSupported = AreDynamicTensorsSupported();
2116  }
2117 
2118  if (!isSupported)
2119  {
2120  return false;
2121  }
2122 
2123     IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
2124     assert(layer != nullptr);
2125     layer->SetBackendId(setBackend);
2126  input.Connect(layer->GetInputSlot(0));
2127  indices.Connect(layer->GetInputSlot(1));
2128 
2129  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2130 }
2131 
2132 bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data)
2133 {
2134  VLOG(DRIVER) << "Converter::ConvertGroupedConv2d()";
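    // Arm NN has no native grouped convolution layer, so the operation is decomposed
    // below into a Splitter (one view per group), one Convolution2d per output channel,
    // and a final Concat that reassembles the channels.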
2135  //
2136  // Parse data
2137  //
2138  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2139  if (!input.IsValid())
2140  {
2141  return Fail("%s: Operation has invalid inputs", __func__);
2142  }
2143  const TensorInfo& inputInfo = input.GetTensorInfo();
2144 
2145  const Operand* output = GetOutputOperand(operation, 0, model);
2146  if (!output)
2147  {
2148  return Fail("%s: Could not read output 0", __func__);
2149  }
2150  TensorInfo outputInfo = GetTensorInfoForOperand(*output);
2151 
2152  // Look ahead to determine data layout
2153  DataLayout dataLayout = DataLayout::NHWC;
2154  if (operation.inputs.size() == 12)
2155  {
2156  dataLayout = OptionalDataLayout(operation, 11, model, data);
2157  }
2158  else
2159  {
2160  dataLayout = OptionalDataLayout(operation, 8, model, data);
2161  }
2162 
2163  // NOTE:
2164  // NNAPI weights are always OHWI, i.e. [depth_out, filter_height, filter_width, depth_group],
2165     // but Arm NN expects the filter's height and width indices to match the input's height and
2166     // width indices, so when the DataLayout is NCHW we need to permute the weights to OIHW.
2167  const PermutationVector ohwiToOihw = { 0u, 2u, 3u, 1u };
2168     const ConstTensorPin weightsPin = (dataLayout == DataLayout::NCHW) ?
2169                                       ConvertOperationInputToConstTensorPin(operation, 1,
2170                                                                             model, data, ohwiToOihw) :
2171                                       ConvertOperationInputToConstTensorPin(operation, 1, model, data);
2172  const ConstTensorPin biasesPin =
2173  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
2174  if (!weightsPin.IsValid() || !biasesPin.IsValid())
2175  {
2176  return Fail("%s: Operation has invalid inputs", __func__);
2177  }
2178 
2179  ConstTensor weights = weightsPin.GetConstTensor();
2180  ConstTensor biases = biasesPin.GetConstTensor();
2181  SanitizeBiasQuantizationScale(biases.GetInfo(), weights.GetInfo(), inputInfo);
2182 
2183  const TensorShape& inputShape = inputInfo.GetShape();
2184  const TensorShape& outputShape = outputInfo.GetShape();
2185  const TensorShape& weightsShape = weights.GetShape();
2186  const TensorShape& biasesShape = biases.GetShape();
2187 
2188  armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
2189  const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
2190  const unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
2191  const unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
2192 
2193     Convolution2dDescriptor desc;
2194     desc.m_DataLayout = dataLayout;
2195  desc.m_BiasEnabled = true;
2196 
2197  int numGroups;
2198  ActivationFn activation;
2199 
2200  if (operation.inputs.size() == 12)
2201  {
2202  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
2203  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
2204  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
2205  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
2206  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
2207  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
2208  !GetInputScalar(operation, 9, OperandType::INT32, numGroups, model, data) ||
2209  !GetInputActivationFunction(operation, 10, activation, model, data))
2210  {
2211  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
2212  }
2213 
2214  }
2215  else if (operation.inputs.size() == 9)
2216  {
2217  ::android::nn::PaddingScheme paddingScheme;
2218  if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
2219  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
2220  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
2221  !GetInputScalar(operation, 6, OperandType::INT32, numGroups, model, data) ||
2222  !GetInputActivationFunction(operation, 7, activation, model, data))
2223  {
2224  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
2225  }
2226 
2227  const uint32_t inputX = inputInfo.GetShape()[widthIndex];
2228  const uint32_t inputY = inputInfo.GetShape()[heightIndex];
2229 
2230  const uint32_t kernelX = weightsShape[widthIndex];
2231  const uint32_t kernelY = weightsShape[heightIndex];
2232 
2233  CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2234  CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2235  }
2236  else
2237  {
2238  return Fail("%s: Unsupported number of operation inputs", __func__);
2239  }
2240 
2241     // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
2242     const unsigned int outputChannels    = weightsShape[0];
2243 
2244     const unsigned int channelsPerGroup  = weightsShape[channelsIndex];
2245     //
2246     // Validate all relevant inputs (numGroups must be checked before it is used as a divisor below)
2247     //
2248     if (numGroups <= 0)
2249     {
2250         return Fail("%s: Number of groups must be greater than 0. Got: %d", __func__, numGroups);
2251     }
2252 
2253     const unsigned int channelMultiplier = outputChannels / numGroups;
2254 
2255     if (outputChannels % numGroups != 0u)
2256     {
2257         return Fail("%s: Output channels must be divisible by the number of groups", __func__);
2258     }
2259 
2260  //
2261  // Set up Splitter layer
2262  //
2263  unsigned int splitterDimSizes[4] = { inputShape[0], inputShape[1], inputShape[2], inputShape[3] };
2264  splitterDimSizes[channelsIndex] /= numGroups; // split in depth
2265 
2266  TensorInfo splitterOutputInfo(4,
2267  splitterDimSizes,
2268  inputInfo.GetDataType(),
2269  inputInfo.GetQuantizationScale(),
2270  inputInfo.GetQuantizationOffset());
2271 
2272  std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(numGroups, std::ref(splitterOutputInfo));
2273 
2274  ViewsDescriptor splitterDesc(numGroups);
2275  for (unsigned int group = 0u; group < numGroups; ++group)
2276  {
2277  splitterDesc.SetViewOriginCoord(group, channelsIndex, splitterDimSizes[channelsIndex] * group);
2278  for (unsigned int dimIdx = 0u; dimIdx < 4u; dimIdx++)
2279  {
2280  splitterDesc.SetViewSize(group, dimIdx, splitterDimSizes[dimIdx]);
2281  }
2282  }
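    // For example, an NHWC input of shape [1, 4, 4, 6] with numGroups = 3 yields three
    // views of shape [1, 4, 4, 2], with channel origins 0, 2 and 4.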
2283 
2284  bool isSupported = false;
2285  armnn::BackendId setBackendSplit;
2286     FORWARD_LAYER_SUPPORT_FUNC(__func__,
2287                                IsSplitterSupported,
2288  data.m_Backends,
2289  isSupported,
2290  setBackendSplit,
2291  inputInfo,
2292  splitterOutputInfos,
2293  splitterDesc);
2294  if (!isSupported)
2295  {
2296  return false;
2297  }
2298 
2299     IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
2300     if (!splitterLayer)
2301     {
2302         return Fail("%s: Failed to add SplitterLayer", __func__);
2303     }
2304     splitterLayer->SetBackendId(setBackendSplit);
2305 
2306  input.Connect(splitterLayer->GetInputSlot(0));
2307  for (unsigned int group = 0u; group < splitterLayer->GetNumOutputSlots(); ++group)
2308  {
2309  splitterLayer->GetOutputSlot(group).SetTensorInfo(splitterOutputInfo);
2310  }
2311 
2312  //
2313  // Set up Convolution2d layers for each group
2314  //
2315 
2316  // Set up group tensor shapes
2317  TensorShape groupInputShape(inputShape);
2318  groupInputShape[channelsIndex] = channelsPerGroup;
2319 
2320  TensorShape groupWeightsShape(weightsShape);
2321  groupWeightsShape[0] /= channelMultiplier * numGroups;
2322 
2323  TensorShape groupBiasesShape({ 1 });
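    // For example, weights of shape [8, 3, 3, 2] with numGroups = 4 give
    // channelMultiplier = 2 and a groupWeightsShape of [1, 3, 3, 2], i.e. eight
    // single-output-channel convolutions are created below.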
2324 
2325  // Set up group tensor infos
2326  TensorInfo groupInputInfo(inputInfo);
2327  groupInputInfo.SetShape(groupInputShape);
2328 
2329  const TensorInfo& weightsInfo = weights.GetInfo();
2330  TensorInfo groupWeightsInfo(weightsInfo);
2331  groupWeightsInfo.SetShape(groupWeightsShape);
2332 
2333  const TensorInfo& biasesInfo = biases.GetInfo();
2334  TensorInfo groupBiasesInfo(biasesInfo);
2335  groupBiasesInfo.SetShape(groupBiasesShape);
2336 
2337  TensorInfo groupOutputInfo(outputInfo);
2338 
2339  TensorShape groupOutputShape(outputShape);
2340  const bool isDynamic = IsDynamicTensor(outputInfo);
2341  if (!isDynamic)
2342  {
2343  groupOutputShape[channelsIndex] = 1;
2344  }
2345  groupOutputInfo.SetShape(groupOutputShape);
2346 
2347  const unsigned int weightsDataTypeSize = GetDataTypeSize(groupWeightsInfo.GetDataType());
2348  const unsigned int biasesDataTypeSize = GetDataTypeSize(groupBiasesInfo.GetDataType());
2349 
2350  std::vector<IConnectableLayer*> convLayers(numGroups * channelMultiplier, nullptr);
2351  for (unsigned int group = 0u; group < numGroups; ++group)
2352  {
2353  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2354  {
2355  auto index = group * channelMultiplier + m;
2356 
2357  const unsigned int weightsDataOffset = groupWeightsShape.GetNumElements() * index * weightsDataTypeSize;
2358  const unsigned int biasesDataOffset = groupBiasesShape.GetNumElements() * index * biasesDataTypeSize;
2359 
2360  if (weightsInfo.HasPerAxisQuantization())
2361  {
2362  // Extract per-axis quantization scales for group weights
2363  const std::vector<float>& weightsQuantScales = weightsInfo.GetQuantizationScales();
2364  groupWeightsInfo.SetQuantizationScales(
2365  std::vector<float>(weightsQuantScales.begin() + index,
2366  weightsQuantScales.begin() + index + groupWeightsShape[0]));
2367 
2368  // Extract per-axis quantization scales for group biases
2369  const std::vector<float>& biasesQuantScales = biasesInfo.GetQuantizationScales();
2370  groupBiasesInfo.SetQuantizationScales(
2371  std::vector<float>(biasesQuantScales.begin() + index,
2372  biasesQuantScales.begin() + index + groupWeightsShape[0]));
2373  }
2374 
2375  // Extract weights and biases data for current group convolution
2376  ConstTensor groupWeights(groupWeightsInfo,
2377  static_cast<const void *>(reinterpret_cast<const char *>(weights.GetMemoryArea()) +
2378  weightsDataOffset));
2379  ConstTensor groupBiases(groupBiasesInfo,
2380  static_cast<const void *>(reinterpret_cast<const char *>(biases.GetMemoryArea()) +
2381  biasesDataOffset));
2382 
2383  isSupported = false;
2384  armnn::BackendId setBackendConv;
2385  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2386  {
2387                 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2388                                            IsConvolution2dSupported,
2389  data.m_Backends,
2390  isSupported,
2391  setBackendConv,
2392  groupInputInfo,
2393  outputInfo,
2394  desc,
2395  groupWeightsInfo,
2396  Optional<TensorInfo>(groupBiasesInfo));
2397  };
2398 
2399  if(!isDynamic)
2400  {
2401  validateFunc(groupOutputInfo, isSupported);
2402  }
2403  else
2404  {
2405  isSupported = AreDynamicTensorsSupported();
2406  }
2407 
2408  if (!isSupported)
2409  {
2410  return false;
2411  }
2412 
2413  IConnectableLayer* weightsLayer = data.m_Network->AddConstantLayer(groupWeights);
2414  IConnectableLayer* biasLayer = data.m_Network->AddConstantLayer(groupBiases);
2415             IConnectableLayer* convLayer    = data.m_Network->AddConvolution2dLayer(desc);
2416 
2417             if (!convLayer)
2418             {
2419                 return Fail("%s: AddConvolution2dLayer failed", __func__);
2420             }
2421 
2422             convLayer->SetBackendId(setBackendConv);
2423 
2424  splitterLayer->GetOutputSlot(group).Connect(convLayer->GetInputSlot(0));
2425  weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
2426  biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
2427 
2428  weightsLayer->GetOutputSlot(0).SetTensorInfo(groupWeightsInfo);
2429  biasLayer->GetOutputSlot(0).SetTensorInfo(groupBiasesInfo);
2430  convLayer->GetOutputSlot(0).SetTensorInfo(groupOutputInfo);
2431 
2432  if(isDynamic)
2433  {
2434  convLayer->GetOutputSlot(0).IsTensorInfoSet();
2435 
2436  validateFunc(convLayer->GetOutputSlot(0).GetTensorInfo(), isSupported);
2437 
2438  outputInfo = convLayer->GetOutputSlot(0).GetTensorInfo();
2439 
2440  if (!isSupported)
2441  {
2442  return false;
2443  }
2444  }
2445 
2446  convLayers[index] = convLayer;
2447  }
2448  }
2449 
2450  //
2451  // Set up Concat layer
2452  //
2453     // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
2454     ConcatDescriptor concatDescriptor(weightsShape[0]);
2456  for (unsigned int group = 0u; group < numGroups; ++group)
2457  {
2458  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2459  {
2460  auto index = group * channelMultiplier + m;
2461  concatDescriptor.SetViewOriginCoord(index, channelsIndex, index);
2462  concatDescriptor.SetConcatAxis(channelsIndex);
2463  }
2464  }
2465 
2466  isSupported = false;
2467  armnn::BackendId setBackendConcat;
2468     FORWARD_LAYER_SUPPORT_FUNC(__func__,
2469                                IsConcatSupported,
2470  data.m_Backends,
2471  isSupported,
2472  setBackendConcat,
2473  std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
2474  outputInfo,
2475  concatDescriptor);
2476 
2477  if (!isSupported)
2478  {
2479  return false;
2480  }
2481 
2482     IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
2483     if (!concatLayer)
2484     {
2485         return Fail("%s: AddConcatLayer failed", __func__);
2486     }
2487     concatLayer->SetBackendId(setBackendConcat);
2488 
2489  for (unsigned int group = 0u; group < numGroups; ++group)
2490  {
2491  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2492  {
2493  auto index = group * channelMultiplier + m;
2494  convLayers[index]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(index));
2495  }
2496  }
2497  concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2498 
2499  return SetupAndTrackLayerOutputSlot(operation, 0, *concatLayer, model,
2500  data, nullptr, nullptr, activation);
2501 }
2502 
2503 bool Converter::ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data)
2504 {
2505  VLOG(DRIVER) << "Converter::ConvertHardSwish()";
2506  ActivationDescriptor desc;
2507  desc.m_Function = ActivationFunction::HardSwish;
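    // HardSwish computes f(x) = x * ReLU6(x + 3) / 6, mapped onto Arm NN's activation layer.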
2508 
2509  return ::ConvertToActivation(operation, __func__, desc, model, data);
2510 }
2511 
2512 bool Converter::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
2513 {
2514  VLOG(DRIVER) << "Converter::ConvertInstanceNormalization()";
2515 
2516  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2517  if (!input.IsValid())
2518  {
2519  return Fail("%s: Operation has an invalid input 0", __func__);
2520  }
2521 
2522  const Operand* output = GetOutputOperand(operation, 0, model);
2523  if (!output)
2524  {
2525  return Fail("%s: Operation has an invalid output", __func__);
2526  }
2527 
2528  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2529 
2530  // Determine data type of input tensor
2531  OperandType inputType;
2532  if (!GetOperandType(operation, 0, model, inputType))
2533  {
2534  return Fail("%s: Operation has invalid inputs", __func__);
2535  }
2536 
2537     InstanceNormalizationDescriptor desc;
2538 
2539  // Read gamma, beta & epsilon
2540  if (inputType == OperandType::TENSOR_FLOAT16)
2541  {
2542  Half fp16Gamma;
2543  Half fp16Beta;
2544  Half fp16Epsilon;
2545 
2546  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Gamma, model, data) ||
2547  !GetInputScalar(operation, 2, OperandType::FLOAT16, fp16Beta, model, data) ||
2548  !GetInputScalar(operation, 3, OperandType::FLOAT16, fp16Epsilon, model, data))
2549  {
2550  return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
2551  }
2552 
2553  desc.m_Gamma = static_cast<float>(fp16Gamma);
2554  desc.m_Beta = static_cast<float>(fp16Beta);
2555  desc.m_Eps = static_cast<float>(fp16Epsilon);
2556  }
2557  else if (inputType == OperandType::TENSOR_FLOAT32)
2558  {
2559  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_Gamma, model, data) ||
2560  !GetInputScalar(operation, 2, OperandType::FLOAT32, desc.m_Beta, model, data) ||
2561  !GetInputScalar(operation, 3, OperandType::FLOAT32, desc.m_Eps, model, data))
2562  {
2563  return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
2564  }
2565  }
2566  else
2567  {
2568  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2569  }
2570 
2571  desc.m_DataLayout = OptionalDataLayout(operation, 4, model, data);
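    // Instance normalization computes out = gamma * (x - mean) / sqrt(variance + epsilon) + beta,
    // with mean and variance taken per channel over the spatial dimensions of each instance.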
2572 
2573  bool isSupported = false;
2574  armnn::BackendId setBackend;
2575  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2576  {
2577  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2578  IsInstanceNormalizationSupported,
2579  data.m_Backends,
2580  isSupported,
2581  setBackend,
2582  input.GetTensorInfo(),
2583  outputInfo,
2584  desc);
2585  };
2586 
2587  if(IsDynamicTensor(outputInfo))
2588  {
2589  isSupported = AreDynamicTensorsSupported();
2590  }
2591  else
2592  {
2593  validateFunc(outputInfo, isSupported);
2594  }
2595 
2596  if (!isSupported)
2597  {
2598  return false;
2599  }
2600 
2601  IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
2602  layer->SetBackendId(setBackend);
2603  input.Connect(layer->GetInputSlot(0));
2604 
2605  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2606 }
2607 
2608 bool Converter::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2609 {
2610  VLOG(DRIVER) << "Converter::ConvertL2Normalization()";
2611 
2612  if (operation.inputs.size() != 1)
2613  {
2614  return Fail("%s: Optional inputs are not supported", __func__);
2615  }
2616 
2617  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2618  if (!input.IsValid())
2619  {
2620  return Fail("%s: Operation has invalid inputs", __func__);
2621  }
2622 
2623  const Operand* output = GetOutputOperand(operation, 0, model);
2624  if (!output)
2625  {
2626  return Fail("%s: Could not read output 0", __func__);
2627  }
2628 
2629  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2630  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2631 
2632  if (outputInfo.GetNumDimensions() != 4u)
2633  {
2634  return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2635  }
2636 
2637     armnn::L2NormalizationDescriptor desc;
2638     desc.m_DataLayout = armnn::DataLayout::NHWC;
2639 
2640  bool isSupported = false;
2641  armnn::BackendId setBackend;
2642  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2643  {
2644         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2645                                    IsL2NormalizationSupported,
2646  data.m_Backends,
2647  isSupported,
2648  setBackend,
2649  inputInfo,
2650  outputInfo,
2651  desc);
2652  };
2653 
2654  if(!IsDynamicTensor(outputInfo))
2655  {
2656  validateFunc(outputInfo, isSupported);
2657  }
2658  else
2659  {
2660  isSupported = AreDynamicTensorsSupported();
2661  }
2662 
2663  if (!isSupported)
2664  {
2665  return false;
2666  }
2667 
2668     armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2669     assert(layer != nullptr);
2670     layer->SetBackendId(setBackend);
2671  input.Connect(layer->GetInputSlot(0));
2672 
2673  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2674 }
2675 
2676 bool Converter::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
2677 {
2678  VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
2679  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::L2, model, data);
2680 }
2681 
2682 bool Converter::ConvertLocalResponseNormalization(const Operation& operation,
2683  const Model& model,
2684  ConversionData& data)
2685 {
2686  VLOG(DRIVER) << "Converter::ConvertLocalResponseNormalization()";
2687 
2688  if (operation.inputs.size() != 5)
2689  {
2690  return Fail("%s: Optional inputs are not supported", __func__);
2691  }
2692 
2693  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2694  if (!input.IsValid())
2695  {
2696  return Fail("%s: Operation has invalid inputs", __func__);
2697  }
2698 
2699  const Operand* output = GetOutputOperand(operation, 0, model);
2700  if (!output)
2701  {
2702  return Fail("%s: Could not read output 0", __func__);
2703  }
2704 
2705  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2706  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2707 
2708  if (outputInfo.GetNumDimensions() != 4u)
2709  {
2710  return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2711  }
2712 
2713  armnn::NormalizationDescriptor descriptor;
2714     descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
2715     descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2716     descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2717 
2718     // input.IsValid() was already checked above, so only the scalar inputs are read here
2719     if (!GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2720  !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
2721  !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
2722  !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
2723  {
2724  return Fail("%s: Operation has invalid inputs", __func__);
2725  }
2726 
2727  // ArmNN expects normSize to be the full size of the normalization
2728  // window rather than the radius as in AndroidNN.
2729  descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
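    // For example, an Android radius of 2 becomes an Arm NN window size of 1 + (2 * 2) = 5.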
2730 
2731  bool isSupported = false;
2732  armnn::BackendId setBackend;
2733  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2734  {
2735         FORWARD_LAYER_SUPPORT_FUNC(__func__,
2736                                    IsNormalizationSupported,
2737  data.m_Backends,
2738  isSupported,
2739  setBackend,
2740  inputInfo,
2741  outputInfo,
2742  descriptor);
2743  };
2744 
2745  if(!IsDynamicTensor(outputInfo))
2746  {
2747  validateFunc(outputInfo, isSupported);
2748  }
2749  else
2750  {
2751  isSupported = AreDynamicTensorsSupported();
2752  }
2753 
2754  if (!isSupported)
2755  {
2756  return false;
2757  }
2758 
2760     armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2761     assert(layer != nullptr);
2762     layer->SetBackendId(setBackend);
2763  input.Connect(layer->GetInputSlot(0));
2764 
2765  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2766 }
2767 
2768 bool Converter::ConvertLogicalBinary(const Operation& operation,
2769  const Model& model,
2770  ConversionData& data,
2771  armnn::LogicalBinaryOperation logicalOperation)
2772 {
2773     VLOG(DRIVER) << "Converter::ConvertLogicalBinary()";
2775  VLOG(DRIVER) << "logicalOperation = " << GetLogicalBinaryOperationAsCString(logicalOperation);
2776 
2777  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
2778  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
2779 
2780  if (!(input0.IsValid() && input1.IsValid()))
2781  {
2782  return Fail("%s: Operation has invalid inputs", __func__);
2783  }
2784 
2785  const Operand* output = GetOutputOperand(operation, 0, model);
2786  if (!output)
2787  {
2788  return Fail("%s: Could not read output 0", __func__);
2789  }
2790 
2791  const TensorInfo& inputInfo0 = input0.GetTensorInfo();
2792  const TensorInfo& inputInfo1 = input1.GetTensorInfo();
2793  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2794 
2795  LogicalBinaryDescriptor descriptor(logicalOperation);
2796 
2797  bool isSupported = false;
2798  armnn::BackendId setBackend;
2799  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2800  {
2801  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2802  IsLogicalBinarySupported,
2803  data.m_Backends,
2804  isSupported,
2805  setBackend,
2806  inputInfo0,
2807  inputInfo1,
2808  outputInfo,
2809  descriptor);
2810  };
2811 
2812  if(!IsDynamicTensor(outputInfo))
2813  {
2814  validateFunc(outputInfo, isSupported);
2815  }
2816  else
2817  {
2818  isSupported = AreDynamicTensorsSupported();
2819  }
2820 
2821  if (!isSupported)
2822  {
2823  return false;
2824  }
2825 
2826     IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
2827     assert(layer != nullptr);
2828     layer->SetBackendId(setBackend);
2829 
2830  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
2831  if (!isReshapeSupported)
2832  {
2833  return false;
2834  }
2835 
2836  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2837 }
2838 
2839 bool Converter::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2840 {
2841  VLOG(DRIVER) << "Converter::ConvertLogistic()";
2842     armnn::ActivationDescriptor desc;
2843     desc.m_Function = ActivationFunction::Sigmoid;
2844 
2845  return ConvertToActivation(operation, __func__, desc, model, data);
2846 }
2847 
2848 bool Converter::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data)
2849 {
2850  VLOG(DRIVER) << "Converter::ConvertLogSoftmax()";
2851 
2852  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2853  if (!input.IsValid())
2854  {
2855  return Fail("%s: Failed to read input 0", __func__);
2856  }
2857 
2858  const Operand* output = GetOutputOperand(operation, 0, model);
2859  if (!output)
2860  {
2861  return Fail("%s: Failed to read output", __func__);
2862  }
2863 
2864  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2865 
2866  // Determine data type of input tensor
2867  OperandType inputType;
2868  if (!GetOperandType(operation, 0, model, inputType))
2869  {
2870  return Fail("%s: Operation has invalid inputs", __func__);
2871  }
2872 
2873  LogSoftmaxDescriptor descriptor;
2874 
2875  // Read beta
2876  if (inputType == OperandType::TENSOR_FLOAT16)
2877  {
2878  Half fp16Beta;
2879  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Beta, model, data))
2880  {
2881  return Fail("%s: Failed to read input 1 (FLOAT16)", __func__);
2882  }
2883 
2884  descriptor.m_Beta = static_cast<float>(fp16Beta);
2885  }
2886  else if (inputType == OperandType::TENSOR_FLOAT32)
2887  {
2888  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Beta, model, data))
2889  {
2890  return Fail("%s: Failed to read input 1 (FLOAT32)", __func__);
2891  }
2892  }
2893  else
2894  {
2895  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2896  }
2897 
2898  // Read axis
2899  if (!GetInputInt32(operation, 2, descriptor.m_Axis, model, data))
2900  {
2901  return Fail("%s: Failed to read input 2", __func__);
2902  }
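    // LogSoftmax computes log_softmax(x_i) = beta * x_i - log(sum_j exp(beta * x_j)),
    // reduced along the axis read above.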
2903 
2904  bool isSupported = false;
2905  armnn::BackendId setBackend;
2906  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2907  {
2908  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2909  IsLogSoftmaxSupported,
2910  data.m_Backends,
2911  isSupported,
2912  setBackend,
2913  input.GetTensorInfo(),
2914  outputInfo,
2915  descriptor);
2916  };
2917 
2918  if(IsDynamicTensor(outputInfo))
2919  {
2920  isSupported = AreDynamicTensorsSupported();
2921  }
2922  else
2923  {
2924  validateFunc(outputInfo, isSupported);
2925  }
2926 
2927  if (!isSupported)
2928  {
2929  return false;
2930  }
2931 
2932     IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
2933     if (!layer)
2934     {
2935         return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
2936     }
2937     layer->SetBackendId(setBackend);
2938 
2939  input.Connect(layer->GetInputSlot(0));
2940 
2941  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2942 }
2943 
2944 bool Converter::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
2945 {
2946  VLOG(DRIVER) << "Converter::ConvertLstm()";
2947 
2948  // Inputs:
2949  // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2950  // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2951  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2952  if (!input.IsValid())
2953  {
2954  return Fail("%s: Could not read input 0: input", __func__);
2955  }
2956  // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2957  LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
2958  if (!outputStateIn.IsValid())
2959  {
2960  return Fail("%s: Could not read input 18: outputStateIn", __func__);
2961  }
2962  // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2963  LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
2964  if (!cellStateIn.IsValid())
2965  {
2966  return Fail("%s: Could not read input 19: cellStateIn", __func__);
2967  }
2968 
2969  // Get the mandatory input tensors:
2970  // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2971  // [num_units, input_size].
2972  const ConstTensorPin inputToForgetWeightsPin =
2973  (DequantizeAndMakeConstTensorPin(operation, model, data, 2));
2974  // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2975  // [num_units, input_size].
2976  const ConstTensorPin inputToCellWeightsPin =
2977  (DequantizeAndMakeConstTensorPin(operation, model, data, 3));
2978  // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2979  // [num_units, input_size].
2980  const ConstTensorPin inputToOutputWeightsPin =
2981  (DequantizeAndMakeConstTensorPin(operation, model, data, 4));
2982  // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2983  // [num_units, output_size].
2984  const ConstTensorPin recurrentToForgetWeightsPin =
2985  (DequantizeAndMakeConstTensorPin(operation, model, data, 6));
2986  // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2987  // [num_units, output_size].
2988  const ConstTensorPin recurrentToCellWeightsPin =
2989  (DequantizeAndMakeConstTensorPin(operation, model, data, 7));
2990  // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2991  // [num_units, output_size].
2992  const ConstTensorPin recurrentToOutputWeightsPin =
2993  (DequantizeAndMakeConstTensorPin(operation, model, data, 8));
2994  // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2995  const ConstTensorPin forgetGateBiasPin =
2996  ConvertOperationInputToConstTensorPin(operation, 13, model, data);
2997  // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2998  const ConstTensorPin cellBiasPin =
2999  ConvertOperationInputToConstTensorPin(operation, 14, model, data);
3000  // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3001  const ConstTensorPin outputGateBiasPin =
3002  ConvertOperationInputToConstTensorPin(operation, 15, model, data);
3003 
3004  if (!inputToForgetWeightsPin.IsValid() ||
3005  !inputToCellWeightsPin.IsValid() ||
3006  !inputToOutputWeightsPin.IsValid() ||
3007  !recurrentToForgetWeightsPin.IsValid() ||
3008  !recurrentToCellWeightsPin.IsValid() ||
3009  !recurrentToOutputWeightsPin.IsValid() ||
3010  !forgetGateBiasPin.IsValid() ||
3011  !cellBiasPin.IsValid() ||
3012  !outputGateBiasPin.IsValid())
3013  {
3014  return Fail("%s: Operation has invalid tensor inputs", __func__);
3015  }
3016 
3017  // Get the optional input tensors:
3018  // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3019  // [num_units, input_size], where “num_units” corresponds to the number of cell units.
3020  const ConstTensorPin inputToInputWeightsPin =
3021  (DequantizeAndMakeConstTensorPin(operation, model, data, 1, true));
3022  // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3023  // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
3024  // “num_units”), or the second dimension of the “projection_weights”, if defined.
3025  const ConstTensorPin recurrentToInputWeightsPin =
3026  (DequantizeAndMakeConstTensorPin(operation, model, data, 5, true));
3027  // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3028  const ConstTensorPin cellToInputWeightsPin =
3029  (DequantizeAndMakeConstTensorPin(operation, model, data, 9, true));
3030  // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3031  const ConstTensorPin cellToForgetWeightsPin =
3032  (DequantizeAndMakeConstTensorPin(operation, model, data, 10, true));
3033  // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3034  const ConstTensorPin cellToOutputWeightsPin =
3035  (DequantizeAndMakeConstTensorPin(operation, model, data, 11, true));
3036  // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3037     const ConstTensorPin inputGateBiasPin =
3038         ConvertOperationInputToConstTensorPin(operation,
3039                                               12,
3040  model,
3041  data,
3042  g_DontPermute,
3043  nullptr,
3044  true);
3045 
3046  // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3047  // [output_size, num_units].
3048  const ConstTensorPin projectionWeightsPin =
3049  (DequantizeAndMakeConstTensorPin(operation, model, data, 16, true));
3050  // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
3051     const ConstTensorPin projectionBiasPin =
3052         ConvertOperationInputToConstTensorPin(operation,
3053                                               17,
3054  model,
3055  data,
3056  g_DontPermute,
3057  nullptr,
3058  true);
3059 
3060  if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
3061  (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
3062  (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
3063  (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
3064  (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
3065  (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
3066  (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
3067  (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
3068  {
3069  return Fail("%s: Operation has invalid tensor inputs", __func__);
3070  }
3071 
3072  // Get the mandatory input scalars (actually 1-D tensors of size 1):
3073  // 20: The activation function: A value indicating the activation function:
3074  // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
3075  // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
3076  // If set to 0.0 then clipping is disabled.
3077  // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
3078  // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
3079  ActivationFn activation = ActivationFn::kActivationNone;
3080  float cellClip;
3081  float projClip;
3082  if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
3083  !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
3084  !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
3085  {
3086  return Fail("%s: Operation has invalid scalar inputs", __func__);
3087  }
3088 
3089  // Get the normalization tensors
3090  // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
3091  // Used to rescale normalized inputs to activation at input gate.
3092  const ConstTensorPin inputLayerNormWeightsPin
3093  (DequantizeAndMakeConstTensorPin(operation, model, data, 23, true));
3094 
3095  // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
3096  // Used to rescale normalized inputs to activation at forget gate.
3097     const ConstTensorPin forgetLayerNormWeightsPin =
3098         ConvertOperationInputToConstTensorPin(operation,
3099                                               24,
3100  model,
3101  data,
3102  g_DontPermute,
3103  nullptr,
3104  true);
3105 
3106  // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
3107  // Used to rescale normalized inputs to activation at cell gate.
3108     const ConstTensorPin cellLayerNormWeightsPin =
3109         ConvertOperationInputToConstTensorPin(operation,
3110                                               25,
3111  model,
3112  data,
3113  g_DontPermute,
3114  nullptr,
3115  true);
3116 
3117  // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
3118  // Used to rescale normalized inputs to activation at output gate.
3119     const ConstTensorPin outputLayerNormWeightsPin =
3120         ConvertOperationInputToConstTensorPin(operation,
3121                                               26,
3122  model,
3123  data,
3124  g_DontPermute,
3125  nullptr,
3126  true);
3127 
3128  // Outputs:
3129  // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
3130  // with CIFG, or [batch_size, num_units * 3] without CIFG.
3131  const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
3132  if (!scratchBuffer)
3133  {
3134  return Fail("%s: Could not read output 0: scratchBuffer", __func__);
3135  }
3136  // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
3137  const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
3138  if (!outputStateOut)
3139  {
3140  return Fail("%s: Could not read output 1: outputStateOut", __func__);
3141  }
3142  // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
3143  const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
3144  if (!cellStateOut)
3145  {
3146  return Fail("%s: Could not read output 2: cellStateOut", __func__);
3147  }
3148  // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
3149  // effectively the same as the current “output state (out)” value.
3150  const Operand* output = GetOutputOperand(operation, 3, model);
3151  if (!output)
3152  {
3153  return Fail("%s: Could not read output 3: output", __func__);
3154  }
3155 
3156  // set the params structure for the AddLstmLayer call
3157  LstmInputParams params;
3158  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
3159  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
3160  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
3161  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
3162  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
3163  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
3164  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
3165  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
3166  params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
3167  params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
3168  params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
3169  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
3170  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
3171  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
3172  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
3173  params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
3174  params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
3175  params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
3176  params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
3177  params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
3178  params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
3179 
3180  // set the layer descriptor
3181  LstmDescriptor desc;
3182  desc.m_ActivationFunc = activation;
3183  desc.m_ClippingThresCell = cellClip;
3184  desc.m_ClippingThresProj = projClip;
3185  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
3186  params.m_RecurrentToInputWeights == nullptr ||
3187  params.m_InputGateBias == nullptr);
3188  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
3189  params.m_CellToOutputWeights != nullptr);
3190  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
3191  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
3192  params.m_ForgetLayerNormWeights != nullptr ||
3193  params.m_CellLayerNormWeights != nullptr ||
3194  params.m_OutputLayerNormWeights != nullptr);
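    // The optional-tensor groups drive the descriptor flags: CIFG (coupled input and
    // forget gate) is enabled when the input-gate tensors are absent, peephole when any
    // cell-to-gate weights are present, projection when projection weights are present,
    // and layer normalization when any of the per-gate norm weights are present.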
3195 
3196  // validate the optional input groups
3197  if (desc.m_CifgEnabled &&
3198  (params.m_InputToInputWeights != nullptr ||
3199  params.m_RecurrentToInputWeights != nullptr ||
3200  params.m_InputGateBias != nullptr))
3201  {
3202  return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
3203  " and input gate bias must be provided", __func__);
3204  }
3205 
3206  if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
3207  {
3208  return Fail("%s: projection bias should not be provided without projection weights", __func__);
3209  }
3210 
3211  if (desc.m_PeepholeEnabled &&
3212  (params.m_CellToForgetWeights == nullptr ||
3213  params.m_CellToOutputWeights == nullptr ||
3214  (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
3215  {
3216  return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
3217  " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
3218  }
3219 
3220  if (desc.m_LayerNormEnabled &&
3221  (params.m_ForgetLayerNormWeights == nullptr ||
3222  params.m_CellLayerNormWeights == nullptr ||
3223  params.m_OutputLayerNormWeights == nullptr ||
3224  (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
3225  {
3226  return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
3227  " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
3228  }
3229 
3230  // Check if the layer is supported
3231  // Inputs
3232  const TensorInfo& inputInfo = input.GetTensorInfo();
3233  const TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
3234  const TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
3235 
3236  // Outputs
3237  const TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
3238  const TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
3239  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
3240  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3241 
3242  // Basic parameters
3243  LstmInputParamsInfo paramsInfo;
3244  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
3245  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
3246  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
3247  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
3248  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
3249  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
3250  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
3251  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
3252  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
3253 
3254  // Optional parameters
3255  if (!desc.m_CifgEnabled)
3256  {
3257  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
3258  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
3259  if (params.m_CellToInputWeights != nullptr)
3260  {
3261  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
3262  }
3263  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
3264  }
3265 
3266  if (desc.m_ProjectionEnabled)
3267  {
3268  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
3269  if (params.m_ProjectionBias != nullptr)
3270  {
3271  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
3272  }
3273  }
3274 
3275  if (desc.m_PeepholeEnabled)
3276  {
3277  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
3278  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
3279  }
3280 
3281  if (desc.m_LayerNormEnabled)
3282  {
3283  if(!desc.m_CifgEnabled)
3284  {
3285  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
3286  }
3287  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
3288  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
3289  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
3290  }
3291 
3292  bool isSupported = false;
3293  armnn::BackendId setBackend;
3294  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3295  {
3296  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3297  IsLstmSupported,
3298  data.m_Backends,
3299  isSupported,
3300  setBackend,
3301  inputInfo,
3302  outputStateInInfo,
3303  cellStateInInfo,
3304  scratchBufferInfo,
3305  outputStateOutInfo,
3306  cellStateOutInfo,
3307  outputInfo,
3308  desc,
3309  paramsInfo);
3310  };
3311 
3312  bool isDynamic = false;
3313  if (!IsDynamicTensor(outputStateOutInfo) &&
3314  !IsDynamicTensor(scratchBufferInfo) &&
3315  !IsDynamicTensor(cellStateOutInfo) &&
3316  !IsDynamicTensor(outputInfo))
3317  {
3318  validateFunc(outputInfo, isSupported);
3319  }
3320  else
3321  {
3322  isDynamic = true;
3323  isSupported = AreDynamicTensorsSupported();
3324  }
3325 
3326  if (!isSupported)
3327  {
3328  return false;
3329  }
3330 
3331  // Add the layer
3332  IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
3333  layer->SetBackendId(setBackend);
3334 
3335  input.Connect(layer->GetInputSlot(0));
3336  outputStateIn.Connect(layer->GetInputSlot(1));
3337  cellStateIn.Connect(layer->GetInputSlot(2));
3338 
3339  if (!isDynamic)
3340  {
3341  return (
3342  SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3343  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3344  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3345  SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
3346  }
3347  else
3348  {
3349  return (
3350  SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3351  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3352  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3353  SetupAndTrackLayerOutputSlot(
3354  operation, 3, *layer, 3, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
3355  }
3356 
3357 }
3358 
3359 bool Converter::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
3360 {
3361  VLOG(DRIVER) << "Converter::ConvertMaxPool2d()";
3362  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Max, model, data);
3363 }
3364 
3365 bool Converter::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
3366 {
3367  VLOG(DRIVER) << "Converter::ConvertMaximum()";
3368 
3369  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3370  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3371 
3372  if (!input0.IsValid() || !input1.IsValid())
3373  {
3374  return Fail("%s: Operation has invalid inputs", __func__);
3375  }
3376 
3377  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
3378  if (!outputOperand)
3379  {
3380  return Fail("%s: Could not read output", __func__);
3381  }
3382 
3383  const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
3384 
3385  bool isSupported = false;
3386  armnn::BackendId setBackend;
3387  auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
3388  {
3389  ARMNN_NO_DEPRECATE_WARN_BEGIN
3390  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3391  IsMaximumSupported,
3392  data.m_Backends,
3393  isSupported,
3394  setBackend,
3395  input0.GetTensorInfo(),
3396  input1.GetTensorInfo(),
3397  outInfo);
3398  ARMNN_NO_DEPRECATE_WARN_END
3399  };
3400 
3401  if(IsDynamicTensor(outInfo))
3402  {
3403  isSupported = AreDynamicTensorsSupported();
3404  }
3405  else
3406  {
3407  validateFunc(outInfo, isSupported);
3408  }
3409 
3410  if (!isSupported)
3411  {
3412  return false;
3413  }
3414 
3415  ARMNN_NO_DEPRECATE_WARN_BEGIN
3416  IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
3417  ARMNN_NO_DEPRECATE_WARN_END
3418  assert(layer != nullptr);
3419  layer->SetBackendId(setBackend);
3420  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
3421  if (!isReshapeSupported)
3422  {
3423  return false;
3424  }
3425 
3426  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3427 }
3428 
3429 bool Converter::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
3430 {
3431  VLOG(DRIVER) << "Converter::ConvertMean()";
3432 
3433  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3434  if (!input.IsValid())
3435  {
3436  return Fail("%s: Operation has invalid inputs", __func__);
3437  }
3438 
3439  const Operand* output = GetOutputOperand(operation, 0, model);
3440  if (!output)
3441  {
3442  return Fail("%s: Could not read output 0", __func__);
3443  }
3444 
3445  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3446 
3447  const Operand* axisOperand = GetInputOperand(operation, 1, model);
3448  if (!axisOperand)
3449  {
3450  return Fail("%s: Could not read input 1", __func__);
3451  }
3452 
3453  std::vector<int32_t> axis;
3454  if (!GetTensorInt32Values(*axisOperand, axis, model, data))
3455  {
3456  return Fail("%s: Input 1 has invalid values", __func__);
3457  }
3458 
3459  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3460 
3461  // Convert the axis to unsigned int and remove duplicates.
3462  unsigned int rank = inputInfo.GetNumDimensions();
3463  std::set<unsigned int> uniqueAxis;
3464  std::transform(axis.begin(), axis.end(),
3465  std::inserter(uniqueAxis, uniqueAxis.begin()),
3466  [rank](int i) -> unsigned int { return (i + rank) % rank; });
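 // Editor's note (illustrative, not part of the original source): (i + rank) % rank folds
 // negative NNAPI axis values into the [0, rank) range expected by ArmNN, and the std::set
 // drops duplicates. E.g. for a rank-4 input, axis = {-1, 3, 1} becomes uniqueAxis = {1, 3},
 // because (-1 + 4) % 4 == 3.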
3467 
3468  // Get the "keep dims" flag.
3469  int32_t keepDims = 0;
3470  if (!GetInputInt32(operation, 2, keepDims, model, data))
3471  {
3472  return Fail("%s: Could not read input 2", __func__);
3473  }
3474 
3475  armnn::MeanDescriptor descriptor;
3476  descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3477  descriptor.m_KeepDims = keepDims > 0;
3478 
3479  bool isSupported = false;
3480  armnn::BackendId setBackend;
3481  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3482  {
3483  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3484  IsMeanSupported,
3485  data.m_Backends,
3486  isSupported,
3487  setBackend,
3488  inputInfo,
3489  outputInfo,
3490  descriptor);
3491  };
3492 
3493  if(!IsDynamicTensor(outputInfo))
3494  {
3495  validateFunc(outputInfo, isSupported);
3496  }
3497  else
3498  {
3499  isSupported = AreDynamicTensorsSupported();
3500  }
3501 
3502  if (!isSupported)
3503  {
3504  return false;
3505  }
3506 
3507  armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3508  assert(layer != nullptr);
3509  layer->SetBackendId(setBackend);
3510  input.Connect(layer->GetInputSlot(0));
3511 
3512  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3513 }
3514 
3515 bool Converter::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
3516 {
3517  VLOG(DRIVER) << "Converter::ConvertMinimum()";
3518 
3519  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3520  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3521 
3522  if (!input0.IsValid() || !input1.IsValid())
3523  {
3524  return Fail("%s: Operation has invalid inputs", __func__);
3525  }
3526 
3527  const Operand* output = GetOutputOperand(operation, 0, model);
3528  if (!output)
3529  {
3530  return Fail("%s: Could not read output 0", __func__);
3531  }
3532 
3533  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3534 
3535  bool isSupported = false;
3536  armnn::BackendId setBackend;
3537  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3538  {
3539  ARMNN_NO_DEPRECATE_WARN_BEGIN
3540  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3541  IsMinimumSupported,
3542  data.m_Backends,
3543  isSupported,
3544  setBackend,
3545  input0.GetTensorInfo(),
3546  input1.GetTensorInfo(),
3547  outputInfo);
3548  ARMNN_NO_DEPRECATE_WARN_END
3549  };
3550 
3551  if(IsDynamicTensor(outputInfo))
3552  {
3553  isSupported = AreDynamicTensorsSupported();
3554  }
3555  else
3556  {
3557  validateFunc(outputInfo, isSupported);
3558  }
3559 
3560  if (!isSupported)
3561  {
3562  return false;
3563  }
3564 
3565  ARMNN_NO_DEPRECATE_WARN_BEGIN
3566  IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
3567  ARMNN_NO_DEPRECATE_WARN_END
3568  assert(layer != nullptr);
3569  layer->SetBackendId(setBackend);
3570  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
3571  if (!isReshapeSupported)
3572  {
3573  return false;
3574  }
3575 
3576  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3577 }
3578 
3579 bool Converter::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
3580 {
3581  VLOG(DRIVER) << "Converter::ConvertMul()";
3582 
3583  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3584  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3585 
3586  if (!input0.IsValid() || !input1.IsValid())
3587  {
3588  return Fail("%s: Operation has invalid inputs", __func__);
3589  }
3590 
3591  // The FuseActivation parameter is always the input index 2
3592  // and it should be optional
3593  ActivationFn activationFunction;
3594  if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
3595  {
3596  return Fail("%s: Operation has invalid inputs", __func__);
3597  }
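 // Editor's note (illustrative, not part of the original source): the optional scalar at input
 // index 2 carries an NNAPI fuse code (0 = NONE, 1 = RELU, 2 = RELU1, 3 = RELU6). No separate
 // activation layer is added here; the fused activation is applied when the output slot is set
 // up below, via the activationFunction argument passed to SetupAndTrackLayerOutputSlot.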
3598 
3599  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
3600 
3601  if (outputOperand == nullptr)
3602  {
3603  return false;
3604  }
3605 
3606  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3607 
3608  bool isSupported = false;
3609  armnn::BackendId setBackend;
3610  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3611  {
3612  ARMNN_NO_DEPRECATE_WARN_BEGIN
3613  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3614  IsMultiplicationSupported,
3615  data.m_Backends,
3616  isSupported,
3617  setBackend,
3618  input0.GetTensorInfo(),
3619  input1.GetTensorInfo(),
3620  outputInfo);
3621  ARMNN_NO_DEPRECATE_WARN_END
3622  };
3623 
3624  if(!IsDynamicTensor(outputInfo))
3625  {
3626  validateFunc(outputInfo, isSupported);
3627  }
3628  else
3629  {
3630  isSupported = AreDynamicTensorsSupported();
3631  }
3632 
3633  if (!isSupported)
3634  {
3635  return false;
3636  }
3637 
3638  ARMNN_NO_DEPRECATE_WARN_BEGIN
3639  armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3640  ARMNN_NO_DEPRECATE_WARN_END
3641  startLayer->SetBackendId(setBackend);
3642 
3643  bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3644  if (!isReshapeSupported)
3645  {
3646  return false;
3647  }
3648 
3649  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
3650  data, nullptr, validateFunc, activationFunction);
3651 }
3652 
3653 bool Converter::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
3654 {
3655  VLOG(DRIVER) << "Converter::ConvertPad()";
3656 
3657  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3658  if (!input.IsValid())
3659  {
3660  return Fail("%s: Operation has invalid inputs", __func__);
3661  }
3662 
3663  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3664  unsigned int rank = inputInfo.GetNumDimensions();
3665 
3666  armnn::PadDescriptor descriptor;
3667  if (!ConvertPaddings(operation, model, data, rank, descriptor))
3668  {
3669  return Fail("%s: Could not convert paddings", __func__);
3670  }
3671 
3672  // For an ANEURALNETWORKS_TENSOR_QUANT8_ASYMM or ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3673  // the scale and zeroPoint must be the same as input0.
3674  // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
3675  // pad value must be "logical zero", so we set it equal to the QuantizationOffset and it effectively ends up as
3676  // (QuantizationOffset - QuantizationOffset) * scale = 0.
3677  if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
3678  {
3679  descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3680  }
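 // Editor's note (worked example, assuming a hypothetical QAsymmU8 input with scale = 0.5 and
 // zeroPoint = 128): padding with the raw value 128 dequantizes to (128 - 128) * 0.5 = 0.0f,
 // i.e. a logical zero, which is why m_PadValue is set to the quantization offset rather than 0.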
3681 
3682  const Operand* output = GetOutputOperand(operation, 0, model);
3683  if (!output)
3684  {
3685  return Fail("%s: Could not read output", __func__);
3686  }
3687 
3688  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3689 
3690  bool isSupported = false;
3691  armnn::BackendId setBackend;
3692  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3693  {
3694  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3695  IsPadSupported,
3696  data.m_Backends,
3697  isSupported,
3698  setBackend,
3699  inputInfo,
3700  outputInfo,
3701  descriptor);
3702  };
3703 
3704  if(!IsDynamicTensor(outputInfo))
3705  {
3706  validateFunc(outputInfo, isSupported);
3707  }
3708  else
3709  {
3710  isSupported = AreDynamicTensorsSupported();
3711  }
3712 
3713  if (!isSupported)
3714  {
3715  return false;
3716  }
3717 
3718  armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3719  assert(layer != nullptr);
3720  layer->SetBackendId(setBackend);
3721  input.Connect(layer->GetInputSlot(0));
3722 
3723  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3724 }
3725 
3726 bool Converter::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
3727 {
3728  VLOG(DRIVER) << "Converter::ConvertPadV2()";
3729 
3730  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3731  if (!input.IsValid())
3732  {
3733  return Fail("%s: Could not read input 0", __func__);
3734  }
3735 
3736  const Operand* output = GetOutputOperand(operation, 0, model);
3737  if (!output)
3738  {
3739  return Fail("%s: Could not read output", __func__);
3740  }
3741 
3742  const TensorInfo& inputInfo = input.GetTensorInfo();
3743  unsigned int rank = inputInfo.GetNumDimensions();
3744 
3745  PadDescriptor descriptor;
3746  if (!ConvertPaddings(operation, model, data, rank, descriptor))
3747  {
3748  return Fail("%s: Could not convert paddings", __func__);
3749  }
3750 
3751  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3752 
3753  // Determine type of padding value
3754  OperandType operandType0;
3755  OperandType operandType2;
3756 
3757  if (!GetOperandType(operation, 0, model, operandType0) ||
3758  !GetOperandType(operation, 2, model, operandType2))
3759  {
3760  return Fail("%s: Operation has invalid inputs", __func__);
3761  }
3762 
3763  // Read value to use for padding
3764  if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
3765  {
3766  Half f16PadValue;
3767  if (!GetInputScalar(operation, 2, operandType2, f16PadValue, model, data))
3768  {
3769  return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
3770  }
3771 
3772  descriptor.m_PadValue = f16PadValue;
3773  }
3774  else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
3775  {
3776  if (!GetInputFloat32(operation, 2, descriptor.m_PadValue, model, data))
3777  {
3778  return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
3779  }
3780  }
3781  else if (isQuantizedOperand(operandType0) && operandType2 == OperandType::INT32)
3782  {
3783  int32_t intPadValue = 0;
3784  if (!GetInputInt32(operation, 2, intPadValue, model, data))
3785  {
3786  return Fail("%s: Could not read input 2 (INT32)", __func__);
3787  }
3788  descriptor.m_PadValue = intPadValue;
3789  }
3790  else
3791  {
3792  return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
3793  }
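 // Editor's note (illustrative, not part of the original source): PAD_V2 therefore accepts
 // exactly three (input, pad value) type pairings: TENSOR_FLOAT16 + FLOAT16,
 // TENSOR_FLOAT32 + FLOAT32, and a quantized input tensor + INT32, where the INT32 value is
 // interpreted in the input's quantized domain.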
3794 
3795  bool isSupported = false;
3796  armnn::BackendId setBackend;
3797  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3798  {
3799  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3800  IsPadSupported,
3801  data.m_Backends,
3802  isSupported,
3803  setBackend,
3804  inputInfo,
3805  outputInfo,
3806  descriptor);
3807  };
3808 
3809  if(IsDynamicTensor(outputInfo))
3810  {
3811  isSupported = AreDynamicTensorsSupported();
3812  }
3813  else
3814  {
3815  validateFunc(outputInfo, isSupported);
3816  }
3817 
3818  if (!isSupported)
3819  {
3820  return false;
3821  }
3822 
3823  IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3824  assert(layer != nullptr);
3825  layer->SetBackendId(setBackend);
3826  input.Connect(layer->GetInputSlot(0));
3827 
3828  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3829 }
3830 
3831 bool Converter::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
3832 {
3833  VLOG(DRIVER) << "Converter::ConvertPrelu()";
3834 
3835  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3836  LayerInputHandle alpha = ConvertToLayerInputHandle(operation, 1, model, data);
3837 
3838  if (!input.IsValid() || !alpha.IsValid())
3839  {
3840  return Fail("%s: Operation has invalid inputs", __func__);
3841  }
3842 
3843  const Operand* output = GetOutputOperand(operation, 0, model);
3844 
3845  if (!output)
3846  {
3847  return Fail("%s: Could not read output", __func__);
3848  }
3849 
3850  const TensorInfo& inputInfo = input.GetTensorInfo();
3851  const TensorInfo& alphaInfo = alpha.GetTensorInfo();
3852  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3853 
3854  bool isSupported = false;
3855  armnn::BackendId setBackend;
3856  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3857  {
3858  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3859  IsPreluSupported,
3860  data.m_Backends,
3861  isSupported,
3862  setBackend,
3863  inputInfo,
3864  alphaInfo,
3865  outputInfo);
3866  };
3867 
3868  if(IsDynamicTensor(outputInfo))
3869  {
3870  isSupported = AreDynamicTensorsSupported();
3871  }
3872  else
3873  {
3874  validateFunc(outputInfo, isSupported);
3875  }
3876 
3877  if (!isSupported)
3878  {
3879  return false;
3880  }
3881 
3882  IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
3883  if (!layer)
3884  {
3885  return Fail("%s: AddPreluLayer failed", __func__);
3886  }
3887  layer->SetBackendId(setBackend);
3888 
3889 
3890  bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
3891  if (!isReshapeSupported)
3892  {
3893  return false;
3894  }
3895 
3896  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3897 }
3898 
3899 bool Converter::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
3900 {
3901  VLOG(DRIVER) << "Converter::ConvertQuantize()";
3902 
3903  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3904  if (!input.IsValid())
3905  {
3906  return Fail("%s: Operation has invalid input", __func__);
3907  }
3908 
3909  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
3910  if (!outputOperand)
3911  {
3912  return Fail("%s: Operation has invalid outputs", __func__);
3913  }
3914 
3915  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3916 
3917  bool isSupported = false;
3918  armnn::BackendId setBackend;
3919  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3920  {
3921  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3922  IsQuantizeSupported,
3923  data.m_Backends,
3924  isSupported,
3925  setBackend,
3926  input.GetTensorInfo(),
3927  outputInfo);
3928  };
3929 
3930  if(IsDynamicTensor(outputInfo))
3931  {
3932  isSupported = AreDynamicTensorsSupported();
3933  }
3934  else
3935  {
3936  validateFunc(outputInfo, isSupported);
3937  }
3938 
3939  if (!isSupported)
3940  {
3941  return false;
3942  }
3943 
3944  IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
3945  assert(layer != nullptr);
3946  layer->SetBackendId(setBackend);
3947  input.Connect(layer->GetInputSlot(0));
3948 
3949  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3950 }
3951 
3952 bool Converter::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
3953 {
3954  VLOG(DRIVER) << "Converter::ConvertQuantizedLstm()";
3957 
3958  //Inputs:
3959  // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED and shape
3960  // [batchSize, inputSize] specifying the input to the QLSTM cell.
3961  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3962  if (!input.IsValid())
3963  {
3964  return Fail("%s: Could not read input 0: input", __func__);
3965  }
3966 
3967  // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size, output_size].
3968  LayerInputHandle outputStatePrevTimeStep = ConvertToLayerInputHandle(operation, 18, model, data);
3969  if (!outputStatePrevTimeStep.IsValid())
3970  {
3971  return Fail("%s: Could not read input 18: outputStatePrevTimeStep", __func__);
3972  }
3973 
3974  // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
3975  LayerInputHandle cellStatePrevTimeStep = ConvertToLayerInputHandle(operation, 19, model, data);
3976  if (!cellStatePrevTimeStep.IsValid())
3977  {
3978  return Fail("%s: Could not read input 19: cellStatePrevTimeStep", __func__);
3979  }
3980 
3981  // Get the mandatory input tensors:
3982 
3983  // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3984  // [num_units, input_size].
3985  const ConstTensorPin inputToForgetWeightsPin =
3986  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
3987 
3988  // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3989  // [num_units, input_size].
3990  const ConstTensorPin inputToCellWeightsPin =
3991  ConvertOperationInputToConstTensorPin(operation, 3, model, data);
3992 
3993  // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3994  // [num_units, input_size].
3995  const ConstTensorPin inputToOutputWeightsPin =
3996  ConvertOperationInputToConstTensorPin(operation, 4, model, data);
3997 
3998  // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3999  // [num_units, output_size].
4000  const ConstTensorPin recurrentToForgetWeightsPin =
4001  ConvertOperationInputToConstTensorPin(operation, 6, model, data);
4002 
4003  // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4004  // [num_units, output_size].
4005  const ConstTensorPin recurrentToCellWeightsPin =
4006  ConvertOperationInputToConstTensorPin(operation, 7, model, data);
4007 
4008  // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4009  // [num_units, output_size].
4010  const ConstTensorPin recurrentToOutputWeightsPin =
4011  ConvertOperationInputToConstTensorPin(operation, 8, model, data);
4012 
4013  // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
4014  const ConstTensorPin forgetGateBiasPin =
4015  ConvertOperationInputToConstTensorPin(operation, 13, model, data);
4016 
4017  // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
4018  const ConstTensorPin cellBiasPin =
4019  ConvertOperationInputToConstTensorPin(operation, 14, model, data);
4020 
4021  // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
4022  const ConstTensorPin outputGateBiasPin =
4023  ConvertOperationInputToConstTensorPin(operation, 15, model, data);
4024 
4025  if (!inputToForgetWeightsPin.IsValid() ||
4026  !inputToCellWeightsPin.IsValid() ||
4027  !inputToOutputWeightsPin.IsValid() ||
4028  !recurrentToForgetWeightsPin.IsValid() ||
4029  !recurrentToCellWeightsPin.IsValid() ||
4030  !recurrentToOutputWeightsPin.IsValid() ||
4031  !forgetGateBiasPin.IsValid() ||
4032  !cellBiasPin.IsValid() ||
4033  !outputGateBiasPin.IsValid())
4034  {
4035  return Fail("%s: Operation has invalid tensor inputs", __func__);
4036  }
4037 
4038  // Get the optional input tensors:
4039 
4040  // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4041  // [num_units, input_size], where “num_units” corresponds to the number of cell units.
4042  const ConstTensorPin inputToInputWeightsPin =
4043  ConvertOperationInputToConstTensorPin(operation,
4044  1,
4045  model,
4046  data,
4047  g_DontPermute,
4048  nullptr,
4049  true);
4050 
4051  // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4052  // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
4053  // “num_units”), or the second dimension of the “projection_weights”, if defined.
4054  const ConstTensorPin recurrentToInputWeightsPin =
4055  ConvertOperationInputToConstTensorPin(operation,
4056  5,
4057  model,
4058  data,
4059  g_DontPermute,
4060  nullptr,
4061  true);
4062 
4063  // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4064  // [num_units].
4065  const ConstTensorPin cellToInputWeightsPin =
4066  ConvertOperationInputToConstTensorPin(operation,
4067  9,
4068  model,
4069  data,
4070  g_DontPermute,
4071  nullptr,
4072  true);
4073 
4074  // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4075  // [num_units].
4076  const ConstTensorPin cellToForgetWeightsPin =
4077  ConvertOperationInputToConstTensorPin(operation,
4078  10,
4079  model,
4080  data,
4081  g_DontPermute,
4082  nullptr,
4083  true);
4084 
4085  // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4086  // [num_units].
4087  const ConstTensorPin cellToOutputWeightsPin =
4088  ConvertOperationInputToConstTensorPin(operation,
4089  11,
4090  model,
4091  data,
4092  g_DontPermute,
4093  nullptr,
4094  true);
4095 
4096  // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
4097  const ConstTensorPin inputGateBiasPin =
4098  ConvertOperationInputToConstTensorPin(operation,
4099  12,
4100  model,
4101  data,
4102  g_DontPermute,
4103  nullptr,
4104  true);
4105 
4106  // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4107  // [output_size, num_units].
4108  const ConstTensorPin projectionWeightsPin =
4109  ConvertOperationInputToConstTensorPin(operation,
4110  16,
4111  model,
4112  data,
4113  g_DontPermute,
4114  nullptr,
4115  true);
4116 
4117  // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [output_size].
4118  const ConstTensorPin projectionBiasPin =
4119  ConvertOperationInputToConstTensorPin(operation,
4120  17,
4121  model,
4122  data,
4123  g_DontPermute,
4124  nullptr,
4125  true);
4126 
4127  if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional())
4128  || (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional())
4129  || (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional())
4130  || (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional())
4131  || (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional())
4132  || (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional())
4133  || (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional())
4134  || (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
4135  {
4136  return Fail("%s: Operation has invalid tensor inputs", __func__);
4137  }
4138 
4139 
4140  // Get the optional normalization tensors
4141 
4142  // 20: The input layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
4143  // Used to rescale normalized inputs to activation at input gate.
4144  const ConstTensorPin inputLayerNormWeightsPin =
4145  ConvertOperationInputToConstTensorPin(operation,
4146  20,
4147  model,
4148  data,
4149  g_DontPermute,
4150  nullptr,
4151  true);
4152 
4153  // 21: The forget layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM
4154  // Used to rescale normalized inputs to activation at forget gate.
4155  const ConstTensorPin forgetLayerNormWeightsPin =
4156  ConvertOperationInputToConstTensorPin(operation,
4157  21,
4158  model,
4159  data,
4160  g_DontPermute,
4161  nullptr,
4162  true);
4163 
4164  // 22: The cell layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
4165  // Used to rescale normalized inputs to activation at cell gate.
4166  const ConstTensorPin cellLayerNormWeightsPin =
4167  ConvertOperationInputToConstTensorPin(operation,
4168  22,
4169  model,
4170  data,
4171  g_DontPermute,
4172  nullptr,
4173  true);
4174 
4175  // 23: The output layer normalization weights. A 1-D tensor of shape [num_units].
4176  // Used to rescale normalized inputs to activation at output gate.
4177  const ConstTensorPin outputLayerNormWeightsPin =
4178  ConvertOperationInputToConstTensorPin(operation,
4179  23,
4180  model,
4181  data,
4182  g_DontPermute,
4183  nullptr,
4184  true);
4185 
4186  if ((!inputLayerNormWeightsPin.IsValid() && !inputLayerNormWeightsPin.IsOptional())
4187  || (!forgetLayerNormWeightsPin.IsValid() && !forgetLayerNormWeightsPin.IsOptional())
4188  || (!cellLayerNormWeightsPin.IsValid() && !cellLayerNormWeightsPin.IsOptional())
4189  || (!outputLayerNormWeightsPin.IsValid() && !outputLayerNormWeightsPin.IsOptional()))
4190  {
4191  return Fail("%s: Operation has invalid tensor inputs", __func__);
4192  }
4193 
4194  // Get the optional input scalars:
4195  // 24: The cell clip: If provided the cell state is clipped by this value prior to the cell output activation.
4196  // 25: The projection clip: If provided and projection is enabled, this is used for clipping the projected values.
4197 
4198  // Get the mandatory input scalars:
4199  // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
4200  // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
4201  // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
4202  // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
4203  // 30: The zero point of the hidden state, i.e. input to projection.
4204  // 31: The scale of the hidden state, i.e. input to projection.
4205  float cellClip, projClip, matMulInputGate, matMulForgetGate, matMulCellGate, matMulOutputGate, projInputScale;
4206  int projInputZeroPoint;
4207 
4208  if (!GetInputScalar(operation, 24, OperandType::FLOAT32, cellClip, model, data, true) ||
4209  !GetInputScalar(operation, 25, OperandType::FLOAT32, projClip, model, data, true) ||
4210  !GetInputScalar(operation, 26, OperandType::FLOAT32, matMulInputGate, model, data) ||
4211  !GetInputScalar(operation, 27, OperandType::FLOAT32, matMulForgetGate, model, data) ||
4212  !GetInputScalar(operation, 28, OperandType::FLOAT32, matMulCellGate, model, data) ||
4213  !GetInputScalar(operation, 29, OperandType::FLOAT32, matMulOutputGate, model, data) ||
4214  !GetInputScalar(operation, 30, OperandType::INT32, projInputZeroPoint, model, data) ||
4215  !GetInputScalar(operation, 31, OperandType::FLOAT32, projInputScale, model, data))
4216  {
4217  return Fail("%s: Operation has invalid scalar inputs", __func__);
4218  }
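 // Editor's note (illustrative, not part of the original source): the clip scalars (inputs 24/25)
 // are read with the optional flag set (last argument true). By LSTM convention a clip threshold
 // of 0.0f disables clipping, while e.g. cellClip = 10.0f would clamp the cell state to
 // [-10.0f, 10.0f] before the output activation.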
4219 
4220  // Outputs:
4221  // 0: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size,
4222  // output_size].
4223  const Operand* outputStateOut = GetOutputOperand(operation, 0, model);
4224  if (!outputStateOut)
4225  {
4226  return Fail("%s: Could not read output 0: outputStateOut", __func__);
4227  }
4228 
4229  // 1: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
4230  const Operand* cellStateOut = GetOutputOperand(operation, 1, model);
4231  if (!cellStateOut)
4232  {
4233  return Fail("%s: Could not read output 1: cellStateOut", __func__);
4234  }
4235 
4236  // 2: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size, output_size].
4237  // This is effectively the same as the current “output state (out)” value.
4238  const Operand* output = GetOutputOperand(operation, 2, model);
4239  if (!output)
4240  {
4241  return Fail("%s: Could not read output 2: output", __func__);
4242  }
4243 
4244  // set the params structure for the AddQLstmLayer call
4245  LstmInputParams params;
4246  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
4247  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
4248  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
4249  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
4250  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
4251  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
4252  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
4253  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
4254  params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
4255  params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
4256  params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
4257  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
4258  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
4259  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4260  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4261  params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
4262  params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
4263  params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
4264  params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
4265  params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
4266  params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
4267 
4268  // set the layer descriptor
4269  QLstmDescriptor desc;
4270  desc.m_CellClip = cellClip;
4271  desc.m_ProjectionClip = projClip;
4272  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
4273  params.m_RecurrentToInputWeights == nullptr ||
4274  params.m_InputGateBias == nullptr);
4275  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
4276  params.m_CellToOutputWeights != nullptr);
4277  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
4278  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
4279  params.m_ForgetLayerNormWeights != nullptr ||
4280  params.m_CellLayerNormWeights != nullptr ||
4281  params.m_OutputLayerNormWeights != nullptr);
4282  desc.m_InputIntermediateScale = matMulInputGate;
4283  desc.m_ForgetIntermediateScale = matMulForgetGate;
4284  desc.m_CellIntermediateScale = matMulCellGate;
4285  desc.m_OutputIntermediateScale = matMulOutputGate;
4286  desc.m_HiddenStateScale = projInputScale;
4287  desc.m_HiddenStateZeroPoint = projInputZeroPoint;
4288 
4289  // validate the optional input groups
4290  if (desc.m_CifgEnabled &&
4291  (params.m_InputToInputWeights != nullptr ||
4292  params.m_RecurrentToInputWeights != nullptr ||
4293  params.m_InputGateBias != nullptr))
4294  {
4295  return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
4296  " and input gate bias must be provided", __func__);
4297  }
4298 
4299  if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
4300  {
4301  return Fail("%s: projection bias should not be provided without projection weights", __func__);
4302  }
4303 
4304  if (desc.m_PeepholeEnabled &&
4305  (params.m_CellToForgetWeights == nullptr ||
4306  params.m_CellToOutputWeights == nullptr ||
4307  (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
4308  {
4309  return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
4310  " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
4311  }
4312 
4313  if (desc.m_LayerNormEnabled &&
4314  (params.m_ForgetLayerNormWeights == nullptr ||
4315  params.m_CellLayerNormWeights == nullptr ||
4316  params.m_OutputLayerNormWeights == nullptr ||
4317  (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
4318  {
4319  return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
4320  " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
4321  }
4322 
4323  // Basic parameters
4324  LstmInputParamsInfo paramsInfo;
4325  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4326  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4327  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4328  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4329  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4330  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4331  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4332  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4333  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4334 
4335  // Inputs
4336  const TensorInfo& inputInfo = input.GetTensorInfo();
4337  const TensorInfo& outputStatePrevTimeStepInfo = outputStatePrevTimeStep.GetTensorInfo();
4338  const TensorInfo& cellStatePrevTimeStepInfo = cellStatePrevTimeStep.GetTensorInfo();
4339 
4340  // Outputs
4341  TensorInfo outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
4342  TensorInfo outputInfo = GetTensorInfoForOperand(*output);
4343  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4344 
4345  // Optional parameters
4346  if (!desc.m_CifgEnabled)
4347  {
4348  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4349  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4350  if (desc.m_PeepholeEnabled)
4351  {
4352  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
4353  }
4354  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4355  }
4356 
4357 
4358  if (desc.m_ProjectionEnabled)
4359  {
4360  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
4361  if (params.m_ProjectionBias != nullptr)
4362  {
4363  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
4364  }
4365  }
4366  else
4367  {
4368  // If Projection is disabled, override non-const outputs to change the quant info with hidden params, then
4369  // create a new const TensorInfo based on this
4370  outputStateOutInfo.SetQuantizationScale(projInputScale);
4371  outputStateOutInfo.SetQuantizationOffset(projInputZeroPoint);
4372  outputInfo.SetQuantizationScale(projInputScale);
4373  outputInfo.SetQuantizationOffset(projInputZeroPoint);
4374  }
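 // Editor's note (illustrative, not part of the original source): with projection disabled the
 // QLSTM output is the hidden state itself, so the hidden-state quantization parameters
 // (inputs 30/31) are copied onto both output tensors above. E.g. with a hypothetical
 // projInputScale = 0.007f and projInputZeroPoint = 0, the outputs would dequantize as
 // realValue = 0.007f * (quantValue - 0).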
4375 
4376  const TensorInfo constOutputStateOutInfo(outputStateOutInfo);
4377  const TensorInfo constOutputInfo(outputInfo);
4378 
4379  if (desc.m_PeepholeEnabled)
4380  {
4381  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
4382  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
4383  }
4384 
4385  if (desc.m_LayerNormEnabled)
4386  {
4387  if(!desc.m_CifgEnabled)
4388  {
4389  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
4390  }
4391  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
4392  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
4393  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
4394  }
4395 
4396  // Check if the layer is supported
4397  bool isSupported = false;
4398  armnn::BackendId setBackend;
4399  auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
4400  {
4401  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4402  IsQLstmSupported,
4403  data.m_Backends,
4404  isSupported,
4405  setBackend,
4406  inputInfo,
4407  outputStatePrevTimeStepInfo,
4408  cellStatePrevTimeStepInfo,
4409  constOutputStateOutInfo,
4410  cellStateOutInfo,
4411  constOutputInfo,
4412  desc,
4413  paramsInfo);
4414  };
4415 
4416  bool isDynamic = false;
4417  if (!IsDynamicTensor(constOutputStateOutInfo) &&
4418  !IsDynamicTensor(cellStateOutInfo) &&
4419  !IsDynamicTensor(constOutputInfo))
4420  {
4421  validateFunc(cellStateOutInfo, isSupported);
4422  }
4423  else
4424  {
4425  isDynamic = true;
4426  isSupported = AreDynamicTensorsSupported();
4427  }
4428 
4429  if (!isSupported)
4430  {
4431  return false;
4432  }
4433 
4434  // Add the layer
4435  IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
4436  layer->SetBackendId(setBackend);
4437 
4438  input.Connect(layer->GetInputSlot(0));
4439  outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
4440  cellStatePrevTimeStep.Connect(layer->GetInputSlot(2));
4441 
4442  if (!isDynamic)
4443  {
4444  return (SetupAndTrackLayerOutputSlot(
4445  operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4446  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
4447  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4448  }
4449  else
4450  {
4451  return (SetupAndTrackLayerOutputSlot(
4452  operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4453  SetupAndTrackLayerOutputSlot(
4454  operation, 1, *layer, 1, model, data, nullptr, validateFunc,
4455  ActivationFn::kActivationNone, true) &&
4456  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4457  }
4458 }
4459 
4460 bool Converter::ConvertQuantized16BitLstm(const Operation& operation, const Model& model, ConversionData& data)
4461 {
4462  VLOG(DRIVER) << "Converter::ConvertQuantized16BitLstm()";
4464 
4465  //Inputs:
4466  // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
4467  // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
4468  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4469  if (!input.IsValid())
4470  {
4471  return Fail("%s: Could not read input 0: input", __func__);
4472  }
4473 
4474  //13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
4475  // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
4476  // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
4477  LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle(operation, 13, model, data);
4478  if (!previousCellStateIn.IsValid())
4479  {
4480  return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
4481  }
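 // Editor's note (worked example, not part of the original source): the fixed QUANT16_SYMM
 // cell-state range [-2^4, 2^4 * 32767/32768] corresponds to scale = 16.0f / 32768.0f (= 1/2048):
 // the raw value 32767 dequantizes to 32767 / 2048 ≈ 15.9995f and -32768 to exactly -16.0f.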
4482 
4483  // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4484  // [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
4485  // is quantized with a fixed quantization range of -1, 127/128.
4486  LayerInputHandle previousOutputIn = ConvertToLayerInputHandle(operation, 14, model, data);
4487  if (!previousOutputIn.IsValid())
4488  {
4489  return Fail("%s: Could not read input 14: previousOutputIn", __func__);
4490  }
4491 
4492  // Get the input tensors:
4493  // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4494  // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
4495  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4496  const ConstTensorPin inputToInputWeightsPin =
4497  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
4498 
4499  // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4500  // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
4501  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4502  const ConstTensorPin inputToForgetWeightsPin =
4503  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
4504 
4505  // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4506  // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
4507  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4508  const ConstTensorPin inputToCellWeightsPin =
4509  ConvertOperationInputToConstTensorPin(operation, 3, model, data);
4510 
4511  // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4512  // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
4513  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4514  const ConstTensorPin inputToOutputWeightsPin =
4515  ConvertOperationInputToConstTensorPin(operation, 4, model, data);
4516 
4517  // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4518  // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
4519  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4520  const ConstTensorPin recurrentToInputWeightsPin =
4521  ConvertOperationInputToConstTensorPin(operation, 5, model, data);
4522 
4523  // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4524  // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
4525  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4526  const ConstTensorPin recurrentToForgetWeightsPin =
4527  ConvertOperationInputToConstTensorPin(operation, 6, model, data);
4528 
4529  // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4530  // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
4531  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4532  const ConstTensorPin recurrentToCellWeightsPin =
4533  ConvertOperationInputToConstTensorPin(operation, 7, model, data);
4534 
4535  // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4536  // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
4537  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4538  const ConstTensorPin recurrentToOutputWeightsPin =
4539  ConvertOperationInputToConstTensorPin(operation, 8, model, data);
4540 
4541  // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
4542  // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4543  // of input and weights scales and zeroPoint equal to 0.
4544  const ConstTensorPin inputGateBiasPin =
4545  ConvertOperationInputToConstTensorPin(operation, 9, model, data);
4546 
4547  // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
4548  // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4549  // of input and weights scales and zeroPoint equal to 0.
4550  const ConstTensorPin forgetGateBiasPin =
4551  ConvertOperationInputToConstTensorPin(operation, 10, model, data);
4552 
4553  // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
4554  // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
4555  // and weights scales and zeroPoint equal to 0.
4556  const ConstTensorPin cellBiasPin =
4557  ConvertOperationInputToConstTensorPin(operation, 11, model, data);
4558 
4559  // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
4560  // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4561  // of input and weights scales and zeroPoint equal to 0.
4562  const ConstTensorPin outputGateBiasPin =
4563  ConvertOperationInputToConstTensorPin(operation, 12, model, data);
4564 
4565  if (!inputToInputWeightsPin.IsValid() ||
4566  !inputToForgetWeightsPin.IsValid() ||
4567  !inputToCellWeightsPin.IsValid() ||
4568  !inputToOutputWeightsPin.IsValid() ||
4569  !recurrentToInputWeightsPin.IsValid() ||
4570  !recurrentToForgetWeightsPin.IsValid() ||
4571  !recurrentToCellWeightsPin.IsValid() ||
4572  !recurrentToOutputWeightsPin.IsValid() ||
4573  !inputGateBiasPin.IsValid() ||
4574  !forgetGateBiasPin.IsValid() ||
4575  !cellBiasPin.IsValid() ||
4576  !outputGateBiasPin.IsValid())
4577  {
4578  return Fail("%s: Operation has invalid tensor inputs", __func__);
4579  }
4580 
4581  // Outputs:
4582  // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
4583  // which contains a cell state from the current time step. Tensor is quantized using a quantization range
4584  // of -2^4, 2^4 * 32767/32768.
4585  const Operand* cellStateOut = GetOutputOperand(operation, 0, model);
4586  if (!cellStateOut)
4587  {
4588  return Fail("%s: Could not read output 0: cellStateOut", __func__);
4589  }
4590 
4591  // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
4592  // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
4593  const Operand* output = GetOutputOperand(operation, 1, model);
4594  if (!output)
4595  {
4596  return Fail("%s: Could not read output 1: output", __func__);
4597  }
4598 
4599  // Inputs
4600  const TensorInfo& inputInfo = input.GetTensorInfo();
4601  const TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
4602  const TensorInfo& previousOutputInInfo = previousOutputIn.GetTensorInfo();
4603 
4604  // Outputs
4605  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4606  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4607 
4608  // Dynamic tensors currently not supported
4609  if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
4610  {
4611  return Fail("%s: Dynamic output tensors are not supported", __func__);
4612  }
4613 
4614  QuantizedLstmInputParams params;
4615 
4616  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
4617  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
4618  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
4619  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
4620  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
4621  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
4622  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
4623  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
4624  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
4625  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
4626  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4627  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4628 
4629  QuantizedLstmInputParamsInfo paramsInfo;
4630  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4631  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4632  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4633  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4634  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4635  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4636  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4637  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4638  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4639  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4640  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4641  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4642 
4643  bool isSupported = false;
4644  armnn::BackendId setBackend;
4645  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4646  {
4647  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4648  IsQuantizedLstmSupported,
4649  data.m_Backends,
4650  isSupported,
4651  setBackend,
4652  inputInfo,
4653  previousCellStateInInfo,
4654  previousOutputInInfo,
4655  cellStateOutInfo,
4656  outputInfo,
4657  paramsInfo);
4658  };
4659 
4660  bool isDynamic = false;
4661  if (!IsDynamicTensor(cellStateOutInfo) &&
4662  !IsDynamicTensor(outputInfo))
4663  {
4664  validateFunc(outputInfo, isSupported);
4665  }
4666  else
4667  {
4668  isDynamic = true;
4669  isSupported = AreDynamicTensorsSupported();
4670  }
4671 
4672  if (!isSupported)
4673  {
4674  return false;
4675  }
4676 
4677  IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
4678  layer->SetBackendId(setBackend);
4679  input.Connect(layer->GetInputSlot(0));
4680  previousCellStateIn.Connect(layer->GetInputSlot(1));
4681  previousOutputIn.Connect(layer->GetInputSlot(2));
4682 
4683  if (!isDynamic)
4684  {
4685  return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
4686  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data));
4687  }
4688  else
4689  {
4690  return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
4691  SetupAndTrackLayerOutputSlot(
4692  operation, 1, *layer, 1, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
4693  }
4694 
4695 }
4696 
4697 bool Converter::ConvertRank(const Operation& operation, const Model& model, ConversionData& data)
4698 {
4699  VLOG(DRIVER) << "Converter::ConvertRank()";
4700 
4701  const Operand* inputOperand = GetInputOperand(operation, 0, model);
4702  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4703 
4704  if (inputOperand == nullptr || outputOperand == nullptr)
4705  {
4706  return Fail("%s: Operation has invalid inputs", __func__);
4707  }
4708 
4709  const Shape inputOperandShape = GetOperandShape(*inputOperand);
4710  const Shape outputOperandShape = GetOperandShape(*outputOperand);
4711 
4712  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4713  if (!input.IsValid())
4714  {
4715  return Fail("%s: Could not read input 0", __func__);
4716  }
4717 
4718  armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
4719  if (IsDynamicTensor(outInfo))
4720  {
4721  return Fail("%s: Dynamic output tensors are not supported", __func__);
4722  }
4723 
4724  bool isSupported = false;
4725  armnn::BackendId setBackend;
4726  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4727  IsRankSupported,
4728  data.m_Backends,
4729  isSupported,
4730  setBackend,
4731  input.GetTensorInfo(),
4732  outInfo);
4733  if (!isSupported)
4734  {
4735  return false;
4736  }
4737 
4738  armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
 4739  assert(layer != nullptr);
 4740  layer->SetBackendId(setBackend);
4741  input.Connect(layer->GetInputSlot(0));
4742 
4743  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, &outInfo);
4744 }
4745 
4746 bool Converter::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
4747 {
4748  VLOG(DRIVER) << "Converter::ConvertReLu()";
 4749  armnn::ActivationDescriptor desc;
 4750  desc.m_Function = armnn::ActivationFunction::ReLu;
 4751 
4752 
4753  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4754  if (!input.IsValid())
4755  {
 4756  return Fail("%s: Input 0 is invalid", __func__);
4757  }
4758 
4759  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4760  if (!outputOperand)
4761  {
4762  return false;
4763  }
4764 
4765  const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
4766 
4767  bool isSupported = false;
4768  armnn::BackendId setBackend;
4769  auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
4770  {
 4771  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 4772  IsActivationSupported,
4773  data.m_Backends,
4774  isSupported,
4775  setBackend,
4776  input.GetTensorInfo(),
4777  outInfo,
4778  desc);
4779  };
4780 
4781  if(IsDynamicTensor(outInfo))
4782  {
4783  isSupported = AreDynamicTensorsSupported();
4784  }
4785  else
4786  {
4787  validateFunc(outInfo, isSupported);
4788  }
4789 
4790  if (!isSupported)
4791  {
4792  return false;
4793  }
4794 
4795  armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
 4796  ARMNN_ASSERT(layer != nullptr);
 4797  layer->SetBackendId(setBackend);
4798  input.Connect(layer->GetInputSlot(0));
4799 
4800  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4801 }
4802 
4803 bool Converter::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
4804 {
 4805  VLOG(DRIVER) << "Converter::ConvertReLu1()";
 4806  armnn::ActivationDescriptor desc;
 4807  desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4808  desc.m_A = 1.0f;
4809  desc.m_B = -1.0f;
4810 
4811  return ConvertToActivation(operation, __func__, desc, model, data);
4812 }
4813 
4814 bool Converter::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
4815 {
 4816  VLOG(DRIVER) << "Converter::ConvertReLu6()";
 4817  armnn::ActivationDescriptor desc;
 4818  desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4819  desc.m_A = 6.0f;
4820 
4821  return ConvertToActivation(operation, __func__, desc, model, data);
4822 }
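
RELU1 and RELU6 both lower to the BoundedReLu activation, where m_A is the upper bound and m_B the lower bound (m_B is left at its default of 0 in ConvertReLu6). A reference sketch of the assumed clamping semantics, f(x) = min(A, max(B, x)):

    #include <algorithm>
    #include <cassert>

    // Reference behaviour of BoundedReLu as configured by the converters above.
    float BoundedReLu(float x, float a, float b)
    {
        return std::min(a, std::max(b, x));
    }

    int main()
    {
        assert(BoundedReLu( 3.0f, 1.0f, -1.0f) ==  1.0f); // RELU1 clamps to [-1, 1]
        assert(BoundedReLu(-2.0f, 1.0f, -1.0f) == -1.0f);
        assert(BoundedReLu( 9.0f, 6.0f,  0.0f) ==  6.0f); // RELU6 clamps to [0, 6]
    }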
4823 
4824 bool Converter::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
4825 {
4826  VLOG(DRIVER) << "Converter::ConvertReshape()";
4827 
4828  const Operand* inputOperand = GetInputOperand(operation, 0, model);
4829  const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
4830  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4831 
4832  if (inputOperand == nullptr
4833  || requestedShapeOperand == nullptr
4834  || outputOperand == nullptr)
4835  {
4836  return Fail("%s: Operation has invalid inputs", __func__);
4837  }
4838 
4839  if (requestedShapeOperand->dimensions.size() != 1)
4840  {
 4841  return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
 4842  __func__, static_cast<int>(requestedShapeOperand->dimensions.size()));
4843  }
4844 
4845  std::vector<int32_t> targetDimensions;
4846  if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
4847  {
4848  return Fail("%s: Could not read values of input 1", __func__);
4849  }
4850 
4851  const Shape inputOperandShape = GetOperandShape(*inputOperand);
4852 
4853  Shape requestedShape;
4854  // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
4855  // function that resolves these values into a fully specified tensor shape.
4856  if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
4857  {
4858  return Fail("%s: Failed to resolve the requested shape", __func__);
4859  }
4860 
4861  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4862  if (!input.IsValid())
4863  {
4864  return Fail("%s: Could not read input 0", __func__);
4865  }
4866 
4867  armnn::ReshapeDescriptor reshapeDescriptor;
4868  reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
4869  requestedShape.dimensions.data());
4870 
4871  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
4872 
4873  bool isSupported = false;
4874  armnn::BackendId setBackend;
4875  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4876  {
 4877  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 4878  IsReshapeSupported,
4879  data.m_Backends,
4880  isSupported,
4881  setBackend,
4882  input.GetTensorInfo(),
4883  outputInfo,
4884  reshapeDescriptor);
4885  };
4886 
4887  if(!IsDynamicTensor(outputInfo))
4888  {
4889  validateFunc(outputInfo, isSupported);
4890  }
4891  else
4892  {
4893  isSupported = AreDynamicTensorsSupported();
4894  }
4895 
4896  if (!isSupported)
4897  {
4898  return false;
4899  }
4900 
4901  armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
 4902  assert(layer != nullptr);
 4903  layer->SetBackendId(setBackend);
4904  input.Connect(layer->GetInputSlot(0));
4905 
4906  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4907 }
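
reshapePrepare() resolves at most one -1 wildcard in the requested shape from the input's element count. The helper below is an illustrative reimplementation of that resolution, not the AndroidNN utility itself:

    #include <cstdint>
    #include <vector>

    bool ResolveShape(uint32_t numElements, std::vector<int32_t>& target)
    {
        int64_t known = 1;   // product of the fully specified dimensions
        int wildcard = -1;   // index of the single permitted -1, if any
        for (size_t i = 0; i < target.size(); ++i)
        {
            if (target[i] == -1)
            {
                if (wildcard != -1) { return false; } // at most one -1 is allowed
                wildcard = static_cast<int>(i);
            }
            else
            {
                known *= target[i];
            }
        }
        if (wildcard != -1)
        {
            if (known == 0 || numElements % known != 0) { return false; }
            target[wildcard] = static_cast<int32_t>(numElements / known);
            return true;
        }
        return known == numElements; // no wildcard: the products must agree
    }

    // Example: with 24 input elements and target = {2, -1, 4},
    // ResolveShape rewrites target to {2, 3, 4}.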
4908 
4909 bool Converter::ConvertResize(const Operation& operation,
4910  const Model& model,
4911  ConversionData& data,
4912  ResizeMethod resizeMethod)
4913 {
4914  VLOG(DRIVER) << "Converter::ConvertResize()";
4915  VLOG(DRIVER) << "resizeMethod = " << GetResizeMethodAsCString(resizeMethod);
4916 
4917  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4918  if (!input.IsValid())
4919  {
4920  return Fail("%s: Could not read input 0", __func__);
4921  }
4922 
4923  const Operand* output = GetOutputOperand(operation, 0, model);
4924  if (!output)
4925  {
4926  return Fail("%s: Could not read output 0", __func__);
4927  }
4928 
4929  const TensorInfo& inputInfo = input.GetTensorInfo();
4930  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4931 
4932  ResizeDescriptor descriptor;
4933  descriptor.m_Method = resizeMethod;
4934  descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
4935 
4936  OperandType operandType1;
4937  OperandType operandType2;
4938 
4939  if (!GetOperandType(operation, 1, model, operandType1) ||
4940  !GetOperandType(operation, 2, model, operandType2))
4941  {
4942  return Fail("%s: Operation has invalid inputs", __func__);
4943  }
4944 
4945  if (operandType1 != operandType2)
4946  {
4947  return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
4948  }
4949 
4950  if (operandType1 == OperandType::INT32)
4951  {
4952  // Case 1: resizing by shape
4953  int32_t targetWidth = 0;
4954  int32_t targetHeight = 0;
4955 
4956  if (!GetInputInt32(operation, 1, targetWidth, model, data) ||
4957  !GetInputInt32(operation, 2, targetHeight, model, data))
4958  {
4959  return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
4960  }
4961 
4962  if (targetWidth < 0 || targetHeight < 0)
4963  {
4964  return Fail("%s: Operation has invalid inputs for resizing by shape. "
4965  "Target width/height cannot be < 0", __func__);
4966  }
4967 
4968  descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
4969  descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
4970  }
4971  else if (operandType1 == OperandType::FLOAT32)
4972  {
4973  // Case 2: resizing by scale
4974  float widthScale = 1.0f;
4975  float heightScale = 1.0f;
4976 
4977  if (!GetInputFloat32(operation, 1, widthScale, model, data) ||
4978  !GetInputFloat32(operation, 2, heightScale, model, data))
4979  {
4980  return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4981  }
4982 
4983  const TensorShape& inputShape = inputInfo.GetShape();
4984  armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4985 
4986  float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
4987  float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
4988 
4989  descriptor.m_TargetWidth = std::floor(width * widthScale);
4990  descriptor.m_TargetHeight = std::floor(height * heightScale);
4991  }
4992  else if (operandType1 == OperandType::FLOAT16)
4993  {
4994  Half widthScale;
4995  Half heightScale;
4996 
4997  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, widthScale, model, data) ||
4998  !GetInputScalar(operation, 2, OperandType::FLOAT16, heightScale, model, data))
4999  {
5000  return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
5001  }
5002 
5003  const TensorShape& inputShape = inputInfo.GetShape();
5004  armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
5005 
5006  Half width = static_cast<Half>(inputShape[dataLayoutIndexed.GetWidthIndex()]);
5007  Half height = static_cast<Half>(inputShape[dataLayoutIndexed.GetHeightIndex()]);
5008 
5009  descriptor.m_TargetWidth = std::floor(width * widthScale);
5010  descriptor.m_TargetHeight = std::floor(height * heightScale);
5011  }
5012  else
5013  {
5014  return Fail("%s: Operand has invalid data type for resizing by scale", __func__);
5015  }
5016 
5017  descriptor.m_AlignCorners = GetOptionalBool(operation, 4, model, data);
5018  descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data);
5019 
5020  bool isSupported = false;
5021  armnn::BackendId setBackend;
5022  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5023  {
 5024  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 5025  IsResizeSupported,
5026  data.m_Backends,
5027  isSupported,
5028  setBackend,
5029  inputInfo,
5030  outputInfo,
5031  descriptor);
5032  };
5033 
5034  if(IsDynamicTensor(outputInfo))
5035  {
5036  isSupported = AreDynamicTensorsSupported();
5037  }
5038  else
5039  {
5040  validateFunc(outputInfo, isSupported);
5041  }
5042 
5043  if (!isSupported)
5044  {
5045  return false;
5046  }
5047 
5048  IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
 5049  assert(layer != nullptr);
 5050  layer->SetBackendId(setBackend);
5051  input.Connect(layer->GetInputSlot(0));
5052 
5053  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5054 }
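
In the scale paths above the target extents are floor(extent * scale). A small worked example of that arithmetic:

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        uint32_t inputW = 10, inputH = 7;
        float widthScale = 1.5f, heightScale = 2.0f;
        // Matches descriptor.m_TargetWidth/m_TargetHeight assignments above.
        uint32_t targetW = static_cast<uint32_t>(std::floor(inputW * widthScale));  // 15
        uint32_t targetH = static_cast<uint32_t>(std::floor(inputH * heightScale)); // 14
        std::cout << targetW << "x" << targetH << "\n";
    }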
5055 
5056 bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
5057 {
5058  VLOG(DRIVER) << "Converter::ConvertSpaceToBatchNd()";
5059 
5060  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5061  if(!input.IsValid())
5062  {
5063  return Fail("%s: Operation has invalid inputs", __func__);
5064  }
5065 
5066  const armnn::TensorInfo &inputInfo = input.GetTensorInfo();
5067  unsigned int rank = inputInfo.GetNumDimensions();
5068  unsigned int spatialDim = rank - 2;
5069 
5070  if(rank != 4)
5071  {
 5072  return Fail("%s: Only inputs with rank 4 are supported", __func__);
5073  }
5074 
5075  const Operand *output = GetOutputOperand(operation, 0, model);
5076  if(!output)
5077  {
5078  return Fail("%s: Could not read output 0", __func__);
5079  }
5080 
5081  const armnn::TensorInfo &outputInfo = GetTensorInfoForOperand(*output);
5082 
5083  const Operand *blockShapeOperand = GetInputOperand(operation, 1, model);
5084  const Operand *paddingsOperand = GetInputOperand(operation, 2, model);
5085 
5086  armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
5087  if(blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
5088  {
5089  return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
5090  }
5091 
5092  std::vector<int32_t> blockShape;
5093  if(!GetTensorInt32Values(*blockShapeOperand, blockShape, model, data))
5094  {
5095  return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
5096  }
5097  if(std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i)
5098  { return i < 1; }))
5099  {
5100  return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
5101  }
5102 
5103  armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
5104  if(paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
5105  {
5106  return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
5107  }
5108 
5109  std::vector<std::pair<unsigned int, unsigned int>> paddingList;
5110  std::vector<int32_t> paddings;
5111  if(!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
5112  {
5113  return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
5114  }
5115  for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
5116  {
5117  int paddingBeforeInput = paddings[i];
5118  int paddingAfterInput = paddings[i + 1];
5119  if(paddingBeforeInput < 0 || paddingAfterInput < 0)
5120  {
5121  return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
5122  }
5123 
5124  paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
5125  }
 5126 
 5127  armnn::SpaceToBatchNdDescriptor descriptor;
 5128  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
5129  descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
5130  descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
5131 
5132  if(Is12OrLaterOperand(*output))
5133  {
5134  descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
5135  }
5136 
5137  bool isSupported = false;
5138  armnn::BackendId setBackend;
5139  auto validateFunc = [&](const armnn::TensorInfo &outputInfo, bool &isSupported)
5140  {
 5141  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 5142  IsSpaceToBatchNdSupported,
5143  data.m_Backends,
5144  isSupported,
5145  setBackend,
5146  inputInfo,
5147  outputInfo,
5148  descriptor);
5149  };
5150 
5151  if(IsDynamicTensor(outputInfo))
5152  {
5153  isSupported = AreDynamicTensorsSupported();
5154  } else
5155  {
5156  validateFunc(outputInfo, isSupported);
5157  }
5158 
5159  if(!isSupported)
5160  {
5161  return false;
5162  }
5163 
5164  armnn::IConnectableLayer *const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
 5165  assert(layer != nullptr);
 5166  layer->SetBackendId(setBackend);
5167  input.Connect(layer->GetInputSlot(0));
5168 
5169  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5170 }
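
The shape arithmetic implied by the block-shape and paddings operands: each padded spatial extent must be an exact multiple of its block value, and the quotients move into the batch dimension. A sketch assuming NHWC layout:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        uint32_t n = 1, h = 5, w = 5, c = 3;
        uint32_t blockH = 2, blockW = 2;
        uint32_t padTop = 1, padBottom = 0, padLeft = 1, padRight = 0;

        uint32_t paddedH = h + padTop + padBottom; // 6, must divide by blockH
        uint32_t paddedW = w + padLeft + padRight; // 6, must divide by blockW
        uint32_t outN = n * blockH * blockW;       // 4: spatial blocks move to batch
        uint32_t outH = paddedH / blockH;          // 3
        uint32_t outW = paddedW / blockW;          // 3
        std::cout << outN << "," << outH << "," << outW << "," << c << "\n"; // 4,3,3,3
    }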
5171 
5172 bool Converter::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
5173 {
5174  VLOG(DRIVER) << "Converter::ConvertSpaceToDepth()";
5175 
5176  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5177  if (!input.IsValid() )
5178  {
5179  return Fail("%s: Operation has invalid inputs", __func__);
5180  }
5181 
5182  const TensorInfo& inputInfo = input.GetTensorInfo();
5183  unsigned int rank = inputInfo.GetNumDimensions();
5184  if (rank != 4)
5185  {
5186  return Fail("%s: Only inputs with rank 4 are supported", __func__);
5187  }
5188 
5189  const Operand* output = GetOutputOperand(operation, 0, model);
5190  if (!output)
5191  {
5192  return Fail("%s: Could not read output 0", __func__);
5193  }
5194 
5195  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5196 
 5197  SpaceToDepthDescriptor desc;
 5198 
5199  GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
5200 
5201  if (desc.m_BlockSize <= 1)
5202  {
 5203  return Fail("%s: Block size must be greater than 1", __func__);
5204  }
5205 
5206  desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
5207 
5208  bool isSupported = false;
5209  armnn::BackendId setBackend;
5210  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5211  {
 5212  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 5213  IsSpaceToDepthSupported,
5214  data.m_Backends,
5215  isSupported,
5216  setBackend,
5217  inputInfo,
5218  outputInfo,
5219  desc);
5220  };
5221 
5222  if(IsDynamicTensor(outputInfo))
5223  {
5224  isSupported = AreDynamicTensorsSupported();
5225  }
5226  else
5227  {
5228  validateFunc(outputInfo, isSupported);
5229  }
5230 
5231  if (!isSupported)
5232  {
5233  return false;
5234  }
5235 
5236  IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
 5237  assert(layer != nullptr);
 5238  layer->SetBackendId(setBackend);
5239  input.Connect(layer->GetInputSlot(0));
5240 
5241  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5242 }
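
For SPACE_TO_DEPTH the block size read above shrinks H and W by the block size and grows the channel count by its square. A one-line check of that arithmetic (NHWC assumed):

    #include <cstdint>
    #include <iostream>

    int main()
    {
        uint32_t h = 4, w = 6, c = 3, block = 2;
        // H/block, W/block, C*block^2
        std::cout << h / block << "," << w / block << "," << c * block * block << "\n"; // 2,3,12
    }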
5243 
5244 bool Converter::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
5245 {
5246  VLOG(DRIVER) << "Converter::ConvertSoftmax()";
5247 
5248  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5249  if (!input.IsValid())
5250  {
5251  return Fail("%s: Operation has invalid inputs", __func__);
5252  }
5253 
5254  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
5255  if (!outputOperand)
5256  {
5257  return Fail("%s: Operation has no outputs", __func__);
5258  }
5259 
5260  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
5261 
5262  SoftmaxDescriptor desc;
5263  OperandType outputType = outputOperand->type;
5264 
5265  // Read beta value
5266  if (outputType == OperandType::TENSOR_FLOAT16)
5267  {
5268  Half value;
5269 
5270  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
5271  {
5272  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5273  }
5274 
5275  desc.m_Beta = static_cast<float>(value);
5276  }
5277  else
5278  {
5279  if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
5280  {
5281  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5282  }
5283  }
5284 
5285  if (operation.inputs.size() > 2 && !GetInputScalar(operation,
5286  2,
5287  OperandType::INT32,
5288  desc.m_Axis,
5289  model,
5290  data))
5291  {
5292  return Fail("%s: Operation has invalid inputs", __func__);
5293  }
5294 
5295  bool isSupported = false;
5296  armnn::BackendId setBackend;
5297  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5298  {
 5299  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 5300  IsSoftmaxSupported,
5301  data.m_Backends,
5302  isSupported,
5303  setBackend,
5304  input.GetTensorInfo(),
5305  outputInfo,
5306  desc);
5307  };
5308 
5309  if(IsDynamicTensor(outputInfo))
5310  {
5311  isSupported = AreDynamicTensorsSupported();
5312  }
5313  else
5314  {
5315  validateFunc(outputInfo, isSupported);
5316  }
5317 
5318  if (!isSupported)
5319  {
5320  return false;
5321  }
5322 
5323  IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
 5324  assert(layer != nullptr);
 5325  layer->SetBackendId(setBackend);
5326  input.Connect(layer->GetInputSlot(0));
5327 
5328  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5329 }
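
The beta scalar read above scales the logits before normalization: softmax(x_i) = exp(beta * x_i) / sum_j exp(beta * x_j). A reference sketch (the max-subtraction is a standard numerical-stability detail, not taken from this file):

    #include <algorithm>
    #include <cmath>
    #include <iostream>
    #include <vector>

    std::vector<float> Softmax(const std::vector<float>& x, float beta)
    {
        float maxVal = *std::max_element(x.begin(), x.end());
        std::vector<float> out(x.size());
        float sum = 0.0f;
        for (size_t i = 0; i < x.size(); ++i)
        {
            out[i] = std::exp(beta * (x[i] - maxVal)); // max-shift for stability
            sum += out[i];
        }
        for (float& v : out) { v /= sum; }
        return out;
    }

    int main()
    {
        for (float v : Softmax({1.0f, 2.0f, 3.0f}, 1.0f)) { std::cout << v << ' '; }
    }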
5330 
5331 bool Converter::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
5332 {
5333  VLOG(DRIVER) << "Converter::ConvertSub()";
5334 
5335  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
5336  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
5337 
5338  if (!input0.IsValid() || !input1.IsValid())
5339  {
5340  return Fail("%s: Operation has invalid inputs", __func__);
5341  }
5342 
 5343  // The FuseActivation parameter is always input index 2,
 5344  // and it is optional
5345  ActivationFn activationFunction;
5346  if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
5347  {
5348  return Fail("%s: Operation has invalid inputs", __func__);
5349  }
5350 
5351  const Operand* output = GetOutputOperand(operation, 0, model);
5352  if (!output)
5353  {
5354  return Fail("%s: Could not read output 0", __func__);
5355  }
5356 
5357  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5358 
5359  bool isSupported = false;
5360  armnn::BackendId setBackend;
5361  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5362  {
 5363  ARMNN_NO_DEPRECATE_WARN_BEGIN
 5364  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 5365  IsSubtractionSupported,
5366  data.m_Backends,
5367  isSupported,
5368  setBackend,
5369  input0.GetTensorInfo(),
5370  input1.GetTensorInfo(),
5371  outputInfo);
 5372  ARMNN_NO_DEPRECATE_WARN_END
 5373  };
5374 
5375  if(IsDynamicTensor(outputInfo))
5376  {
5377  isSupported = AreDynamicTensorsSupported();
5378  }
5379  else
5380  {
5381  validateFunc(outputInfo, isSupported);
5382  }
5383 
5384  if (!isSupported)
5385  {
5386  return false;
5387  }
5388 
 5389  ARMNN_NO_DEPRECATE_WARN_BEGIN
 5390  armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
 5391  ARMNN_NO_DEPRECATE_WARN_END
 5392  startLayer->SetBackendId(setBackend);
5393 
5394  bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
5395  if (!isReshapeSupported)
5396  {
5397  return false;
5398  }
5399  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
5400  data, nullptr, validateFunc, activationFunction);
5401 }
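
BroadcastTensor() reconciles mismatched input ranks before the subtraction layer is wired up. The sketch below only derives the broadcast result shape under NumPy-style rules (trailing dimensions must match or be 1); the driver's helper additionally inserts a reshape on the lower-rank input:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    bool BroadcastShape(std::vector<uint32_t> a, std::vector<uint32_t> b,
                        std::vector<uint32_t>& out)
    {
        size_t rank = std::max(a.size(), b.size());
        a.insert(a.begin(), rank - a.size(), 1); // left-pad the shorter shape with 1s
        b.insert(b.begin(), rank - b.size(), 1);
        out.resize(rank);
        for (size_t i = 0; i < rank; ++i)
        {
            if (a[i] != b[i] && a[i] != 1 && b[i] != 1) { return false; }
            out[i] = std::max(a[i], b[i]);
        }
        return true;
    }

    int main()
    {
        std::vector<uint32_t> out;
        bool ok = BroadcastShape({4, 1, 3}, {5, 3}, out); // out becomes {4, 5, 3}
        return ok ? 0 : 1;
    }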
5402 
5403 bool Converter::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
5404 {
5405  VLOG(DRIVER) << "Converter::ConvertTanH()";
5406 
 5407  armnn::ActivationDescriptor desc;
 5408  desc.m_Function = armnn::ActivationFunction::TanH;
 5409  desc.m_A = 1.0f; // android nn does not support tanH parameters
5410  desc.m_B = 1.0f; // set to 1.0f for unity scaling
5411 
5412  return ConvertToActivation(operation, __func__, desc, model, data);
5413 }
5414 
5415 bool Converter::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
5416 {
5417  VLOG(DRIVER) << "Converter::ConvertTransposeConv2d()";
5418 
5419  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5420 
5421  if (!input.IsValid())
5422  {
5423  return Fail("%s: Operation has invalid inputs", __func__);
5424  }
5425 
5426  const Operand* output = GetOutputOperand(operation, 0, model);
5427 
5428  if (!output)
5429  {
5430  return Fail("%s: Could not read output 0", __func__);
5431  }
5432 
5433  const TensorInfo& inputInfo = input.GetTensorInfo();
5434  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5435 
 5436  // ArmNN does not currently support non-fixed weights or bias
 5437  // Find the shape of the weights tensor. In AndroidNN this will be [ depth_out, H, W, depth_in ]
5438  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
5439 
5440  if (weightsOperand == nullptr)
5441  {
5442  return Fail("%s: Operand is invalid", __func__);
5443  }
 5444  TransposeConvolution2dDescriptor desc;
 5445  desc.m_DataLayout = DataLayout::NHWC;
5446 
5447  // Determine whether padding is implicit or explicit
5448  bool implicitPadding = operation.inputs.size() == 9;
5449 
 5450  if (implicitPadding)
5451  {
5452  desc.m_DataLayout = OptionalDataLayout(operation, 8, model, data);
5453  }
5454  else
5455  {
5456  desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
5457  }
5458 
5459  armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
5460  unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
5461  unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
5462 
5463  const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
5464 
5465  // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
5466  // We have to permute it to OIHW if the data layout is NCHW.
5467  const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
 5468  ConvertOperationInputToConstTensorPin(operation, 1,
 5469  model, data, OHWIToOIHW) :
5470  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
5471 
5472  // Bias is a 1D tensor
5473  const ConstTensorPin biasPin =
5474  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
5475 
5476  if (!weightsPin.IsValid())
5477  {
5478  return Fail("%s: Operation has invalid weights", __func__);
5479  }
5480 
5481  if (!biasPin.IsValid())
5482  {
5483  return Fail("%s: Operation has invalid biases", __func__);
5484  }
5485 
5486  ConstTensor weights = weightsPin.GetConstTensor();
5487  ConstTensor bias = biasPin.GetConstTensor();
5488  SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
5489 
5490  ActivationFn activation;
5491 
5492  if (implicitPadding)
5493  {
5494  int32_t strideX{0};
5495  int32_t strideY{0};
5496  int32_t padLeft{0};
5497  int32_t padRight{0};
5498  int32_t padTop{0};
5499  int32_t padBottom{0};
5500 
5501  ::android::nn::PaddingScheme paddingScheme;
5502  if (!GetInputPaddingScheme(operation, 4, paddingScheme, model, data) ||
5503  !GetInputScalar(operation, 5, OperandType::INT32, strideX, model, data) ||
5504  !GetInputScalar(operation, 6, OperandType::INT32, strideY, model, data) ||
5505  !GetInputActivationFunction(operation, 7, activation, model, data))
5506  {
5507  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
5508  }
5509 
5510  const uint32_t kernelX = weights.GetShape()[widthIndex];
5511  const uint32_t kernelY = weights.GetShape()[heightIndex];
5512 
5513  // If output shape has been specified as a parameter then extract it and make it available.
5514  const Operand* outputShapeOperand = GetInputOperand(operation, 3, model, false);
5515  std::vector<int32_t> outputShape;
5516  if ((outputShapeOperand) && (GetTensorInt32Values(*outputShapeOperand, outputShape, model, data)))
5517  {
5518  // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
5519  for (int dimension : outputShape)
5520  {
5521  desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
5522  }
5523  desc.m_OutputShapeEnabled = true;
5524  }
5525 
5526  uint32_t outputX;
5527  uint32_t outputY;
5528 
5529  if (IsDynamicTensor(outputInfo))
5530  {
5531  if (outputShape.size() == 0)
5532  {
5533  return Fail("%s: Padding sizes cannot be inferred", __func__);
5534  }
5535 
5536  outputX = outputShape[widthIndex];
5537  outputY = outputShape[heightIndex];
5538  }
5539  else
5540  {
5541  outputX = outputInfo.GetShape()[widthIndex];
5542  outputY = outputInfo.GetShape()[heightIndex];
5543  }
5544 
5545  CalcPaddingTransposeConv(outputX, kernelX, strideX, padLeft, padRight, paddingScheme);
5546  CalcPaddingTransposeConv(outputY, kernelY, strideY, padTop, padBottom, paddingScheme);
5547 
5548  // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
5549  // but Arm NN only supports values >= 0
5550  if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
5551  {
5552  return Fail("%s: Negative padding values are not supported", __func__);
5553  }
5554 
5555  desc.m_StrideX = armnn::numeric_cast<uint32_t>(strideX);
5556  desc.m_StrideY = armnn::numeric_cast<uint32_t>(strideY);
5557  desc.m_PadLeft = armnn::numeric_cast<uint32_t>(padLeft);
5558  desc.m_PadRight = armnn::numeric_cast<uint32_t>(padRight);
5559  desc.m_PadTop = armnn::numeric_cast<uint32_t>(padTop);
5560  desc.m_PadBottom = armnn::numeric_cast<uint32_t>(padBottom);
5561  }
5562  else if (operation.inputs.size() == 11)
5563  {
5564  // explicit padding
5565  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
5566  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
5567  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
5568  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
5569  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
5570  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
5571  !GetInputActivationFunction(operation, 9, activation, model, data))
5572  {
5573  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
5574  }
5575  }
5576  else
5577  {
5578  return Fail("%s: Unsupported number of operation inputs", __func__);
5579  }
5580 
5581  desc.m_BiasEnabled = true;
5582  Optional<TensorInfo> biases(bias.GetInfo());
5583 
5584  bool isSupported = false;
5585  armnn::BackendId setBackend;
5586  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5587  {
 5588  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 5589  IsTransposeConvolution2dSupported,
5590  data.m_Backends,
5591  isSupported,
5592  setBackend,
5593  inputInfo,
5594  outputInfo,
5595  desc,
5596  weights.GetInfo(),
5597  biases);
5598  };
5599 
5600  if(IsDynamicTensor(outputInfo))
5601  {
5602  isSupported = AreDynamicTensorsSupported();
5603  }
5604  else
5605  {
5606  validateFunc(outputInfo, isSupported);
5607  }
5608  if (!isSupported)
5609  {
5610  return false;
5611  }
5612 
5613  IConnectableLayer* startLayer =
5614  data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
 5615  if (!startLayer)
 5616  {
 5617  return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
 5618  }
 5619  startLayer->SetBackendId(setBackend);
5620 
5621  input.Connect(startLayer->GetInputSlot(0));
5622 
5623  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
5624  data, nullptr, validateFunc, activation);
5625 }
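
The implicit-padding branch infers padding from the requested output size using the transposed-convolution size relation, assumed here to be output = (input - 1) * stride + kernel - padHead - padTail:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        uint32_t input = 4, stride = 2, kernel = 3;
        uint32_t padHead = 0, padTail = 1;
        // Per-axis output extent of a transposed convolution.
        uint32_t output = (input - 1) * stride + kernel - padHead - padTail; // 8
        std::cout << output << "\n";
    }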
5626 
5627 bool Converter::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
5628 {
5629  VLOG(DRIVER) << "Converter::ConvertSqrt()";
5630  ActivationDescriptor desc;
5631  desc.m_Function = ActivationFunction::Sqrt;
5632 
5633  return ::ConvertToActivation(operation, __func__, desc, model, data);
5634 }
5635 
5636 bool Converter::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
5637 {
5638  VLOG(DRIVER) << "Converter::ConvertSqueeze()";
5639 
5640  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5641  if (!input.IsValid())
5642  {
5643  return Fail("%s: Operation has invalid inputs", __func__);
5644  }
5645 
5646  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5647  unsigned int rank = inputInfo.GetNumDimensions();
5648  if (rank > 4)
5649  {
 5650  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5651  }
5652 
5653  const Operand* output = GetOutputOperand(operation, 0, model);
5654  if (!output)
5655  {
5656  return Fail("%s: Could not read output 0", __func__);
5657  }
5658 
 5659  if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
 5660  {
5661  return Fail("%s: Dynamic output tensors are not supported", __func__);
5662  }
5663 
5664  // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
5665  // if the operand index is out of bounds.
5666  const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
5667 
5668  const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
5669 
5670  std::vector<int32_t> axis;
5671  if (!axisOperand)
5672  {
5673  axis.assign(dimensionSequence,
5674  dimensionSequence + rank);
5675  }
5676  else if (!GetTensorInt32Values(*axisOperand, axis, model, data))
5677  {
5678  return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
5679  }
5680 
5681  std::vector<uint32_t> outputDims;
5682  for (unsigned int i = 0; i < rank; i++)
5683  {
5684  bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
5685  auto currentDimension = inputInfo.GetShape()[i];
5686  if (skipSqueeze || currentDimension != 1)
5687  {
5688  outputDims.push_back(currentDimension);
5689  }
5690  }
5691 
5692  armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
5693 
5694  armnn::TensorInfo outputInfo = inputInfo;
5695  outputInfo.SetShape(outShape);
5696 
5697  armnn::ReshapeDescriptor reshapeDesc;
5698  reshapeDesc.m_TargetShape = outputInfo.GetShape();
5699 
5700  bool isSupported = false;
5701  armnn::BackendId setBackend;
 5702  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 5703  IsReshapeSupported,
5704  data.m_Backends,
5705  isSupported,
5706  setBackend,
5707  inputInfo,
5708  outputInfo,
5709  reshapeDesc);
5710 
5711  if (!isSupported)
5712  {
5713  return false;
5714  }
5715 
5716  armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
 5717  assert(layer != nullptr);
 5718  layer->SetBackendId(setBackend);
5719  input.Connect(layer->GetInputSlot(0));
5720 
5721  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
5722 }
5723 
5724 bool Converter::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
5725 {
5726  VLOG(DRIVER) << "Converter::ConvertStridedSlice()";
5727 
5728  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5729  if (!input.IsValid())
5730  {
5731  return Fail("%s: Operation has invalid inputs", __func__);
5732  }
5733 
5734  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5735  unsigned int rank = inputInfo.GetNumDimensions();
5736  if (rank > 4)
5737  {
 5738  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5739  }
5740 
5741  const Operand* output = GetOutputOperand(operation, 0, model);
5742  if (!output)
5743  {
5744  return Fail("%s: Could not read output 0", __func__);
5745  }
5746 
5747  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5748 
5749  const Operand* beginOperand = GetInputOperand(operation, 1, model);
5750  const Operand* endOperand = GetInputOperand(operation, 2, model);
5751  const Operand* stridesOperand = GetInputOperand(operation, 3, model);
5752 
5753  std::vector<int32_t> beginValues;
5754  std::vector<int32_t> endValues;
5755  std::vector<int32_t> stridesValues;
5756 
 5757  // The lengths of the beginOperand, endOperand and stridesOperand must equal the rank of the input
5758  auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
5759  {
5760  if (!GetTensorInt32Values(operand, operandValues, model, data))
5761  {
5762  return false;
5763  }
5764 
5765  if (operandValues.size() != rank)
5766  {
5767  return false;
5768  }
5769 
5770  return true;
5771  };
5772 
5773  if (!ValidateInputOperands(*beginOperand, beginValues)
5774  || !ValidateInputOperands(*endOperand, endValues)
5775  || !ValidateInputOperands(*stridesOperand, stridesValues))
5776  {
5777  return Fail("%s: Operation has invalid input operand", __func__);
5778  }
5779 
5780  // Stride cannot have value '0'
5781  if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
5782  {
 5783  return Fail("%s: Stride must be a non-zero value.", __func__);
5784  }
5785 
5786  armnn::StridedSliceDescriptor descriptor;
5787  descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
5788  descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
5789  descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
 5790  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
 5791 
5792  // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
5793  if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data) ||
5794  !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data) ||
5795  !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
5796  {
5797  return Fail("%s: Operation has invalid inputs", __func__);
5798  }
5799 
5800  bool isSupported = false;
5801  armnn::BackendId setBackend;
5802  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5803  {
 5804  FORWARD_LAYER_SUPPORT_FUNC(__func__,
 5805  IsStridedSliceSupported,
5806  data.m_Backends,
5807  isSupported,
5808  setBackend,
5809  inputInfo,
5810  outputInfo,
5811  descriptor);
5812  };
5813 
5814  if(IsDynamicTensor(outputInfo))
5815  {
5816  isSupported = AreDynamicTensorsSupported();
5817  }
5818  else
5819  {
5820  validateFunc(outputInfo, isSupported);
5821  }
5822 
5823  if (!isSupported)
5824  {
5825  return false;
5826  }
5827 
 5828  // Check if the slice can fit in an inferred output
5829  armnn::TensorShape inputShape = inputInfo.GetShape();
5830  for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
5831  {
5832  int stride = descriptor.m_Stride[i];
5833 
5834  if (descriptor.m_ShrinkAxisMask & (1 << i))
5835  {
5836  // If the difference between the start point and the end point of the slice on an axis being shrunk
5837  // is greater than 1 then throw an error as the output will not be large enough to hold the slice
5838  if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
5839  || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
5840  {
5841  return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
5842  }
5843 
5844  if(stride < 0)
5845  {
 5846  return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
5847  }
5848  }
5849  }
5850 
5851  armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
 5852  assert(layer != nullptr);
 5853  layer->SetBackendId(setBackend);
5854  input.Connect(layer->GetInputSlot(0));
5855 
5856  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5857 }
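
Given the begin/end/stride vectors above (and ignoring the masks), each output dimension holds ceil((end - begin) / stride) elements. An illustrative helper, not driver code:

    #include <iostream>

    int SliceExtent(int begin, int end, int stride)
    {
        int span = end - begin;                                   // negative for stride < 0
        return (span + stride + (stride > 0 ? -1 : 1)) / stride;  // integer ceiling
    }

    int main()
    {
        std::cout << SliceExtent(0, 5, 2) << "\n";   // 3 -> indices 0, 2, 4
        std::cout << SliceExtent(4, -1, -1) << "\n"; // 5 -> indices 4, 3, 2, 1, 0
    }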
5858 
5859 bool Converter::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
5860 {
5861  VLOG(DRIVER) << "Converter::ConvertTranspose()";
5862 
5863  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5864  if (!input.IsValid())
5865  {
5866  return Fail("%s: Operation has invalid inputs", __func__);
5867  }
5868 
5869  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5870  unsigned int rank = inputInfo.GetNumDimensions();
5871  if (rank > 4)
5872  {
 5873  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5874  }
5875 
 5876  // NOTE: The permutation tensor is an optional parameter to TRANSPOSE, therefore we do not want to
 5877  // generate a failure if the operand index is out of bounds.
5878  const Operand* permOperand = GetInputOperand(operation, 1, model, false);
5879 
5880  std::vector<int32_t> perm(rank);
5881  if (!permOperand || (permOperand->lifetime == OperandLifeTime::NO_VALUE))
5882  {
5883  for (unsigned int i = rank; i > 0; i--)
5884  {
5885  perm[rank - i] = armnn::numeric_cast<int> (i - 1);
5886  }
5887  }
5888  else if (!GetTensorInt32Values(*permOperand, perm, model, data))
5889  {
5890  return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
5891  }
5892 
5893  std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
5894 
5895  armnn::TransposeDescriptor transposeDesc;
5896  transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
5897 
5898  const Operand* output = GetOutputOperand(operation, 0, model);
5899  if (!output)
5900  {
5901  return Fail("%s: Could not read output 0", __func__);
5902  }
5903 
5904  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5905 
5906  bool isSupported = false;
5907  armnn::BackendId setBackend;
5908  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5909  {
5910  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5911  IsTransposeSupported,
5912  data.m_Backends,
5913  isSupported,
5914  setBackend,
5915  inputInfo,
5916  outputInfo,
5917  transposeDesc);
5918  };
5919 
5920  if(IsDynamicTensor(outputInfo))
5921  {
5922  isSupported = AreDynamicTensorsSupported();
5923  }
5924  else
5925  {
5926  validateFunc(outputInfo, isSupported);
5927  }
5928 
5929  if (!isSupported)
5930  {
5931  return false;
5932  }
5933 
5934  armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
 5935  assert(layer != nullptr);
 5936  layer->SetBackendId(setBackend);
5937  input.Connect(layer->GetInputSlot(0));
5938 
5939  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5940 }
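
When no permutation operand is supplied, the loop above builds the reversed dimension order, i.e. a full transpose. Extracted and run standalone:

    #include <iostream>
    #include <vector>

    int main()
    {
        unsigned int rank = 4;
        std::vector<int> perm(rank);
        // Same construction as the converter's default-permutation loop.
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = static_cast<int>(i - 1);
        }
        for (int p : perm) { std::cout << p << " "; } // 3 2 1 0
    }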
5941 
5942 } // namespace armnn_driver
armnn_driver::GetOutputOperand
const Operand * GetOutputOperand(const Operation &operation, uint32_t outputIndex, const Model &model)
Definition: ConversionUtils.hpp:662
armnn::QLstmDescriptor::m_CellClip
float m_CellClip
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1381
armnn::QLstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1391
armnn_driver::IsWeightsValid
bool IsWeightsValid(const Operation &operation, uint32_t inputIndex, const Model &model)
Utility functions.
Definition: ConversionUtils.cpp:134
armnn_driver::Operation
::android::nn::Operation Operation
Definition: ConversionUtils.hpp:47
armnn::TransposeConvolution2dDescriptor::m_OutputShapeEnabled
bool m_OutputShapeEnabled
Output shape if it has been specified.
Definition: Descriptors.hpp:1452
armnn::LstmInputParams::m_CellToForgetWeights
const ConstTensor * m_CellToForgetWeights
Definition: LstmParams.hpp:49
armnn::InstanceNormalizationDescriptor::m_Eps
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
Definition: Descriptors.hpp:857
armnn::IsSoftmaxSupported
bool IsSoftmaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:572
armnn::BackendId
Definition: BackendId.hpp:75
armnnUtils::ExpandDims
armnn::TensorShape ExpandDims(const armnn::TensorShape &tensorShape, int axis)
Definition: TensorUtils.cpp:140
armnn::QuantizedLstmInputParamsInfo::m_OutputGateBias
const TensorInfo * m_OutputGateBias
Definition: QuantizedLstmParams.hpp:152
armnn::IsConvolution2dSupported
bool IsConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::Converter::Operation
::android::nn::Operation Operation
Definition: Converter.hpp:28
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
Definition: Descriptors.hpp:1579
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1442
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:570
armnn::LstmInputParamsInfo::m_InputToCellWeights
const TensorInfo * m_InputToCellWeights
Definition: LstmParams.hpp:91
armnn_driver::IsConnectedToDequantize
bool IsConnectedToDequantize(armnn::IOutputSlot *ioutputSlot)
Definition: ConversionUtils.cpp:1040
armnn::QLstmDescriptor::m_ProjectionClip
float m_ProjectionClip
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1383
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1436
armnn::GetArgMinMaxFunctionAsCString
constexpr char const * GetArgMinMaxFunctionAsCString(ArgMinMaxFunction function)
Definition: TypesUtils.hpp:51
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:514
armnn_driver
Helper classes.
Definition: ArmnnDevice.cpp:37
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:932
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:757
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1457
armnn_driver::ConvertPaddings
bool ConvertPaddings(const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor)
Definition: ConversionUtils.cpp:338
armnn::QLstmDescriptor::m_ForgetIntermediateScale
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
Definition: Descriptors.hpp:1395
armnn::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1440
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1163
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
Connect
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:14
armnn::LstmInputParamsInfo::m_CellToInputWeights
const TensorInfo * m_CellToInputWeights
Definition: LstmParams.hpp:97
armnn_driver::isQuantizedOperand
bool isQuantizedOperand(const OperandType &operandType)
Definition: CanonicalUtils.cpp:505
armnn::Convolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:568
armnn::TensorInfo::SetQuantizationScale
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:473
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:560
armnn::LstmInputParamsInfo::m_InputGateBias
const TensorInfo * m_InputGateBias
Definition: LstmParams.hpp:100
armnn::BatchToSpaceNdDescriptor::m_Crops
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
Definition: Descriptors.hpp:888
armnn::QuantizedLstmInputParams::m_RecurrentToInputWeights
const ConstTensor * m_RecurrentToInputWeights
Definition: QuantizedLstmParams.hpp:38
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::LstmInputParams::m_RecurrentToOutputWeights
const ConstTensor * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:47
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1069
armnn::TensorShape::GetNumElements
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension size which are Specified.
Definition: Tensor.cpp:181
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:495
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1317
armnn_driver::ConvertToLayerInputHandle
LayerInputHandle ConvertToLayerInputHandle(const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle)
Definition: ConversionUtils.cpp:204
armnn_driver::ConstTensorPin::GetConstTensorPtr
const armnn::ConstTensor * GetConstTensorPtr() const
Definition: ConversionUtils.cpp:120
armnn::LstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: LstmParams.hpp:90
armnn::TransposeConvolution2dDescriptor::m_OutputShape
std::vector< unsigned int > m_OutputShape
Definition: Descriptors.hpp:1453
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::LstmInputParamsInfo::m_InputToOutputWeights
const TensorInfo * m_InputToOutputWeights
Definition: LstmParams.hpp:92
armnn::IsStridedSliceSupported
bool IsStridedSliceSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::SpaceToBatchNdDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left,...
Definition: Descriptors.hpp:1036
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1551
armnn::LstmInputParamsInfo::m_ProjectionBias
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:952
armnn::FullyConnectedDescriptor::m_ConstantWeights
bool m_ConstantWeights
Enable/disable constant weights and biases.
Definition: Descriptors.hpp:518
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1270
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1309
armnn_driver::GetOptionalBool
bool GetOptionalBool(const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:900
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
armnn::LstmInputParams::m_CellBias
const ConstTensor * m_CellBias
Definition: LstmParams.hpp:53
armnn::QuantizedLstmInputParams::m_InputToOutputWeights
const ConstTensor * m_InputToOutputWeights
Definition: QuantizedLstmParams.hpp:36
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1448
armnn::LstmInputParamsInfo::m_RecurrentToForgetWeights
const TensorInfo * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:94
armnn::QuantizedLstmInputParams::m_InputToCellWeights
const ConstTensor * m_InputToCellWeights
Definition: QuantizedLstmParams.hpp:35
TensorUtils.hpp
armnn::OriginsDescriptor::SetConcatAxis
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
Definition: Descriptors.cpp:158
armnn::LayerType::Shape
@ Shape
armnn_driver::GetTensorInt32Values
bool GetTensorInt32Values(const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data)
Definition: ConversionUtils.cpp:822
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:566
armnn::LstmInputParams::m_OutputLayerNormWeights
const ConstTensor * m_OutputLayerNormWeights
Definition: LstmParams.hpp:60
armnn::QuantizedLstmInputParams::m_InputToInputWeights
const ConstTensor * m_InputToInputWeights
Definition: QuantizedLstmParams.hpp:33
armnn::LstmInputParamsInfo::m_CellBias
const TensorInfo * m_CellBias
Definition: LstmParams.hpp:102
armnn_driver::Model
::android::nn::Model Model
Helper classes.
Definition: ConversionUtils.hpp:43
armnn_driver::AreDynamicTensorsSupported
bool AreDynamicTensorsSupported()
Checks for ArmNN support of dynamic tensors.
Definition: CanonicalUtils.cpp:500
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1113
armnn::QuantizedLstmInputParams::m_RecurrentToCellWeights
const ConstTensor * m_RecurrentToCellWeights
Definition: QuantizedLstmParams.hpp:40
armnn::QuantizedLstmInputParams::m_RecurrentToOutputWeights
const ConstTensor * m_RecurrentToOutputWeights
Definition: QuantizedLstmParams.hpp:41
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToCellWeights
const TensorInfo * m_RecurrentToCellWeights
Definition: QuantizedLstmParams.hpp:146
armnn::GetUnaryOperationAsCString
constexpr char const * GetUnaryOperationAsCString(UnaryOperation operation)
Definition: TypesUtils.hpp:89
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1119
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:696
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1157
armnn::Convolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:558
armnn::IsLstmSupported
bool IsLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:556
armnn_driver::GetInputActivationFunctionFromTensor
bool GetInputActivationFunctionFromTensor(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:837
armnn_driver::DequantizeAndMakeConstTensorPin
ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
Definition: ConversionUtils.cpp:731
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::QuantizedLstmInputParamsInfo::m_ForgetGateBias
const TensorInfo * m_ForgetGateBias
Definition: QuantizedLstmParams.hpp:150
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:688
armnn::ArgMinMaxFunction
ArgMinMaxFunction
Definition: Types.hpp:102
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:692
armnn::LstmInputParamsInfo::m_InputToInputWeights
const TensorInfo * m_InputToInputWeights
Definition: LstmParams.hpp:89
armnnUtils::TransposeTensorShape
armnn::TensorShape TransposeTensorShape(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Transpose.cpp:98
armnn::LstmInputParams::m_InputToOutputWeights
const ConstTensor * m_InputToOutputWeights
Definition: LstmParams.hpp:43
armnn_driver::ConversionData
Definition: ConversionUtils.hpp:51
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::IOutputSlot::Connect
virtual int Connect(IInputSlot &destination)=0
armnn::LstmInputParams::m_ForgetGateBias
const ConstTensor * m_ForgetGateBias
Definition: LstmParams.hpp:52
armnn::LstmInputParamsInfo::m_CellToForgetWeights
const TensorInfo * m_CellToForgetWeights
Definition: LstmParams.hpp:98
armnn::NormalizationDescriptor::m_NormMethodType
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
Definition: Descriptors.hpp:783
armnn::QLstmDescriptor::m_CellIntermediateScale
float m_CellIntermediateScale
Cell intermediate quantization scale.
Definition: Descriptors.hpp:1397
armnn::IsReshapeSupported
bool IsReshapeSupported(const BackendId &backend, const TensorInfo &input, const ReshapeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
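All the deprecated Is*Supported free functions indexed in this file share one calling pattern: pass the backend, the tensor infos and descriptor, and an optional buffer that receives the failure reason. A minimal sketch (the CpuRef backend and the shapes are hypothetical):

#include <armnn/Deprecated.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/Tensor.hpp>

bool ReshapeSupportedOnCpuRef()
{
    const armnn::TensorInfo input({1, 2, 2, 1}, armnn::DataType::Float32);
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = armnn::TensorShape({1, 4});

    char reason[1024] = {};
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    const bool supported = armnn::IsReshapeSupported(armnn::BackendId("CpuRef"),
                                                     input, desc, reason, sizeof(reason));
    ARMNN_NO_DEPRECATE_WARN_END
    return supported; // on failure, 'reason' holds a null-terminated explanation
}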
armnn::GetDataTypeSize
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:169
armnn::DepthwiseConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:680
armnn::LstmInputParams::m_CellLayerNormWeights
const ConstTensor * m_CellLayerNormWeights
Definition: LstmParams.hpp:59
armnn::LstmInputParams::m_CellToOutputWeights
const ConstTensor * m_CellToOutputWeights
Definition: LstmParams.hpp:50
armnn::FillDescriptor::m_Value
float m_Value
Definition: Descriptors.hpp:928
armnn::QLstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1399
armnn::QuantizedLstmInputParamsInfo::m_InputToInputWeights
const TensorInfo * m_InputToInputWeights
Definition: QuantizedLstmParams.hpp:139
armnn::NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:793
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:684
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:863
armnn_driver::Operand
::android::nn::Operand Operand
Definition: ConversionUtils.hpp:44
armnn::OptionalReferenceSwitch::value
const T & value() const
Definition: Optional.hpp:146
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn_driver::Half
half_float::half Half
Definition: Converter.cpp:14
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1042
armnn::ReduceOperation::Min
@ Min
armnn::IsSplitterSupported
bool IsSplitterSupported(const BackendId &backend, const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1311
armnn::TensorInfo::SetQuantizationOffset
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:489
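A short sketch (hypothetical scale and offset) of the asymmetric-quantization accessors used throughout this converter; the dequantized value is scale * (quantized - offset):

#include <armnn/Tensor.hpp>

armnn::TensorInfo info({1, 8}, armnn::DataType::QAsymmS8,
                       /*quantizationScale*/ 0.05f, /*quantizationOffset*/ 0);
info.SetQuantizationScale(0.1f);  // rescale after the fact
info.SetQuantizationOffset(-10);  // shift the zero point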
armnn_driver::ConversionData::m_Network
armnn::INetworkPtr m_Network
Definition: ConversionUtils.hpp:60
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:983
armnn::IsSubtractionSupported
bool IsSubtractionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::LogicalBinaryOperation
LogicalBinaryOperation
Definition: Types.hpp:118
armnn::LstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: LstmParams.hpp:41
armnn::GetLogicalBinaryOperationAsCString
constexpr char const * GetLogicalBinaryOperationAsCString(LogicalBinaryOperation operation)
Definition: TypesUtils.hpp:105
armnn::ConcatDescriptor
OriginsDescriptor ConcatDescriptor
Definition: DescriptorsFwd.hpp:56
armnn::QuantizedLstmInputParamsInfo::m_InputToCellWeights
const TensorInfo * m_InputToCellWeights
Definition: QuantizedLstmParams.hpp:141
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:913
armnn::Half
half_float::half Half
Definition: Half.hpp:22
armnn_driver::ConstTensorPin
Definition: ConversionUtils.hpp:90
armnn::ResizeMethod
ResizeMethod
Definition: Types.hpp:163
armnn::ReduceOperation::Sum
@ Sum
armnn::ResizeDescriptor::m_TargetHeight
uint32_t m_TargetHeight
Target height value.
Definition: Descriptors.hpp:976
armnn_driver::LayerInputHandle
Definition: ConversionUtils.hpp:66
armnn::SpaceToDepthDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1062
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
armnn::DepthwiseConvolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation factor value for height dimension.
Definition: Descriptors.hpp:694
armnn::LstmInputParams::m_RecurrentToForgetWeights
const ConstTensor * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:45
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params to gather indices from.
Definition: Descriptors.hpp:948
armnn_driver::Converter::Model
::android::nn::Model Model
Definition: Converter.hpp:24
armnn::ReduceOperation::Max
@ Max
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
armnn::DepthwiseConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:698
armnn::QuantizedLstmInputParamsInfo::m_InputGateBias
const TensorInfo * m_InputGateBias
Definition: QuantizedLstmParams.hpp:149
armnn::SoftmaxDescriptor::m_Axis
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
Definition: Descriptors.hpp:192
armnn_driver::ConvertPooling2d
bool ConvertPooling2d(const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:380
armnn::NormalizationAlgorithmChannel::Across
@ Across
armnn::Convolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:554
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:647
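The depthwise-convolution members referenced throughout this index are plain fields set before the layer is added; a minimal sketch with hypothetical 3x3 same-padding values:

#include <armnn/Descriptors.hpp>

armnn::DepthwiseConvolution2dDescriptor dwDesc;
dwDesc.m_PadLeft     = 1;
dwDesc.m_PadRight    = 1;
dwDesc.m_PadTop      = 1;
dwDesc.m_PadBottom   = 1;
dwDesc.m_StrideX     = 1;
dwDesc.m_StrideY     = 1;
dwDesc.m_DilationX   = 1;
dwDesc.m_DilationY   = 1;
dwDesc.m_BiasEnabled = true;
dwDesc.m_DataLayout  = armnn::DataLayout::NHWC;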
armnn::GetResizeMethodAsCString
constexpr const char * GetResizeMethodAsCString(ResizeMethod method)
Definition: TypesUtils.hpp:272
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1139
armnn::NormalizationAlgorithmMethod::LocalBrightness
@ LocalBrightness
Krichevsky 2012: Local Brightness Normalization.
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnn::NormalizationDescriptor::m_NormSize
uint32_t m_NormSize
Depth radius value.
Definition: Descriptors.hpp:785
armnn_driver::GetInputInt32
bool GetInputInt32(const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:782
armnn_driver::LayerInputHandle::GetOutputSlot
armnn::IOutputSlot * GetOutputSlot() const
Definition: ConversionUtils.cpp:70
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:890
armnn::QuantizedLstmInputParams::m_CellBias
const ConstTensor * m_CellBias
Definition: QuantizedLstmParams.hpp:45
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:797
armnn::LstmInputParams::m_InputToCellWeights
const ConstTensor * m_InputToCellWeights
Definition: LstmParams.hpp:42
armnn::QuantizedLstmInputParamsInfo::m_CellBias
const TensorInfo * m_CellBias
Definition: QuantizedLstmParams.hpp:151
armnn::IsResizeSupported
bool IsResizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::QuantizedLstmInputParams::m_RecurrentToForgetWeights
const ConstTensor * m_RecurrentToForgetWeights
Definition: QuantizedLstmParams.hpp:39
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1115
armnn::ComparisonOperation
ComparisonOperation
Definition: Types.hpp:108
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1580
armnn_driver::GetOptionalConvolutionDilationParams
bool GetOptionalConvolutionDilationParams(const Operation &operation, uint32_t dilationXIndex, ConvolutionDescriptor &descriptor, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:874
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn_driver::GetOperandType
bool GetOperandType(const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type)
Definition: ConversionUtils.hpp:683
armnn::SpaceToBatchNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1038
armnn::ResizeDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:981
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1059
armnn::IsDivisionSupported
bool IsDivisionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:522
Converter.hpp
armnn::LstmInputParamsInfo::m_OutputGateBias
const TensorInfo * m_OutputGateBias
Definition: LstmParams.hpp:103
armnn::UnaryOperation
UnaryOperation
Definition: Types.hpp:124
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1438
armnn_driver::LayerInputHandle::Connect
void Connect(armnn::IInputSlot &inputSlot)
Definition: ConversionUtils.cpp:32
armnn_driver::LayerInputHandle::SanitizeQuantizationScale
void SanitizeQuantizationScale(LayerInputHandle &weight, LayerInputHandle &input)
Definition: ConversionUtils.cpp:55
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1347
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
armnn_driver::GetOptionalInputActivation
bool GetOptionalInputActivation(const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:853
armnn::InstanceNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:859
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
armnn::TensorInfo::HasPerAxisQuantization
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
armnn::GetComparisonOperationAsCString
constexpr char const * GetComparisonOperationAsCString(ComparisonOperation operation)
Definition: TypesUtils.hpp:61
armnn_driver::GetInputOperand
const Operand * GetInputOperand(const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true)
Definition: ConversionUtils.hpp:643
armnn::SoftmaxDescriptor::m_Beta
float m_Beta
Exponentiation value.
Definition: Descriptors.hpp:190
armnn::ResizeDescriptor::m_Method
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
Definition: Descriptors.hpp:979
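The resize fields indexed here combine as follows; a sketch with hypothetical 256x256 bilinear settings (m_AlignCorners and m_HalfPixelCenters are normally not enabled together):

#include <armnn/Descriptors.hpp>

armnn::ResizeDescriptor resizeDesc;
resizeDesc.m_Method           = armnn::ResizeMethod::Bilinear;
resizeDesc.m_TargetWidth      = 256;
resizeDesc.m_TargetHeight     = 256;
resizeDesc.m_DataLayout       = armnn::DataLayout::NHWC;
resizeDesc.m_AlignCorners     = false;
resizeDesc.m_HalfPixelCenters = true;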
armnn_driver::Converter::ConvertOperation
static bool ConvertOperation(const Operation &operation, const Model &model, ConversionData &data)
Definition: Converter.cpp:21
armnn::QLstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1403
armnn_driver::GetInputPaddingScheme
bool GetInputPaddingScheme(const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data)
Definition: ConversionUtils.cpp:761
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::TransposeDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts.
Definition: Descriptors.hpp:1481
armnn::QLstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1401
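The QLstm intermediate scales and hidden-state quantization parameters above are set directly on the descriptor; a sketch with hypothetical values:

#include <armnn/Descriptors.hpp>

armnn::QLstmDescriptor qDesc;
qDesc.m_CifgEnabled             = false;
qDesc.m_PeepholeEnabled         = false;
qDesc.m_ProjectionEnabled       = true;
qDesc.m_InputIntermediateScale  = 0.007f;
qDesc.m_CellIntermediateScale   = 0.007f;
qDesc.m_OutputIntermediateScale = 0.007f;
qDesc.m_HiddenStateScale        = 0.007f;
qDesc.m_HiddenStateZeroPoint    = 0;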
armnn::LstmInputParams::m_RecurrentToInputWeights
const ConstTensor * m_RecurrentToInputWeights
Definition: LstmParams.hpp:44
armnn::LstmInputParamsInfo::m_ForgetLayerNormWeights
const TensorInfo * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:107
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToForgetWeights
const TensorInfo * m_RecurrentToForgetWeights
Definition: QuantizedLstmParams.hpp:145
armnn::QuantizedLstmInputParams
Definition: QuantizedLstmParams.hpp:13
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1313
armnn_driver::IsDynamicTensor
bool IsDynamicTensor(const armnn::TensorInfo &tensorInfo)
Checks if a tensor info represents a dynamic tensor.
Definition: CanonicalUtils.cpp:486
armnn::DataLayout::NHWC
@ NHWC
armnn::IsDequantizeSupported
bool IsDequantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::LstmInputParamsInfo::m_InputLayerNormWeights
const TensorInfo * m_InputLayerNormWeights
Definition: LstmParams.hpp:106
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::LstmInputParamsInfo::m_OutputLayerNormWeights
const TensorInfo * m_OutputLayerNormWeights
Definition: LstmParams.hpp:109
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:562
armnn::LstmInputParamsInfo::m_ProjectionWeights
const TensorInfo * m_ProjectionWeights
Definition: LstmParams.hpp:104
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:791
armnn::PadDescriptor::m_PadValue
float m_PadValue
Optional value to use for padding, defaults to 0.
Definition: Descriptors.hpp:1188
armnn::PermutationVector
Definition: Types.hpp:306
armnn_driver::LayerInputHandle::IsValid
bool IsValid() const
Definition: ConversionUtils.cpp:27
armnn::BatchToSpaceNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:886
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1322
ARMNN_NO_DEPRECATE_WARN_BEGIN
#define ARMNN_NO_DEPRECATE_WARN_BEGIN
Definition: Deprecated.hpp:33
armnn::SpaceToBatchNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:1033
armnn::NormalizationDescriptor::m_Beta
float m_Beta
Beta value for the normalization equation.
Definition: Descriptors.hpp:789
armnn::LstmInputParams::m_RecurrentToCellWeights
const ConstTensor * m_RecurrentToCellWeights
Definition: LstmParams.hpp:46
armnn::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1109
armnn::IConnectableLayer::SetBackendId
virtual void SetBackendId(const BackendId &id)=0
Set the backend of the IConnectableLayer.
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:990
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1450
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::LstmInputParams::m_ProjectionWeights
const ConstTensor * m_ProjectionWeights
Definition: LstmParams.hpp:55
armnn::LstmInputParams::m_InputToInputWeights
const ConstTensor * m_InputToInputWeights
Definition: LstmParams.hpp:40
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1407
armnn::FullyConnectedDescriptor::m_TransposeWeightMatrix
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Definition: Descriptors.hpp:516
armnn::L2NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:812
armnn::TensorInfo::GetQuantizationDim
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:494
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToOutputWeights
const TensorInfo * m_RecurrentToOutputWeights
Definition: QuantizedLstmParams.hpp:147
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn_driver::ConstTensorPin::IsOptional
bool IsOptional() const
Definition: ConversionUtils.cpp:110
armnn::IOutputSlot::SetTensorInfo
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
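SetTensorInfo, Connect, GetInputSlot and GetOutputSlot are the core graph-wiring calls this converter relies on; a self-contained sketch (hypothetical shape) wiring input -> floor -> output:

#include <armnn/INetwork.hpp>
#include <armnn/Tensor.hpp>

void BuildTinyGraph()
{
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* input  = network->AddInputLayer(0);
    armnn::IConnectableLayer* floorL = network->AddFloorLayer("floor");
    armnn::IConnectableLayer* output = network->AddOutputLayer(0);

    const armnn::TensorInfo info({1, 4}, armnn::DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(info);  // producing slots carry tensor metadata
    floorL->GetOutputSlot(0).SetTensorInfo(info);

    input->GetOutputSlot(0).Connect(floorL->GetInputSlot(0));
    floorL->GetOutputSlot(0).Connect(output->GetInputSlot(0));
}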
armnn::QuantizedLstmInputParams::m_ForgetGateBias
const ConstTensor * m_ForgetGateBias
Definition: QuantizedLstmParams.hpp:44
armnnUtils::DataLayoutIndexed
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
Definition: DataLayoutIndexed.hpp:17
armnn::ActivationFunction::TanH
@ TanH
armnn::IsMaximumSupported
bool IsMaximumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnSupported=nullptr, size_t reasonIfUnSupportedMaxLength=0)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToInputWeights
const TensorInfo * m_RecurrentToInputWeights
Definition: QuantizedLstmParams.hpp:144
armnn::DepthwiseConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:682
armnn_driver::LayerInputHandle::GetTensorInfo
const armnn::TensorInfo & GetTensorInfo() const
Definition: ConversionUtils.cpp:50
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1485
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1117
armnn::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::ConvertOperationInputToConstTensorPin
ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings=g_DontPermute, const armnn::TensorShape *overrideTensorShape=nullptr, bool optional=false)
Definition: ConversionUtils.hpp:718
armnn::IsMinimumSupported
bool IsMinimumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::QLstmDescriptor::m_InputIntermediateScale
float m_InputIntermediateScale
Input intermediate quantization scale.
Definition: Descriptors.hpp:1393
armnn::StridedSliceDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1330
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1159
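m_Axis and m_KeepDims together describe the reduction; a sketch averaging the spatial dimensions of a hypothetical NHWC tensor while preserving rank:

#include <armnn/Descriptors.hpp>

armnn::MeanDescriptor meanDesc;
meanDesc.m_Axis     = {1, 2}; // reduce H and W of an NHWC tensor
meanDesc.m_KeepDims = true;   // reduced axes stay in the output with length 1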
armnn::Optional
Definition: Optional.hpp:270
armnn_driver::g_DontPermute
const armnn::PermutationVector g_DontPermute
Definition: CanonicalUtils.cpp:38
armnn::IsMeanSupported
bool IsMeanSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1444
armnn::IsNormalizationSupported
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::OptionalDataLayout
armnn::DataLayout OptionalDataLayout(const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:853
armnn::QLstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1385
armnn_driver::GetInputActivationFunction
bool GetInputActivationFunction(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:823
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1320
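The strided-slice ranges and masks indexed here follow NNAPI/TensorFlow-style semantics; a hypothetical sketch slicing a [1, 4] tensor and collapsing the batch axis:

#include <armnn/Descriptors.hpp>

armnn::StridedSliceDescriptor sliceDesc;
sliceDesc.m_Begin          = {0, 0};
sliceDesc.m_End            = {1, 4};
sliceDesc.m_Stride         = {1, 1};
sliceDesc.m_BeginMask      = 0;
sliceDesc.m_EndMask        = 0;
sliceDesc.m_ShrinkAxisMask = 1 << 0; // drop dimension 0, leaving shape [4]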
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:686
armnn_driver::ConstTensorPin::GetConstTensor
const armnn::ConstTensor & GetConstTensor() const
Definition: ConversionUtils.cpp:115
armnn::InstanceNormalizationDescriptor::m_Gamma
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:853
armnn_driver::ConstTensorPin::IsValid
bool IsValid() const
Definition: ConversionUtils.cpp:105
armnn::QuantizedLstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: QuantizedLstmParams.hpp:34
armnn::NormalizationDescriptor::m_NormChannelType
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Definition: Descriptors.hpp:781
armnn::LstmInputParams::m_CellToInputWeights
const ConstTensor * m_CellToInputWeights
Definition: LstmParams.hpp:48
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1107
armnn::CreateDescriptorForConcatenation
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing concatenation of a number of input tensors.
Definition: Descriptors.hpp:288
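A minimal sketch of this convenience template, concatenating two hypothetical 1x3 shapes along dimension 1:

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <vector>

std::vector<armnn::TensorShape> shapes = { armnn::TensorShape({1, 3}),
                                           armnn::TensorShape({1, 3}) };
// Yields view origins {0,0} and {0,3} for a concatenated {1,6} result.
armnn::OriginsDescriptor concatDesc =
    armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);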
armnn::LstmDescriptor::m_ClippingThresProj
float m_ClippingThresProj
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1111
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::TensorInfo::SetShape
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
armnn::QLstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1387
armnn::LstmInputParamsInfo::m_CellLayerNormWeights
const TensorInfo * m_CellLayerNormWeights
Definition: LstmParams.hpp:108
armnn::BaseTensor::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:297
armnn::QuantizedLstmInputParamsInfo::m_InputToOutputWeights
const TensorInfo * m_InputToOutputWeights
Definition: QuantizedLstmParams.hpp:142
armnn::IsMultiplicationSupported
bool IsMultiplicationSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::ConvertReduce
bool ConvertReduce(const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
Definition: ConversionUtils.cpp:508
armnn::LstmInputParamsInfo::m_ForgetGateBias
const TensorInfo * m_ForgetGateBias
Definition: LstmParams.hpp:101
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
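Since BoundedReLu computes min(a, max(b, input)) (see its entry further down), ReLu6 is expressed through m_A and m_B; a sketch:

#include <armnn/Descriptors.hpp>

armnn::ActivationDescriptor reluDesc;
reluDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
reluDesc.m_A = 6.0f; // upper bound: output = min(6, max(0, input))
reluDesc.m_B = 0.0f; // lower bound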
armnn::IsPreluSupported
bool IsPreluSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::DepthwiseConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:690
armnn::ActivationFunction::ReLu
@ ReLu
armnn::LstmInputParams::m_InputLayerNormWeights
const ConstTensor * m_InputLayerNormWeights
Definition: LstmParams.hpp:57
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1446
armnn::LstmInputParams
Definition: LstmParams.hpp:13
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::LstmInputParamsInfo::m_RecurrentToOutputWeights
const TensorInfo * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:96
armnn::IsActivationSupported
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::Convolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:564
armnn::IsFloorSupported
bool IsFloorSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::SetupAndTrackLayerOutputSlot
bool SetupAndTrackLayerOutputSlot(const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes)
Definition: ConversionUtils.cpp:963
armnn::TensorInfo::GetQuantizationScales
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
armnn::ResizeDescriptor::m_HalfPixelCenters
bool m_HalfPixelCenters
Half Pixel Centers.
Definition: Descriptors.hpp:985
armnn::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:1006
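m_TargetShape is the only reshape parameter; a sketch (hypothetical network and shape) adding the corresponding layer:

#include <armnn/INetwork.hpp>

armnn::IConnectableLayer* AddFlatten(armnn::INetwork& network)
{
    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = armnn::TensorShape({1, 4});
    return network.AddReshapeLayer(reshapeDesc, "flatten");
}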
armnn::LstmInputParamsInfo::m_RecurrentToInputWeights
const TensorInfo * m_RecurrentToInputWeights
Definition: LstmParams.hpp:93
armnn::QuantizedLstmInputParams::m_InputGateBias
const ConstTensor * m_InputGateBias
Definition: QuantizedLstmParams.hpp:43
armnn::LstmInputParams::m_InputGateBias
const ConstTensor * m_InputGateBias
Definition: LstmParams.hpp:51
armnn::QuantizedLstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: QuantizedLstmParams.hpp:46
armnn::BaseTensor::GetMemoryArea
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
armnn_driver::GetTensorInfoForOperand
armnn::TensorInfo GetTensorInfoForOperand(const Operand &operand)
Definition: CanonicalUtils.cpp:97
armnn_driver::GetInputFloat32
bool GetInputFloat32(const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:791
armnn::LstmInputParamsInfo::m_CellToOutputWeights
const TensorInfo * m_CellToOutputWeights
Definition: LstmParams.hpp:99
armnn_driver::OperandType
::android::nn::OperandType OperandType
Definition: ConversionUtils.hpp:46
armnn_driver::IsOperandConstant
bool IsOperandConstant(const Operand &operand)
Definition: ConversionUtils.hpp:698
armnn::LstmInputParams::m_ForgetLayerNormWeights
const ConstTensor * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:58
armnn::BoostLogSeverityMapping::error
@ error
armnn::QLstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1389
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:835
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:787
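The normalization fields indexed here map onto the classic local-response-normalization setup; a sketch with hypothetical AlexNet-style values:

#include <armnn/Descriptors.hpp>

armnn::NormalizationDescriptor normDesc;
normDesc.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
normDesc.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
normDesc.m_NormSize        = 5;      // depth radius
normDesc.m_Alpha           = 1e-4f;
normDesc.m_Beta            = 0.75f;
normDesc.m_K               = 2.0f;   // kappa
normDesc.m_DataLayout      = armnn::DataLayout::NHWC;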
armnn_driver::GetInputScalar
bool GetInputScalar(const Operation &operation, uint32_t inputIndex, OperandType type, OutputType &outValue, const Model &model, const ConversionData &data, bool optional=false)
Definition: ConversionUtils.hpp:742
armnn::LstmInputParams::m_ProjectionBias
const ConstTensor * m_ProjectionBias
Definition: LstmParams.hpp:56
armnn::LstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: LstmParams.hpp:54
FORWARD_LAYER_SUPPORT_FUNC
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend,...)
Definition: ConversionUtils.hpp:153
armnn::QuantizedLstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: QuantizedLstmParams.hpp:140
armnn::LstmInputParamsInfo::m_RecurrentToCellWeights
const TensorInfo * m_RecurrentToCellWeights
Definition: LstmParams.hpp:95
armnn::OriginsDescriptor::SetViewOriginCoord
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
Definition: Descriptors.cpp:167
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn_driver::ConversionData::m_Backends
const std::vector< armnn::BackendId > m_Backends
Definition: ConversionUtils.hpp:59
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
armnn::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::IsConcatSupported
bool IsConcatSupported(const BackendId &backend, const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::InstanceNormalizationDescriptor::m_Beta
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
Definition: Descriptors.hpp:855
armnn_driver::ConvertToActivation
bool ConvertToActivation(const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:592
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:974
armnn::IsAdditionSupported
bool IsAdditionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1010
armnn::IsPadSupported
bool IsPadSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
ARMNN_NO_DEPRECATE_WARN_END
#define ARMNN_NO_DEPRECATE_WARN_END
Definition: Deprecated.hpp:34
android::nn
Definition: support_library_service.cpp:10
armnn::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.