ArmNN 23.02 - Converter.cpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Converter.hpp"
#include <half/half.hpp>
// Assumed missing include: armnnUtils::ExpandDims (used below) is declared in this header.
#include <armnnUtils/TensorUtils.hpp>

namespace armnn_driver
{

using namespace android::nn;
using Half = half_float::half;

namespace
{

} // anonymous namespace

bool Converter::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case OperationType::ABS:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
        case OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case OperationType::ARGMAX:
            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
        case OperationType::ARGMIN:
            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
        case OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case OperationType::BATCH_MATMUL:
            return ConvertBatchMatMul(operation, model, data);
        case OperationType::BATCH_TO_SPACE_ND:
            return ConvertBatchToSpaceNd(operation, model, data);
        case OperationType::CAST:
            return ConvertCast(operation, model, data);
        case OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case OperationType::DEPTH_TO_SPACE:
            return ConvertDepthToSpace(operation, model, data);
        case OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case OperationType::DIV:
            return ConvertDiv(operation, model, data);
        case OperationType::ELU:
            return ConvertElu(operation, model, data);
        case OperationType::EQUAL:
            return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
        case OperationType::EXP:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Exp);
        case OperationType::EXPAND_DIMS:
            return ConvertExpandDims(operation, model, data);
        case OperationType::FILL:
            return ConvertFill(operation, model, data);
        case OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case OperationType::GATHER:
            return ConvertGather(operation, model, data);
        case OperationType::GREATER:
            return ConvertComparison(operation, model, data, ComparisonOperation::Greater);
        case OperationType::GREATER_EQUAL:
            return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
        case OperationType::GROUPED_CONV_2D:
            return ConvertGroupedConv2d(operation, model, data);
        case OperationType::HARD_SWISH:
            return ConvertHardSwish(operation, model, data);
        case OperationType::INSTANCE_NORMALIZATION:
            return ConvertInstanceNormalization(operation, model, data);
        case OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case OperationType::LESS:
            return ConvertComparison(operation, model, data, ComparisonOperation::Less);
        case OperationType::LESS_EQUAL:
            return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
        case OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case OperationType::LOG:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Log);
        case OperationType::LOGICAL_AND:
            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
        case OperationType::LOGICAL_NOT:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
        case OperationType::LOGICAL_OR:
            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
        case OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case OperationType::LOG_SOFTMAX:
            return ConvertLogSoftmax(operation, model, data);
        case OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case OperationType::MEAN:
            return ConvertMean(operation, model, data);
        case OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case OperationType::MUL:
            return ConvertMul(operation, model, data);
        case OperationType::NEG:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
        case OperationType::NOT_EQUAL:
            return ConvertComparison(operation, model, data, ComparisonOperation::NotEqual);
        case OperationType::PAD:
            return ConvertPad(operation, model, data);
        case OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case OperationType::QUANTIZE:
            return ConvertQuantize(operation, model, data);
        case OperationType::QUANTIZED_LSTM:
            return ConvertQuantizedLstm(operation, model, data);
        case OperationType::QUANTIZED_16BIT_LSTM:
            return ConvertQuantized16BitLstm(operation, model, data);
        case OperationType::RANK:
            return ConvertRank(operation, model, data);
        case OperationType::REDUCE_MAX:
            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Max);
        case OperationType::REDUCE_MIN:
            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Min);
        case OperationType::REDUCE_SUM:
            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Sum);
        case OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
        case OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
        case OperationType::RSQRT:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
        case OperationType::SIN:
            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Sin);
        case OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case OperationType::SPACE_TO_BATCH_ND:
            return ConvertSpaceToBatchNd(operation, model, data);
        case OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case OperationType::SQRT:
            return ConvertSqrt(operation, model, data);
        case OperationType::SQUEEZE:
            return ConvertSqueeze(operation, model, data);
        case OperationType::STRIDED_SLICE:
            return ConvertStridedSlice(operation, model, data);
        case OperationType::SUB:
            return ConvertSub(operation, model, data);
        case OperationType::TRANSPOSE:
            return ConvertTranspose(operation, model, data);
        case OperationType::TRANSPOSE_CONV_2D:
            return ConvertTransposeConv2d(operation, model, data);
        case OperationType::TANH:
            return ConvertTanH(operation, model, data);
        default:
            VLOG(DRIVER) << "Operation type: " << operation.type << " is not supported in ArmnnDriver";
            return false;
    }
}
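// Note: most of the Convert* methods below follow the same pattern: resolve the input
// LayerInputHandles and the output Operand, build an armnn descriptor, query backend
// support through FORWARD_LAYER_SUPPORT_FUNC (deferred via validateFunc when the output
// tensor is dynamic), then add the layer, connect its inputs and register the output
// with SetupAndTrackLayerOutputSlot.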

bool Converter::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertAdd()";
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2, and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsAdditionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    startLayer->SetBackendId(setBackend);

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
                                        data, nullptr, validateFunc, activationFunction);
}
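// A minimal sketch of what the BroadcastTensor helper (assumed to come from the
// driver's ConversionUtils) has to do for ADD's NNAPI broadcast semantics: when the
// two input ranks differ, insert a Reshape that pads the lower-rank shape with
// leading 1s, e.g. {4} against {2, 3, 4} becomes {1, 1, 4}, and then connect both
// tensors to the given layer's input slots.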

bool Converter::ConvertArgMinMax(const Operation& operation,
                                 const Model& model,
                                 ConversionData& data,
                                 armnn::ArgMinMaxFunction argMinMaxFunction)
{
    VLOG(DRIVER) << "Converter::ConvertArgMinMax()";
    VLOG(DRIVER) << "argMinMaxFunction = " << GetArgMinMaxFunctionAsCString(argMinMaxFunction);

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    int32_t axis;
    if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
    {
        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
    }

    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
    int rank = static_cast<int>(inputInfo.GetNumDimensions());

    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        return Fail("%s: Axis must be in range [-n, n)", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ArgMinMaxDescriptor descriptor;
    descriptor.m_Function = argMinMaxFunction;
    descriptor.m_Axis = axis;

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsArgMinMaxSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   outputInfo,
                                   descriptor);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);

    input0.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertAveragePool2d()";
    return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Average, model, data);
}

bool Converter::ConvertBatchMatMul(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertBatchMatMul()";
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int rankInput0 = inputInfo0.GetNumDimensions();
    if (rankInput0 > 4 || rankInput0 < 2)
    {
        return Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
    }

    unsigned int rankInput1 = inputInfo1.GetNumDimensions();
    if (rankInput1 > 4 || rankInput1 < 2)
    {
        return Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
    }

    // Determine data type of input tensor 0
    OperandType input0Type;
    if (!GetOperandType(operation, 0, model, input0Type))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Determine data type of input tensor 1
    OperandType input1Type;
    if (!GetOperandType(operation, 1, model, input1Type))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (input0Type != input1Type)
    {
        return Fail("%s: Operation has invalid inputs (Inputs must have same OperandCode)", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::BatchMatMulDescriptor batchMatMulDesc;

    // Inputs 2 and 3 are adjoint in Android NeuralNetworks, but they perform transpose.
    // This is why we are linking them with transpose parameters in the descriptor
    batchMatMulDesc.m_TransposeX = GetOptionalBool(operation, 2, model, data);
    batchMatMulDesc.m_TransposeY = GetOptionalBool(operation, 3, model, data);
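    // E.g. if m_TransposeX is true, the last two dimensions of input 0 are swapped
    // before the multiplication, which is all the NNAPI adjoint flags amount to for
    // the real-valued tensors handled here.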

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchMatMulSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo,
                                   batchMatMulDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchMatMulLayer(batchMatMulDesc);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input0.Connect(layer->GetInputSlot(0));
    input1.Connect(layer->GetInputSlot(1));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertBatchToSpaceNd()";
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const Operand* blockOperand = GetInputOperand(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    if (Is12OrLaterOperand(*output))
    {
        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
    }
    // Setting crops to 0,0 0,0 as it is not supported in Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsBatchToSpaceNdSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   batchToSpaceNdDesc);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertCast(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertCast()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo = input.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsCastSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* layer = data.m_Network->AddCastLayer();
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertComparison(const Operation& operation,
                                  const Model& model,
                                  ConversionData& data,
                                  ComparisonOperation comparisonOperation)
{
    VLOG(DRIVER) << "Converter::ConvertComparison()";
    VLOG(DRIVER) << "comparisonOperation = " << GetComparisonOperationAsCString(comparisonOperation);

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!(input0.IsValid() && input1.IsValid()))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const TensorInfo& inputInfo1 = input1.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    ComparisonDescriptor descriptor(comparisonOperation);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsComparisonSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo0,
                                   inputInfo1,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);

    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    if (IsDynamicTensor(outputInfo))
    {
        input0.Connect(layer->GetInputSlot(0));
        input1.Connect(layer->GetInputSlot(1));
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertConcatenation()";

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();
    const bool isDynamicTensor = IsDynamicTensor(outputInfo);
    //
    // Handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }
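    // E.g. for a rank 4 output, concatDim == -1 resolves to axis 3.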

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* operand = GetInputOperand(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

            bool isSupported = false;
            armnn::BackendId setBackendReshape;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackendReshape,
                                       operandInputHandle.GetTensorInfo(),
                                       reshapeInfo,
                                       reshapeDescriptor);

            if (!isSupported)
            {
                return false;
            }
            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
            newReshape.SetBackendId(setBackendReshape);

            // Point to the reshape operation rather than the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
            }
        }
        else if (tensorDimensionsAdded == 2)
        {
            if (IsDynamicTensor(outputInfo))
            {
                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
            }
            else
            {
                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
            }
        }
    }

    // Check if permutation is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
                                                         concatDim,
                                                         permutationPair);

    // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
    if (!isDynamicTensor)
    {
        if (needPermute)
        {
            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
        }

        outputInfo.SetShape(outputShape);
    }
    // This is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
    {
        return false;
    }

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
                                                                   inputShapes.end(),
                                                                   concatDim);
    } catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!isDynamicTensor)
    {
        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
        {
            return Fail("%s: Error validating the output shape for concat", __func__);
        }
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    armnn::BackendId setBackendConcat;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConcatSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackendConcat,
                                   inputTensorInfos,
                                   outputInfo,
                                   concatDescriptor);
    };

    if (!isDynamicTensor)
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackendConcat);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // Connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    // Transpose the output shape
    auto transposeOutputShape = [&](){
        armnn::TransposeDescriptor transposeDesc;
        transposeDesc.m_DimMappings = permutationPair.second;
        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
                                                                                 permutationPair.second);
        isSupported = false;
        armnn::BackendId setBackendTranspose;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsTransposeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackendTranspose,
                                   inputTransposeInfo,
                                   outputTransposeInfo,
                                   transposeDesc);
        if (!isSupported)
        {
            return false;
        }
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
                                                                     permutationPair.second);
        deswizzleLayer.SetBackendId(setBackendTranspose);
        layer = &deswizzleLayer;

        return true;
    };

    if (needPermute && !isDynamicTensor)
    {
        transposeOutputShape();
    }

    if (inputsHaveBeenReshaped)
    {
        if (isDynamicTensor)
        {
            // Infer the output shapes of concat if outputs are type 1 dynamic
            ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
            if (!ValidateConcatOutputShape(inputShapes,
                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                           concatDim))
            {
                return Fail("%s: Error validating the output shape for concat", __func__);
            }
            transposeOutputShape();
        }

        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(
                armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
        }

        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        isSupported = false;
        armnn::BackendId setBackendReshape2;
        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsReshapeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackendReshape2,
                                       concatInfo,
                                       afterConcatInfo,
                                       reshapeDescriptor);
        };

        if (!IsDynamicTensor(afterConcatInfo))
        {
            validateReshapeFunc(afterConcatInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
        layer->SetBackendId(setBackendReshape2);
        return SetupAndTrackLayerOutputSlot(operation,
                                            0,
                                            *layer,
                                            model,
                                            data,
                                            nullptr,
                                            validateReshapeFunc);
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertConv2d()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo = input.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    Convolution2dDescriptor desc;
    desc.m_DataLayout = DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 7
                           || (operation.inputs.size() >= 8
                               && GetInputOperand(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
    }

    const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
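    // Each entry of an armnn::PermutationVector gives the destination index of the
    // corresponding source dimension, so {0, 2, 3, 1} sends O->0, H->2, W->3, I->1,
    // turning an OHWI filter into OIHW.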

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW

    if (!IsWeightsValid(operation, 1, model) && desc.m_DataLayout == DataLayout::NCHW)
    {
        return Fail("%s: Operation has unsupported weights OperandLifeTime", __func__);
    }

    LayerInputHandle weightsInput = (desc.m_DataLayout == DataLayout::NCHW)
                                    ? ConvertToLayerInputHandle(operation, 1, model, data, OHWIToOIHW, &input)
                                    : ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);

    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    biasInput.SanitizeQuantizationScale(weightsInput, input);
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();

    ActivationFn activation;
    if (implicitPadding)
    {
        ::android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputActivationFunction(operation, 6, activation, model, data)
            || !GetOptionalConvolutionDilationParams(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weightsInfo.GetShape()[widthIndex];
        const uint32_t kernelY = weightsInfo.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
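        // CalcPadding is assumed to implement the Android padding schemes here: SAME
        // distributes max((ceil(inputSize / stride) - 1) * stride + dilatedKernelSize - inputSize, 0)
        // between the low and high pads, while VALID leaves both at 0.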

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputActivationFunction(operation, 9, activation, model, data)
            || !GetOptionalConvolutionDilationParams(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    Optional<TensorInfo> biases(biasInfo);

    bool requiresValidation = true;
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);
    const Operand* biasOperand = GetInputOperand(operation, 2, model);
    if (IsConnectedToDequantize(weightsInput.GetOutputSlot())
        || IsConnectedToDequantize(biasInput.GetOutputSlot()))
    {
        // Do not require validation for now. There is an optimization step
        // [ConvertConstDequantisationLayersToConstLayers] that will convert these layers to Constant
        // layers; layer support validation is then run at the end of the optimization.
        requiresValidation = false;
        VLOG(DRIVER) << "Converter::ConvertConv2d(): Weights and Biases are as INPUTS.";
    }

    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsConvolution2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weightsInfo,
                                   biases);
    };

    if (requiresValidation)
    {
        VLOG(DRIVER) << "Converter::ConvertConv2d(): Requires Validation!";
        bool isSupported = false;
        if (!IsDynamicTensor(outputInfo))
        {
            validateFunc(outputInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    startLayer->SetBackendId(setBackend);

    input.Connect(startLayer->GetInputSlot(0));
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
}

bool Converter::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertDepthToSpace()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::DepthToSpaceDescriptor descriptor;

    GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_BlockSize, model, data);
    if (descriptor.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1 in all dimensions", __func__);
    }

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    if (Is12OrLaterOperand(*output))
    {
        descriptor.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthToSpaceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);

    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }
    // Basic sanity check on the weights shape.
    // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
    // [1, filter_height, filter_width, depth_out]
    if (weightsOperand->dimensions[0] != 1)
    {
        return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
    }
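    // depth_out here is depth_in * depth_multiplier (the I * M in the comment above),
    // as defined by ANEURALNETWORKS_DEPTHWISE_CONV_2D.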

    DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 8
                           || (operation.inputs.size() >= 9
                               && GetInputOperand(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    LayerInputHandle weightsInput = ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* biasOperand = GetInputOperand(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    biasInput.SanitizeQuantizationScale(weightsInput, input);
    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();

    ActivationFn activation;
    if (implicitPadding)
    {
        ::android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputActivationFunction(operation, 7, activation, model, data)
            || !GetOptionalConvolutionDilationParams(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        const uint32_t kernelX = weightsInfo.GetShape()[2];
        const uint32_t kernelY = weightsInfo.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputActivationFunction(operation, 10, activation, model, data)
            || !GetOptionalConvolutionDilationParams(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    Optional<TensorInfo> biases(biasInfo);

    bool requiresValidation = true;
    if (IsConnectedToDequantize(weightsInput.GetOutputSlot()) || IsConnectedToDequantize(biasInput.GetOutputSlot()))
    {
        // Do not require validation for now. There is an optimization step
        // [ConvertConstDequantisationLayersToConstLayers] that will convert these layers to Constant
        // layers; layer support validation is then run at the end of the optimization.
        requiresValidation = false;
        VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Weights and Biases are as INPUTS.";
    }

    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDepthwiseConvolutionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc,
                                   weightsInfo,
                                   biases);
    };

    if (requiresValidation)
    {
        VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Requires Validation!";
        bool isSupported = false;
        if (!IsDynamicTensor(outputInfo))
        {
            validateFunc(outputInfo, isSupported);
        }
        else
        {
            isSupported = AreDynamicTensorsSupported();
        }

        if (!isSupported)
        {
            return false;
        }
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    startLayer->SetBackendId(setBackend);

    input.Connect(startLayer->GetInputSlot(0));

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
}

bool Converter::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertDequantize()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
    if (quantizationDim.has_value() && quantizationDim.value() != 0)
    {
        return Fail("%s: Operation has quantization dimension different than 0", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDequantizeSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertDiv()";

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsDivisionSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input0.GetTensorInfo(),
                                   input1.GetTensorInfo(),
                                   outputInfo);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    startLayer->SetBackendId(setBackend);

    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
                                        data, nullptr, validateFunc, activationFunction);
}

bool Converter::ConvertElementwiseUnary(const Operation& operation,
                                        const Model& model,
                                        ConversionData& data,
                                        UnaryOperation unaryOperation)
{
    VLOG(DRIVER) << "Converter::ConvertElementwiseUnary()";
    VLOG(DRIVER) << "unaryOperation = " << GetUnaryOperationAsCString(unaryOperation);

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const TensorInfo& inputInfo = input.GetTensorInfo();
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    ElementwiseUnaryDescriptor descriptor(unaryOperation);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsElementwiseUnarySupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
    assert(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

bool Converter::ConvertElu(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertElu()";

    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input0.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Determine data type of input tensor
    OperandType inputType;
    if (!GetOperandType(operation, 0, model, inputType))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    ActivationDescriptor desc;
    desc.m_Function = ActivationFunction::Elu;

    // Read alpha
    if (inputType == OperandType::TENSOR_FLOAT16)
    {
        Half alpha;

        if (!GetInputScalar(operation, 1, OperandType::FLOAT16, alpha, model, data))
        {
            return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
        }

        desc.m_A = static_cast<float>(alpha);
    }
    else if (inputType == OperandType::TENSOR_FLOAT32)
    {
        if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_A, model, data))
        {
            return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
    }

    return ::ConvertToActivation(operation, __func__, desc, model, data);
}

1685 bool Converter::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
1686 {
1687  VLOG(DRIVER) << "Converter::ConvertExpandDims()";
1688 
1689  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1690 
1691  if (!input.IsValid())
1692  {
1693  return Fail("%s: Operation has invalid input", __func__);
1694  }
1695 
1696  const Operand* output = GetOutputOperand(operation, 0, model);
1697  if (!output)
1698  {
1699  return Fail("%s: Operation has invalid output", __func__);
1700  }
1701 
1702  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1703 
1704  int32_t axis;
1705  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
1706  {
1707  return Fail("%s: failed to get axis input value", __func__);
1708  }
1709 
1710  TensorShape targetShape;
1711 
1712  try
1713  {
1714  targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
1715  }
1716  catch (const std::exception& e)
1717  {
1718  return Fail("%s: %s", __func__, e.what());
1719  }
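 // Illustrative example (not from the original source): expanding shape [2, 3] at axis 1
 // yields [2, 1, 3]; a negative axis counts back from the end of the expanded shape.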
1720 
1721  ReshapeDescriptor reshapeDescriptor;
1722  reshapeDescriptor.m_TargetShape = targetShape;
1723 
1724  bool isSupported = false;
1725  armnn::BackendId setBackend;
1726  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1727  {
1728  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1729  IsReshapeSupported,
1730  data.m_Backends,
1731  isSupported,
1732  setBackend,
1733  input.GetTensorInfo(),
1734  outputInfo,
1735  reshapeDescriptor);
1736  };
1737 
1738  if(!IsDynamicTensor(outputInfo))
1739  {
1740  if (targetShape != outputInfo.GetShape())
1741  {
1742  return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
1743  }
1744  validateFunc(outputInfo, isSupported);
1745  }
1746  else
1747  {
1748  isSupported = AreDynamicTensorsSupported();
1749  }
1750 
1751  if (!isSupported)
1752  {
1753  return false;
1754  }
1755 
1756  IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1757  assert(layer != nullptr);
1758  layer->SetBackendId(setBackend);
1759  input.Connect(layer->GetInputSlot(0));
1760 
1761  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1762 }
1763 
1764 bool Converter::ConvertFill(const Operation& operation, const Model& model, ConversionData& data)
1765 {
1766  VLOG(DRIVER) << "Converter::ConvertFill()";
1767  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1768  if (!input.IsValid())
1769  {
1770  return Fail("%s: Operation has invalid inputs", __func__);
1771  }
1772 
1773  const Operand* output = GetOutputOperand(operation, 0, model);
1774  if (!output)
1775  {
1776  return Fail("%s: Could not read output", __func__);
1777  }
1778 
1779  const TensorInfo& inputInfo = input.GetTensorInfo();
1780  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1781  if (IsDynamicTensor(outputInfo))
1782  {
1783  return Fail("%s: Dynamic output tensors are not supported", __func__);
1784  }
1785 
1786  // Determine data type of output tensor
1787  OperandType outputType = output->type;
1788  FillDescriptor descriptor;
1789  // Read the scalar fill value
1790  if (outputType == OperandType::TENSOR_FLOAT16)
1791  {
1792  Half value;
1793 
1794  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
1795  {
1796  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1797  }
1798 
1799  descriptor.m_Value = static_cast<float>(value);
1800  }
1801  else if (outputType == OperandType::TENSOR_FLOAT32)
1802  {
1803  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Value, model, data))
1804  {
1805  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1806  }
1807  }
1808  else if (outputType == OperandType::TENSOR_INT32)
1809  {
1810  int32_t value;
1811 
1812  if (!GetInputScalar(operation, 1, OperandType::INT32, value, model, data))
1813  {
1814  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1815  }
1816 
1817  descriptor.m_Value = static_cast<float>(value);
1818  }
1819  else
1820  {
1821  return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
1822  }
1823 
1824  bool isSupported = false;
1825  armnn::BackendId setBackend;
1826  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1827  IsFillSupported,
1828  data.m_Backends,
1829  isSupported,
1830  setBackend,
1831  inputInfo,
1832  outputInfo,
1833  descriptor);
1834  if (!isSupported)
1835  {
1836  return false;
1837  }
1838 
1839  IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
1840  assert(layer != nullptr);
1841  layer->SetBackendId(setBackend);
1842  input.Connect(layer->GetInputSlot(0));
1843 
1844  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1845 }
1846 
1847 bool Converter::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
1848 {
1849  VLOG(DRIVER) << "Converter::ConvertFloor()";
1850  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1851  if (!input.IsValid())
1852  {
1853  return Fail("%s: Operation has invalid inputs", __func__);
1854  }
1855 
1856  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
1857  if (!outputOperand)
1858  {
1859  return Fail("%s: Operation has invalid outputs", __func__);
1860  }
1861 
1862  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1863 
1864  bool isSupported = false;
1865  armnn::BackendId setBackend;
1866  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1867  {
1868  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1869  IsFloorSupported,
1870  data.m_Backends,
1871  isSupported,
1872  setBackend,
1873  input.GetTensorInfo(),
1874  outputInfo);
1875  };
1876 
1877  if(!IsDynamicTensor(outputInfo))
1878  {
1879  validateFunc(outputInfo, isSupported);
1880  }
1881  else
1882  {
1883  isSupported = AreDynamicTensorsSupported();
1884  }
1885 
1886  if (!isSupported)
1887  {
1888  return false;
1889  }
1890 
1891  armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
1892  assert(layer != nullptr);
1893  layer->SetBackendId(setBackend);
1894  input.Connect(layer->GetInputSlot(0));
1895 
1896  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1897 }
1898 
1899 bool Converter::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
1900 {
1901  VLOG(DRIVER) << "Converter::ConvertFullyConnected()";
1902  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1903  if (!input.IsValid())
1904  {
1905  return Fail("%s: Operation has invalid inputs", __func__);
1906  }
1907 
1908  const Operand* output = GetOutputOperand(operation, 0, model);
1909  if (!output)
1910  {
1911  return Fail("%s: Could not read output 0", __func__);
1912  }
1913 
1914  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1915  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1916 
1917  LayerInputHandle weightsInput = LayerInputHandle();
1918  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1919  if (!weightsOperand)
1920  {
1921  return Fail("%s: Could not read weights", __func__);
1922  }
1923 
1924  // If the weights are constant, a separate constant layer is created to store the data;
1925  // otherwise the non-const weights are handled as a regular input.
1926  weightsInput = ConvertToLayerInputHandle(operation, 1, model, data);
1927  if (!weightsInput.IsValid())
1928  {
1929  return Fail("%s: Operation has invalid inputs", __func__);
1930  }
1931 
1932  LayerInputHandle biasInput = LayerInputHandle();
1933  const Operand* biasOperand = GetInputOperand(operation, 2, model);
1934  if (!biasOperand)
1935  {
1936  return Fail("%s: Could not read bias", __func__);
1937  }
1938 
1939  // If the bias is constant, a separate constant layer is created to store the data;
1940  // otherwise the non-const bias is handled as a regular input.
1941  biasInput = ConvertToLayerInputHandle(operation, 2, model, data); // 1D
1942  if (!biasInput.IsValid())
1943  {
1944  return Fail("%s: Operation has invalid inputs", __func__);
1945  }
1946 
1947  armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1948  armnn::TensorInfo reshapedInfo = inputInfo;
1949  try
1950  {
1951  reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
1952  }
1953  catch (const std::exception& e)
1954  {
1955  return Fail("%s: %s", __func__, e.what());
1956  }
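 // FlattenFullyConnectedInput (a driver helper) collapses the N-D input to the 2-D
 // [batchSize, inputSize] shape expected by FullyConnected, with inputSize taken from the
 // weights; e.g. an input of [2, 4, 8] with weights [16, 32] is expected to flatten to
 // [2, 32] (illustrative shapes only).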
1957 
1958  // Ensure the bias quantization scale is within 1% of inputScale * weightsScale (small float differences can exist)
1959  armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1960  SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
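 // For quantized tensors the expected relation is biasScale = inputScale * weightsScale;
 // e.g. inputScale 0.5 and weightsScale 0.02 give an expected biasScale of 0.01
 // (illustrative values only).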
1961 
1962  ActivationFn activationFunction;
1963  if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
1964  {
1965  return Fail("%s: Operation has invalid inputs", __func__);
1966  }
1967 
1968  FullyConnectedDescriptor desc;
1969  desc.m_TransposeWeightMatrix = true;
1970  desc.m_BiasEnabled = true;
1971  desc.m_ConstantWeights = IsOperandConstant(*weightsOperand);
1972 
1973  bool isSupported = false;
1974  armnn::BackendId setBackend;
1975  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1976  {
1977  if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
1978  weightsInfo.GetShape(),
1979  outputInfo.GetShape(),
1980  desc.m_TransposeWeightMatrix))
1981  {
1982  isSupported = false;
1983  Fail("%s: Expected outputShape does not match actual outputShape", __func__);
1984  return;
1985  }
1986 
1987  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1988  IsFullyConnectedSupported,
1989  data.m_Backends,
1990  isSupported,
1991  setBackend,
1992  reshapedInfo,
1993  outputInfo,
1994  weightsInfo,
1995  biasInfo,
1996  desc);
1997  };
1998 
1999  if(!IsDynamicTensor(outputInfo))
2000  {
2001  validateFunc(outputInfo, isSupported);
2002  }
2003  else
2004  {
2005  isSupported = AreDynamicTensorsSupported();
2006  }
2007 
2008  if (!isSupported)
2009  {
2010  return false;
2011  }
2012 
2013  // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
2014  armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
2015  startLayer->SetBackendId(setBackend);
2016 
2017  if (inputInfo.GetNumDimensions() > 2U)
2018  {
2019  armnn::ReshapeDescriptor reshapeDescriptor;
2020  reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
2021 
2022  armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
2023  assert(reshapeLayer != nullptr);
2024  input.Connect(reshapeLayer->GetInputSlot(0));
2025  reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
2026  reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
2027  }
2028  else
2029  {
2030  input.Connect(startLayer->GetInputSlot(0));
2031  }
2032 
2033  // Connect weights and bias inputs
2034  weightsInput.Connect(startLayer->GetInputSlot(1));
2035  biasInput.Connect(startLayer->GetInputSlot(2));
2036 
2037  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
2038  data, nullptr, validateFunc, activationFunction);
2039 }
2040 
2041 bool Converter::ConvertGather(const Operation& operation, const Model& model, ConversionData& data)
2042 {
2043  VLOG(DRIVER) << "Converter::ConvertGather()";
2044 
2045  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2046  if (!input.IsValid())
2047  {
2048  return Fail("%s: Operation has invalid input", __func__);
2049  }
2050  auto inputDimensions = input.GetTensorInfo().GetNumDimensions();
2051 
2052  LayerInputHandle indices = ConvertToLayerInputHandle(operation, 2, model, data);
2053  if (!indices.IsValid())
2054  {
2055  return Fail("%s: Operation has invalid indices", __func__);
2056  }
2057  auto indicesDimensions = indices.GetTensorInfo().GetNumDimensions();
2058 
2059  const Operand* output = GetOutputOperand(operation, 0, model);
2060  if (!output)
2061  {
2062  return Fail("%s: Operation has invalid output", __func__);
2063  }
2064  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2065  auto outputDimensions = outputInfo.GetNumDimensions();
2066  if (outputDimensions != inputDimensions + indicesDimensions - 1)
2067  {
2068  return Fail("%s: Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
2069  __func__, outputDimensions, inputDimensions, indicesDimensions);
2070  }
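 // Illustrative example (not from the original source): a 3-D input gathered with 2-D
 // indices must produce a (3 + 2 - 1) = 4-D output.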
2071 
2072  int32_t axis;
2073  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
2074  {
2075  return Fail("%s: Operation has invalid or unsupported axis operand", __func__);
2076  }
2077  if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2078  {
2079  return Fail("%s: Operation has invalid axis: %d. It is out of bounds [-%d, %d)", __func__, axis,
2080  inputDimensions, inputDimensions);
2081  }
2082 
2083  GatherDescriptor desc;
2084  desc.m_Axis = axis;
2085 
2086  bool isSupported = false;
2087  armnn::BackendId setBackend;
2088  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2089  {
2090  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2091  IsGatherSupported,
2092  data.m_Backends,
2093  isSupported,
2094  setBackend,
2095  input.GetTensorInfo(),
2096  indices.GetTensorInfo(),
2097  outputInfo,
2098  desc);
2099  };
2100 
2101  if(!IsDynamicTensor(outputInfo))
2102  {
2103  validateFunc(outputInfo, isSupported);
2104  }
2105  else
2106  {
2107  isSupported = AreDynamicTensorsSupported();
2108  }
2109 
2110  if (!isSupported)
2111  {
2112  return false;
2113  }
2114 
2115  IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
2116  assert(layer != nullptr);
2117  layer->SetBackendId(setBackend);
2118  input.Connect(layer->GetInputSlot(0));
2119  indices.Connect(layer->GetInputSlot(1));
2120 
2121  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2122 }
2123 
2124 bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data)
2125 {
2126  VLOG(DRIVER) << "Converter::ConvertGroupedConv2d()";
2127  //
2128  // Parse data
2129  //
2130  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2131  if (!input.IsValid())
2132  {
2133  return Fail("%s: Operation has invalid inputs", __func__);
2134  }
2135  const TensorInfo& inputInfo = input.GetTensorInfo();
2136 
2137  const Operand* output = GetOutputOperand(operation, 0, model);
2138  if (!output)
2139  {
2140  return Fail("%s: Could not read output 0", __func__);
2141  }
2142  TensorInfo outputInfo = GetTensorInfoForOperand(*output);
2143 
2144  // Look ahead to determine data layout
2145  DataLayout dataLayout = DataLayout::NHWC;
2146  if (operation.inputs.size() == 12)
2147  {
2148  dataLayout = OptionalDataLayout(operation, 11, model, data);
2149  }
2150  else
2151  {
2152  dataLayout = OptionalDataLayout(operation, 8, model, data);
2153  }
2154 
2155  // NOTE:
2156  // NNAPI weights are always OHWI, i.e. [depth_out, filter_height, filter_width, depth_group],
2157  // but Arm NN expects the filter's height and width indices to match the input's height and
2158  // width indices so when the DataLayout is NCHW, we need to permute the weights to OIHW
2159  const PermutationVector ohwiToOihw = { 0u, 2u, 3u, 1u };
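 // In an ArmNN PermutationVector, entry i gives the destination index of source dimension i,
 // so { 0, 2, 3, 1 } sends O->0, H->2, W->3 and I->1, i.e. OHWI becomes OIHW.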
2160  const ConstTensorPin weightsPin = (dataLayout == DataLayout::NCHW) ?
2161  ConvertOperationInputToConstTensorPin(operation, 1,
2162  model, data, ohwiToOihw) :
2163  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
2164  const ConstTensorPin biasesPin =
2165  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
2166  if (!weightsPin.IsValid() || !biasesPin.IsValid())
2167  {
2168  return Fail("%s: Operation has invalid inputs", __func__);
2169  }
2170 
2171  ConstTensor weights = weightsPin.GetConstTensor();
2172  ConstTensor biases = biasesPin.GetConstTensor();
2173  SanitizeBiasQuantizationScale(biases.GetInfo(), weights.GetInfo(), inputInfo);
2174 
2175  const TensorShape& inputShape = inputInfo.GetShape();
2176  const TensorShape& outputShape = outputInfo.GetShape();
2177  const TensorShape& weightsShape = weights.GetShape();
2178  const TensorShape& biasesShape = biases.GetShape();
2179 
2180  armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
2181  const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
2182  const unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
2183  const unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
2184 
2185  Convolution2dDescriptor desc;
2186  desc.m_DataLayout = dataLayout;
2187  desc.m_BiasEnabled = true;
2188 
2189  int numGroups;
2190  ActivationFn activation;
2191 
2192  if (operation.inputs.size() == 12)
2193  {
2194  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
2195  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
2196  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
2197  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
2198  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
2199  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
2200  !GetInputScalar(operation, 9, OperandType::INT32, numGroups, model, data) ||
2201  !GetInputActivationFunction(operation, 10, activation, model, data))
2202  {
2203  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
2204  }
2205 
2206  }
2207  else if (operation.inputs.size() == 9)
2208  {
2209  ::android::nn::PaddingScheme paddingScheme;
2210  if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
2211  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
2212  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
2213  !GetInputScalar(operation, 6, OperandType::INT32, numGroups, model, data) ||
2214  !GetInputActivationFunction(operation, 7, activation, model, data))
2215  {
2216  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
2217  }
2218 
2219  const uint32_t inputX = inputInfo.GetShape()[widthIndex];
2220  const uint32_t inputY = inputInfo.GetShape()[heightIndex];
2221 
2222  const uint32_t kernelX = weightsShape[widthIndex];
2223  const uint32_t kernelY = weightsShape[heightIndex];
2224 
2225  CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2226  CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2227  }
2228  else
2229  {
2230  return Fail("%s: Unsupported number of operation inputs", __func__);
2231  }
2232 
2233  // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
2234  const unsigned int outputChannels = weightsShape[0];
2235 
2236  //
2237  // Validate all relevant inputs
2238  //
2239  if (numGroups <= 0)
2240  {
2241  return Fail("%s: Number of groups must be greater than 0. Got: %d", __func__, numGroups);
2242  }
2243 
2244  if (outputChannels % numGroups != 0u)
2245  {
2246  return Fail("%s: Output channels must be divisible by the number of groups", __func__);
2247  }
2248 
2249  const unsigned int channelsPerGroup = weightsShape[channelsIndex];
2250  const unsigned int channelMultiplier = outputChannels / numGroups;
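 // e.g. 12 output channels with numGroups = 4 gives channelMultiplier = 3, i.e. three
 // convolutions per group after the split (illustrative numbers only).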
2251 
2252  //
2253  // Set up Splitter layer
2254  //
2255  unsigned int splitterDimSizes[4] = { inputShape[0], inputShape[1], inputShape[2], inputShape[3] };
2256  splitterDimSizes[channelsIndex] /= numGroups; // split in depth
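 // e.g. an NHWC input of [1, 16, 16, 8] with numGroups = 4 is split into four
 // [1, 16, 16, 2] views along the channel axis (illustrative shapes only).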
2257 
2258  TensorInfo splitterOutputInfo(4,
2259  splitterDimSizes,
2260  inputInfo.GetDataType(),
2261  inputInfo.GetQuantizationScale(),
2262  inputInfo.GetQuantizationOffset());
2263 
2264  std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(numGroups, std::ref(splitterOutputInfo));
2265 
2266  ViewsDescriptor splitterDesc(numGroups);
2267  for (unsigned int group = 0u; group < numGroups; ++group)
2268  {
2269  splitterDesc.SetViewOriginCoord(group, channelsIndex, splitterDimSizes[channelsIndex] * group);
2270  for (unsigned int dimIdx = 0u; dimIdx < 4u; dimIdx++)
2271  {
2272  splitterDesc.SetViewSize(group, dimIdx, splitterDimSizes[dimIdx]);
2273  }
2274  }
2275 
2276  bool isSupported = false;
2277  armnn::BackendId setBackendSplit;
2278  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2279  IsSplitterSupported,
2280  data.m_Backends,
2281  isSupported,
2282  setBackendSplit,
2283  inputInfo,
2284  splitterOutputInfos,
2285  splitterDesc);
2286  if (!isSupported)
2287  {
2288  return false;
2289  }
2290 
2291  IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
2292  if (!splitterLayer)
2293  {
2294  return Fail("%s: Failed to add SplitterLayer", __func__);
2295  }
2296  splitterLayer->SetBackendId(setBackendSplit);
2297 
2298  input.Connect(splitterLayer->GetInputSlot(0));
2299  for (unsigned int group = 0u; group < splitterLayer->GetNumOutputSlots(); ++group)
2300  {
2301  splitterLayer->GetOutputSlot(group).SetTensorInfo(splitterOutputInfo);
2302  }
2303 
2304  //
2305  // Set up Convolution2d layers for each group
2306  //
2307 
2308  // Set up group tensor shapes
2309  TensorShape groupInputShape(inputShape);
2310  groupInputShape[channelsIndex] = channelsPerGroup;
2311 
2312  TensorShape groupWeightsShape(weightsShape);
2313  groupWeightsShape[0] /= channelMultiplier * numGroups;
2314 
2315  TensorShape groupBiasesShape({ 1 });
2316 
2317  // Set up group tensor infos
2318  TensorInfo groupInputInfo(inputInfo);
2319  groupInputInfo.SetShape(groupInputShape);
2320 
2321  const TensorInfo& weightsInfo = weights.GetInfo();
2322  TensorInfo groupWeightsInfo(weightsInfo);
2323  groupWeightsInfo.SetShape(groupWeightsShape);
2324 
2325  const TensorInfo& biasesInfo = biases.GetInfo();
2326  TensorInfo groupBiasesInfo(biasesInfo);
2327  groupBiasesInfo.SetShape(groupBiasesShape);
2328 
2329  TensorInfo groupOutputInfo(outputInfo);
2330 
2331  TensorShape groupOutputShape(outputShape);
2332  const bool isDynamic = IsDynamicTensor(outputInfo);
2333  if (!isDynamic)
2334  {
2335  groupOutputShape[channelsIndex] = 1;
2336  }
2337  groupOutputInfo.SetShape(groupOutputShape);
2338 
2339  const unsigned int weightsDataTypeSize = GetDataTypeSize(groupWeightsInfo.GetDataType());
2340  const unsigned int biasesDataTypeSize = GetDataTypeSize(groupBiasesInfo.GetDataType());
2341 
2342  std::vector<IConnectableLayer*> convLayers(numGroups * channelMultiplier, nullptr);
2343  for (unsigned int group = 0u; group < numGroups; ++group)
2344  {
2345  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2346  {
2347  auto index = group * channelMultiplier + m;
2348 
2349  const unsigned int weightsDataOffset = groupWeightsShape.GetNumElements() * index * weightsDataTypeSize;
2350  const unsigned int biasesDataOffset = groupBiasesShape.GetNumElements() * index * biasesDataTypeSize;
2351 
2352  if (weightsInfo.HasPerAxisQuantization())
2353  {
2354  // Extract per-axis quantization scales for group weights
2355  const std::vector<float>& weightsQuantScales = weightsInfo.GetQuantizationScales();
2356  groupWeightsInfo.SetQuantizationScales(
2357  std::vector<float>(weightsQuantScales.begin() + index,
2358  weightsQuantScales.begin() + index + groupWeightsShape[0]));
2359 
2360  // Extract per-axis quantization scales for group biases
2361  const std::vector<float>& biasesQuantScales = biasesInfo.GetQuantizationScales();
2362  groupBiasesInfo.SetQuantizationScales(
2363  std::vector<float>(biasesQuantScales.begin() + index,
2364  biasesQuantScales.begin() + index + groupWeightsShape[0]));
2365  }
2366 
2367  // Extract weights and biases data for current group convolution
2368  ConstTensor groupWeights(groupWeightsInfo,
2369  static_cast<const void *>(reinterpret_cast<const char *>(weights.GetMemoryArea()) +
2370  weightsDataOffset));
2371  ConstTensor groupBiases(groupBiasesInfo,
2372  static_cast<const void *>(reinterpret_cast<const char *>(biases.GetMemoryArea()) +
2373  biasesDataOffset));
2374 
2375  isSupported = false;
2376  armnn::BackendId setBackendConv;
2377  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2378  {
2379  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2380  IsConvolution2dSupported,
2381  data.m_Backends,
2382  isSupported,
2383  setBackendConv,
2384  groupInputInfo,
2385  outputInfo,
2386  desc,
2387  groupWeightsInfo,
2388  Optional<TensorInfo>(groupBiasesInfo));
2389  };
2390 
2391  if(!isDynamic)
2392  {
2393  validateFunc(groupOutputInfo, isSupported);
2394  }
2395  else
2396  {
2397  isSupported = AreDynamicTensorsSupported();
2398  }
2399 
2400  if (!isSupported)
2401  {
2402  return false;
2403  }
2404 
2405  IConnectableLayer* weightsLayer = data.m_Network->AddConstantLayer(groupWeights);
2406  IConnectableLayer* biasLayer = data.m_Network->AddConstantLayer(groupBiases);
2407  IConnectableLayer* convLayer = data.m_Network->AddConvolution2dLayer(desc);
2408 
2409  if (!convLayer)
2410  {
2411  return Fail("%s: AddConvolution2dLayer failed", __func__);
2412  }
2413 
2414  convLayer->SetBackendId(setBackendConv);
2415 
2416  splitterLayer->GetOutputSlot(group).Connect(convLayer->GetInputSlot(0));
2417  weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
2418  biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
2419 
2420  weightsLayer->GetOutputSlot(0).SetTensorInfo(groupWeightsInfo);
2421  biasLayer->GetOutputSlot(0).SetTensorInfo(groupBiasesInfo);
2422  convLayer->GetOutputSlot(0).SetTensorInfo(groupOutputInfo);
2423 
2424  if(isDynamic)
2425  {
2426  convLayer->GetOutputSlot(0).IsTensorInfoSet();
2427 
2428  validateFunc(convLayer->GetOutputSlot(0).GetTensorInfo(), isSupported);
2429 
2430  outputInfo = convLayer->GetOutputSlot(0).GetTensorInfo();
2431 
2432  if (!isSupported)
2433  {
2434  return false;
2435  }
2436  }
2437 
2438  convLayers[index] = convLayer;
2439  }
2440  }
2441 
2442  //
2443  // Set up Concat layer
2444  //
2445  ConcatDescriptor concatDescriptor;
2446  // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
2447  concatDescriptor = ConcatDescriptor(weightsShape[0]);
2448  for (unsigned int group = 0u; group < numGroups; ++group)
2449  {
2450  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2451  {
2452  auto index = group * channelMultiplier + m;
2453  concatDescriptor.SetViewOriginCoord(index, channelsIndex, index);
2454  concatDescriptor.SetConcatAxis(channelsIndex);
2455  }
2456  }
2457 
2458  isSupported = false;
2459  armnn::BackendId setBackendConcat;
2460  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2461  IsConcatSupported,
2462  data.m_Backends,
2463  isSupported,
2464  setBackendConcat,
2465  std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
2466  outputInfo,
2467  concatDescriptor);
2468 
2469  if (!isSupported)
2470  {
2471  return false;
2472  }
2473 
2474  IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
2475  if (!concatLayer)
2476  {
2477  return Fail("%s: AddConcatLayer failed", __func__);
2478  }
2479  concatLayer->SetBackendId(setBackendConcat);
2480 
2481  for (unsigned int group = 0u; group < numGroups; ++group)
2482  {
2483  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2484  {
2485  auto index = group * channelMultiplier + m;
2486  convLayers[index]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(index));
2487  }
2488  }
2489  concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2490 
2491  return SetupAndTrackLayerOutputSlot(operation, 0, *concatLayer, model,
2492  data, nullptr, nullptr, activation);
2493 }
2494 
2495 bool Converter::ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data)
2496 {
2497  VLOG(DRIVER) << "Converter::ConvertHardSwish()";
2498  ActivationDescriptor desc;
2499  desc.m_Function = ActivationFunction::HardSwish;
2500 
2501  return ::ConvertToActivation(operation, __func__, desc, model, data);
2502 }
2503 
2504 bool Converter::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
2505 {
2506  VLOG(DRIVER) << "Converter::ConvertInstanceNormalization()";
2507 
2508  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2509  if (!input.IsValid())
2510  {
2511  return Fail("%s: Operation has an invalid input 0", __func__);
2512  }
2513 
2514  const Operand* output = GetOutputOperand(operation, 0, model);
2515  if (!output)
2516  {
2517  return Fail("%s: Operation has an invalid output", __func__);
2518  }
2519 
2520  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2521 
2522  // Determine data type of input tensor
2523  OperandType inputType;
2524  if (!GetOperandType(operation, 0, model, inputType))
2525  {
2526  return Fail("%s: Operation has invalid inputs", __func__);
2527  }
2528 
2529  InstanceNormalizationDescriptor desc;
2530 
2531  // Read gamma, beta & epsilon
2532  if (inputType == OperandType::TENSOR_FLOAT16)
2533  {
2534  Half fp16Gamma;
2535  Half fp16Beta;
2536  Half fp16Epsilon;
2537 
2538  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Gamma, model, data) ||
2539  !GetInputScalar(operation, 2, OperandType::FLOAT16, fp16Beta, model, data) ||
2540  !GetInputScalar(operation, 3, OperandType::FLOAT16, fp16Epsilon, model, data))
2541  {
2542  return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
2543  }
2544 
2545  desc.m_Gamma = static_cast<float>(fp16Gamma);
2546  desc.m_Beta = static_cast<float>(fp16Beta);
2547  desc.m_Eps = static_cast<float>(fp16Epsilon);
2548  }
2549  else if (inputType == OperandType::TENSOR_FLOAT32)
2550  {
2551  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_Gamma, model, data) ||
2552  !GetInputScalar(operation, 2, OperandType::FLOAT32, desc.m_Beta, model, data) ||
2553  !GetInputScalar(operation, 3, OperandType::FLOAT32, desc.m_Eps, model, data))
2554  {
2555  return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
2556  }
2557  }
2558  else
2559  {
2560  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2561  }
2562 
2563  desc.m_DataLayout = OptionalDataLayout(operation, 4, model, data);
2564 
2565  bool isSupported = false;
2566  armnn::BackendId setBackend;
2567  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2568  {
2569  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2570  IsInstanceNormalizationSupported,
2571  data.m_Backends,
2572  isSupported,
2573  setBackend,
2574  input.GetTensorInfo(),
2575  outputInfo,
2576  desc);
2577  };
2578 
2579  if(IsDynamicTensor(outputInfo))
2580  {
2581  isSupported = AreDynamicTensorsSupported();
2582  }
2583  else
2584  {
2585  validateFunc(outputInfo, isSupported);
2586  }
2587 
2588  if (!isSupported)
2589  {
2590  return false;
2591  }
2592 
2593  IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
2594  layer->SetBackendId(setBackend);
2595  input.Connect(layer->GetInputSlot(0));
2596 
2597  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2598 }
2599 
2600 bool Converter::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2601 {
2602  VLOG(DRIVER) << "Converter::ConvertL2Normalization()";
2603 
2604  if (operation.inputs.size() != 1)
2605  {
2606  return Fail("%s: Optional inputs are not supported", __func__);
2607  }
2608 
2609  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2610  if (!input.IsValid())
2611  {
2612  return Fail("%s: Operation has invalid inputs", __func__);
2613  }
2614 
2615  const Operand* output = GetOutputOperand(operation, 0, model);
2616  if (!output)
2617  {
2618  return Fail("%s: Could not read output 0", __func__);
2619  }
2620 
2621  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2622  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2623 
2624  if (outputInfo.GetNumDimensions() != 4u)
2625  {
2626  return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2627  }
2628 
2629  armnn::L2NormalizationDescriptor desc;
2630  desc.m_DataLayout = armnn::DataLayout::NHWC;
2631 
2632  bool isSupported = false;
2633  armnn::BackendId setBackend;
2634  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2635  {
2636  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2637  IsL2NormalizationSupported,
2638  data.m_Backends,
2639  isSupported,
2640  setBackend,
2641  inputInfo,
2642  outputInfo,
2643  desc);
2644  };
2645 
2646  if(!IsDynamicTensor(outputInfo))
2647  {
2648  validateFunc(outputInfo, isSupported);
2649  }
2650  else
2651  {
2652  isSupported = AreDynamicTensorsSupported();
2653  }
2654 
2655  if (!isSupported)
2656  {
2657  return false;
2658  }
2659 
2660  armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2661  assert(layer != nullptr);
2662  layer->SetBackendId(setBackend);
2663  input.Connect(layer->GetInputSlot(0));
2664 
2665  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2666 }
2667 
2668 bool Converter::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
2669 {
2670  VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
2671  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::L2, model, data);
2672 }
2673 
2674 bool Converter::ConvertLocalResponseNormalization(const Operation& operation,
2675  const Model& model,
2676  ConversionData& data)
2677 {
2678  VLOG(DRIVER) << "Converter::ConvertLocalResponseNormalization()";
2679 
2680  if (operation.inputs.size() != 5)
2681  {
2682  return Fail("%s: Optional inputs are not supported", __func__);
2683  }
2684 
2685  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2686  if (!input.IsValid())
2687  {
2688  return Fail("%s: Operation has invalid inputs", __func__);
2689  }
2690 
2691  const Operand* output = GetOutputOperand(operation, 0, model);
2692  if (!output)
2693  {
2694  return Fail("%s: Could not read output 0", __func__);
2695  }
2696 
2697  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2698  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2699 
2700  if (outputInfo.GetNumDimensions() != 4u)
2701  {
2702  return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2703  }
2704 
2705  armnn::NormalizationDescriptor descriptor;
2706  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2707  descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2708  descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2709 
2710  if (!input.IsValid() ||
2711  !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2712  !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
2713  !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
2714  !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
2715  {
2716  return Fail("%s: Operation has invalid inputs", __func__);
2717  }
2718 
2719  // ArmNN expects normSize to be the full size of the normalization
2720  // window rather than the radius as in AndroidNN.
2721  descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
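 // e.g. an NNAPI radius of 2 becomes an Arm NN normSize of 1 + (2 * 2) = 5.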
2722 
2723  bool isSupported = false;
2724  armnn::BackendId setBackend;
2725  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2726  {
2727  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2728  IsNormalizationSupported,
2729  data.m_Backends,
2730  isSupported,
2731  setBackend,
2732  inputInfo,
2733  outputInfo,
2734  descriptor);
2735  };
2736 
2737  if(!IsDynamicTensor(outputInfo))
2738  {
2739  validateFunc(outputInfo, isSupported);
2740  }
2741  else
2742  {
2743  isSupported = AreDynamicTensorsSupported();
2744  }
2745 
2746  if (!isSupported)
2747  {
2748  return false;
2749  }
2750 
2751 
2752  armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2753  assert(layer != nullptr);
2754  layer->SetBackendId(setBackend);
2755  input.Connect(layer->GetInputSlot(0));
2756 
2757  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2758 }
2759 
2760 bool Converter::ConvertLogicalBinary(const Operation& operation,
2761  const Model& model,
2762  ConversionData& data,
2763  armnn::LogicalBinaryOperation logicalOperation)
2764 {
2765  VLOG(DRIVER) << "Converter::ConvertLogicalBinary()";
2766  VLOG(DRIVER) << "ConvertLogicalBinary()";
2767  VLOG(DRIVER) << "logicalOperation = " << GetLogicalBinaryOperationAsCString(logicalOperation);
2768 
2769  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
2770  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
2771 
2772  if (!(input0.IsValid() && input1.IsValid()))
2773  {
2774  return Fail("%s: Operation has invalid inputs", __func__);
2775  }
2776 
2777  const Operand* output = GetOutputOperand(operation, 0, model);
2778  if (!output)
2779  {
2780  return Fail("%s: Could not read output 0", __func__);
2781  }
2782 
2783  const TensorInfo& inputInfo0 = input0.GetTensorInfo();
2784  const TensorInfo& inputInfo1 = input1.GetTensorInfo();
2785  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2786 
2787  LogicalBinaryDescriptor descriptor(logicalOperation);
2788 
2789  bool isSupported = false;
2790  armnn::BackendId setBackend;
2791  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2792  {
2793  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2794  IsLogicalBinarySupported,
2795  data.m_Backends,
2796  isSupported,
2797  setBackend,
2798  inputInfo0,
2799  inputInfo1,
2800  outputInfo,
2801  descriptor);
2802  };
2803 
2804  if(!IsDynamicTensor(outputInfo))
2805  {
2806  validateFunc(outputInfo, isSupported);
2807  }
2808  else
2809  {
2810  isSupported = AreDynamicTensorsSupported();
2811  }
2812 
2813  if (!isSupported)
2814  {
2815  return false;
2816  }
2817 
2818  IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
2819  assert(layer != nullptr);
2820  layer->SetBackendId(setBackend);
2821 
2822  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
2823  if (!isReshapeSupported)
2824  {
2825  return false;
2826  }
2827 
2828  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2829 }
2830 
2831 bool Converter::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2832 {
2833  VLOG(DRIVER) << "Converter::ConvertLogistic()";
2834  armnn::ActivationDescriptor desc;
2835  desc.m_Function = armnn::ActivationFunction::Sigmoid;
2836 
2837  return ConvertToActivation(operation, __func__, desc, model, data);
2838 }
2839 
2840 bool Converter::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data)
2841 {
2842  VLOG(DRIVER) << "Converter::ConvertLogSoftmax()";
2843 
2844  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2845  if (!input.IsValid())
2846  {
2847  return Fail("%s: Failed to read input 0", __func__);
2848  }
2849 
2850  const Operand* output = GetOutputOperand(operation, 0, model);
2851  if (!output)
2852  {
2853  return Fail("%s: Failed to read output", __func__);
2854  }
2855 
2856  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2857 
2858  // Determine data type of input tensor
2859  OperandType inputType;
2860  if (!GetOperandType(operation, 0, model, inputType))
2861  {
2862  return Fail("%s: Operation has invalid inputs", __func__);
2863  }
2864 
2865  LogSoftmaxDescriptor descriptor;
2866 
2867  // Read beta
2868  if (inputType == OperandType::TENSOR_FLOAT16)
2869  {
2870  Half fp16Beta;
2871  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Beta, model, data))
2872  {
2873  return Fail("%s: Failed to read input 1 (FLOAT16)", __func__);
2874  }
2875 
2876  descriptor.m_Beta = static_cast<float>(fp16Beta);
2877  }
2878  else if (inputType == OperandType::TENSOR_FLOAT32)
2879  {
2880  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Beta, model, data))
2881  {
2882  return Fail("%s: Failed to read input 1 (FLOAT32)", __func__);
2883  }
2884  }
2885  else
2886  {
2887  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2888  }
2889 
2890  // Read axis
2891  if (!GetInputInt32(operation, 2, descriptor.m_Axis, model, data))
2892  {
2893  return Fail("%s: Failed to read input 2", __func__);
2894  }
2895 
2896  bool isSupported = false;
2897  armnn::BackendId setBackend;
2898  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2899  {
2900  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2901  IsLogSoftmaxSupported,
2902  data.m_Backends,
2903  isSupported,
2904  setBackend,
2905  input.GetTensorInfo(),
2906  outputInfo,
2907  descriptor);
2908  };
2909 
2910  if(IsDynamicTensor(outputInfo))
2911  {
2912  isSupported = AreDynamicTensorsSupported();
2913  }
2914  else
2915  {
2916  validateFunc(outputInfo, isSupported);
2917  }
2918 
2919  if (!isSupported)
2920  {
2921  return false;
2922  }
2923 
2924  IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
2925  if (!layer)
2926  {
2927  return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
2928  }
2929  layer->SetBackendId(setBackend);
2930 
2931  input.Connect(layer->GetInputSlot(0));
2932 
2933  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2934 }
2935 
2936 bool Converter::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
2937 {
2938  VLOG(DRIVER) << "Converter::ConvertLstm()";
2939 
2940  // Inputs:
2941  // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2942  // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2943  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2944  if (!input.IsValid())
2945  {
2946  return Fail("%s: Could not read input 0: input", __func__);
2947  }
2948  // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2949  LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
2950  if (!outputStateIn.IsValid())
2951  {
2952  return Fail("%s: Could not read input 18: outputStateIn", __func__);
2953  }
2954  // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2955  LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
2956  if (!cellStateIn.IsValid())
2957  {
2958  return Fail("%s: Could not read input 19: cellStateIn", __func__);
2959  }
2960 
2961  // Get the mandatory input tensors:
2962  // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2963  // [num_units, input_size].
2964  const ConstTensorPin inputToForgetWeightsPin =
2965  (DequantizeAndMakeConstTensorPin(operation, model, data, 2));
2966  // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2967  // [num_units, input_size].
2968  const ConstTensorPin inputToCellWeightsPin =
2969  (DequantizeAndMakeConstTensorPin(operation, model, data, 3));
2970  // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2971  // [num_units, input_size].
2972  const ConstTensorPin inputToOutputWeightsPin =
2973  (DequantizeAndMakeConstTensorPin(operation, model, data, 4));
2974  // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2975  // [num_units, output_size].
2976  const ConstTensorPin recurrentToForgetWeightsPin =
2977  (DequantizeAndMakeConstTensorPin(operation, model, data, 6));
2978  // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2979  // [num_units, output_size].
2980  const ConstTensorPin recurrentToCellWeightsPin =
2981  (DequantizeAndMakeConstTensorPin(operation, model, data, 7));
2982  // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2983  // [num_units, output_size].
2984  const ConstTensorPin recurrentToOutputWeightsPin =
2985  (DequantizeAndMakeConstTensorPin(operation, model, data, 8));
2986  // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2987  const ConstTensorPin forgetGateBiasPin =
2988  ConvertOperationInputToConstTensorPin(operation, 13, model, data);
2989  // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2990  const ConstTensorPin cellBiasPin =
2991  ConvertOperationInputToConstTensorPin(operation, 14, model, data);
2992  // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2993  const ConstTensorPin outputGateBiasPin =
2994  ConvertOperationInputToConstTensorPin(operation, 15, model, data);
2995 
2996  if (!inputToForgetWeightsPin.IsValid() ||
2997  !inputToCellWeightsPin.IsValid() ||
2998  !inputToOutputWeightsPin.IsValid() ||
2999  !recurrentToForgetWeightsPin.IsValid() ||
3000  !recurrentToCellWeightsPin.IsValid() ||
3001  !recurrentToOutputWeightsPin.IsValid() ||
3002  !forgetGateBiasPin.IsValid() ||
3003  !cellBiasPin.IsValid() ||
3004  !outputGateBiasPin.IsValid())
3005  {
3006  return Fail("%s: Operation has invalid tensor inputs", __func__);
3007  }
3008 
3009  // Get the optional input tensors:
3010  // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3011  // [num_units, input_size], where “num_units” corresponds to the number of cell units.
3012  const ConstTensorPin inputToInputWeightsPin =
3013  (DequantizeAndMakeConstTensorPin(operation, model, data, 1, true));
3014  // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3015  // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
3016  // “num_units”), or the second dimension of the “projection_weights”, if defined.
3017  const ConstTensorPin recurrentToInputWeightsPin =
3018  (DequantizeAndMakeConstTensorPin(operation, model, data, 5, true));
3019  // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3020  const ConstTensorPin cellToInputWeightsPin =
3021  (DequantizeAndMakeConstTensorPin(operation, model, data, 9, true));
3022  // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3023  const ConstTensorPin cellToForgetWeightsPin =
3024  (DequantizeAndMakeConstTensorPin(operation, model, data, 10, true));
3025  // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3026  const ConstTensorPin cellToOutputWeightsPin =
3027  (DequantizeAndMakeConstTensorPin(operation, model, data, 11, true));
3028  // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3029  const ConstTensorPin inputGateBiasPin =
3030  ConvertOperationInputToConstTensorPin(operation,
3031  12,
3032  model,
3033  data,
3034  g_DontPermute,
3035  nullptr,
3036  true);
3037 
3038  // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3039  // [output_size, num_units].
3040  const ConstTensorPin projectionWeightsPin =
3041  (DequantizeAndMakeConstTensorPin(operation, model, data, 16, true));
3042  // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
3043  const ConstTensorPin projectionBiasPin =
3044  ConvertOperationInputToConstTensorPin(operation,
3045  17,
3046  model,
3047  data,
3048  g_DontPermute,
3049  nullptr,
3050  true);
3051 
3052  if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
3053  (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
3054  (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
3055  (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
3056  (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
3057  (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
3058  (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
3059  (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
3060  {
3061  return Fail("%s: Operation has invalid tensor inputs", __func__);
3062  }
3063 
3064  // Get the mandatory input scalars (actually 1-D tensors of size 1):
3065  // 20: The activation function: A value indicating the activation function:
3066  // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
3067  // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
3068  // If set to 0.0 then clipping is disabled.
3069  // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
3070  // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
3071  ActivationFn activation = ActivationFn::kActivationNone;
3072  float cellClip;
3073  float projClip;
3074  if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
3075  !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
3076  !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
3077  {
3078  return Fail("%s: Operation has invalid scalar inputs", __func__);
3079  }
3080 
3081  // Get the normalization tensors
3082  // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
3083  // Used to rescale normalized inputs to activation at input gate.
3084  const ConstTensorPin inputLayerNormWeightsPin
3085  (DequantizeAndMakeConstTensorPin(operation, model, data, 23, true));
3086 
3087  // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
3088  // Used to rescale normalized inputs to activation at forget gate.
3089  const ConstTensorPin forgetLayerNormWeightsPin =
3090  ConvertOperationInputToConstTensorPin(operation,
3091  24,
3092  model,
3093  data,
3094  g_DontPermute,
3095  nullptr,
3096  true);
3097 
3098  // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
3099  // Used to rescale normalized inputs to activation at cell gate.
3100  const ConstTensorPin cellLayerNormWeightsPin =
3101  ConvertOperationInputToConstTensorPin(operation,
3102  25,
3103  model,
3104  data,
3105  g_DontPermute,
3106  nullptr,
3107  true);
3108 
3109  // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
3110  // Used to rescale normalized inputs to activation at output gate.
3111  const ConstTensorPin outputLayerNormWeightsPin =
3112  ConvertOperationInputToConstTensorPin(operation,
3113  26,
3114  model,
3115  data,
3116  g_DontPermute,
3117  nullptr,
3118  true);
3119 
3120  // Outputs:
3121  // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
3122  // with CIFG, or [batch_size, num_units * 3] without CIFG.
3123  const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
3124  if (!scratchBuffer)
3125  {
3126  return Fail("%s: Could not read output 0: scratchBuffer", __func__);
3127  }
3128  // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
3129  const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
3130  if (!outputStateOut)
3131  {
3132  return Fail("%s: Could not read output 1: outputStateOut", __func__);
3133  }
3134  // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
3135  const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
3136  if (!cellStateOut)
3137  {
3138  return Fail("%s: Could not read output 2: cellStateOut", __func__);
3139  }
3140  // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
3141  // effectively the same as the current “output state (out)” value.
3142  const Operand* output = GetOutputOperand(operation, 3, model);
3143  if (!output)
3144  {
3145  return Fail("%s: Could not read output 3: output", __func__);
3146  }
3147 
3148  // set the params structure for the AddLstmLayer call
3149  LstmInputParams params;
3150  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
3151  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
3152  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
3153  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
3154  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
3155  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
3156  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
3157  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
3158  params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
3159  params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
3160  params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
3161  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
3162  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
3163  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
3164  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
3165  params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
3166  params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
3167  params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
3168  params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
3169  params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
3170  params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
3171 
3172  // set the layer descriptor
3173  LstmDescriptor desc;
3174  desc.m_ActivationFunc = activation;
3175  desc.m_ClippingThresCell = cellClip;
3176  desc.m_ClippingThresProj = projClip;
3177  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
3178  params.m_RecurrentToInputWeights == nullptr ||
3179  params.m_InputGateBias == nullptr);
3180  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
3181  params.m_CellToOutputWeights != nullptr);
3182  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
3183  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
3184  params.m_ForgetLayerNormWeights != nullptr ||
3185  params.m_CellLayerNormWeights != nullptr ||
3186  params.m_OutputLayerNormWeights != nullptr);
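 // CIFG ("coupled input and forget gate") drops the dedicated input-gate parameters and
 // derives the input gate as (1 - forget gate), which is why omitting any of the three
 // input-gate tensors above enables it.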
3187 
3188  // validate the optional input groups
3189  if (desc.m_CifgEnabled &&
3190  (params.m_InputToInputWeights != nullptr ||
3191  params.m_RecurrentToInputWeights != nullptr ||
3192  params.m_InputGateBias != nullptr))
3193  {
3194  return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
3195  " and input gate bias must be provided", __func__);
3196  }
3197 
3198  if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
3199  {
3200  return Fail("%s: projection bias should not be provided without projection weights", __func__);
3201  }
3202 
3203  if (desc.m_PeepholeEnabled &&
3204  (params.m_CellToForgetWeights == nullptr ||
3205  params.m_CellToOutputWeights == nullptr ||
3206  (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
3207  {
3208  return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
3209  " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
3210  }
3211 
3212  if (desc.m_LayerNormEnabled &&
3213  (params.m_ForgetLayerNormWeights == nullptr ||
3214  params.m_CellLayerNormWeights == nullptr ||
3215  params.m_OutputLayerNormWeights == nullptr ||
3216  (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
3217  {
3218  return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
3219  " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
3220  }
3221 
3222  // Check if the layer is supported
3223  // Inputs
3224  const TensorInfo& inputInfo = input.GetTensorInfo();
3225  const TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
3226  const TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
3227 
3228  // Outputs
3229  const TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
3230  const TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
3231  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
3232  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3233 
3234  // Basic parameters
3235  LstmInputParamsInfo paramsInfo;
3236  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
3237  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
3238  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
3239  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
3240  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
3241  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
3242  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
3243  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
3244  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
3245 
3246  // Optional parameters
3247  if (!desc.m_CifgEnabled)
3248  {
3249  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
3250  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
3251  if (params.m_CellToInputWeights != nullptr)
3252  {
3253  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
3254  }
3255  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
3256  }
3257 
3258  if (desc.m_ProjectionEnabled)
3259  {
3260  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
3261  if (params.m_ProjectionBias != nullptr)
3262  {
3263  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
3264  }
3265  }
3266 
3267  if (desc.m_PeepholeEnabled)
3268  {
3269  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
3270  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
3271  }
3272 
3273  if (desc.m_LayerNormEnabled)
3274  {
3275  if(!desc.m_CifgEnabled)
3276  {
3277  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
3278  }
3279  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
3280  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
3281  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
3282  }
3283 
3284  bool isSupported = false;
3285  armnn::BackendId setBackend;
3286  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3287  {
3288  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3289  IsLstmSupported,
3290  data.m_Backends,
3291  isSupported,
3292  setBackend,
3293  inputInfo,
3294  outputStateInInfo,
3295  cellStateInInfo,
3296  scratchBufferInfo,
3297  outputStateOutInfo,
3298  cellStateOutInfo,
3299  outputInfo,
3300  desc,
3301  paramsInfo);
3302  };
3303 
3304  bool isDynamic = false;
3305  if (!IsDynamicTensor(outputStateOutInfo) &&
3306  !IsDynamicTensor(scratchBufferInfo) &&
3307  !IsDynamicTensor(cellStateOutInfo) &&
3308  !IsDynamicTensor(outputInfo))
3309  {
3310  validateFunc(outputInfo, isSupported);
3311  }
3312  else
3313  {
3314  isDynamic = true;
3315  isSupported = AreDynamicTensorsSupported();
3316  }
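 // For dynamic outputs, support is only provisionally accepted here; validateFunc
 // is re-run later (via SetupAndTrackLayerOutputSlot) once output shapes are known.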
3317 
3318  if (!isSupported)
3319  {
3320  return false;
3321  }
3322 
3323  // Add the layer
3324  IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
3325  layer->SetBackendId(setBackend);
3326 
3327  input.Connect(layer->GetInputSlot(0));
3328  outputStateIn.Connect(layer->GetInputSlot(1));
3329  cellStateIn.Connect(layer->GetInputSlot(2));
3330 
3331  if (!isDynamic)
3332  {
3333  return (
3334  SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3335  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3336  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3337  SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
3338  }
3339  else
3340  {
3341  return (
3342  SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3343  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3344  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3345  SetupAndTrackLayerOutputSlot(
3346  operation, 3, *layer, 3, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
3347  }
3348 
3349 }
3350 
3351 bool Converter::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
3352 {
3353  VLOG(DRIVER) << "Converter::ConvertMaxPool2d()";
3354  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Max, model, data);
3355 }
3356 
3357 bool Converter::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
3358 {
3359  VLOG(DRIVER) << "Converter::ConvertMaximum()";
3360 
3361  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3362  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3363 
3364  if (!input0.IsValid() || !input1.IsValid())
3365  {
3366  return Fail("%s: Operation has invalid inputs", __func__);
3367  }
3368 
3369  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
3370  if (!outputOperand)
3371  {
3372  return Fail("%s: Could not read output", __func__);
3373  }
3374 
3375  const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
3376 
3377  bool isSupported = false;
3378  armnn::BackendId setBackend;
3379  auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
3380  {
3381  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3382  IsMaximumSupported,
3383  data.m_Backends,
3384  isSupported,
3385  setBackend,
3386  input0.GetTensorInfo(),
3387  input1.GetTensorInfo(),
3388  outInfo);
3389  };
3390 
3391  if(IsDynamicTensor(outInfo))
3392  {
3393  isSupported = AreDynamicTensorsSupported();
3394  }
3395  else
3396  {
3397  validateFunc(outInfo, isSupported);
3398  }
3399 
3400  if (!isSupported)
3401  {
3402  return false;
3403  }
3404 
3405  IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
3406  assert(layer != nullptr);
3407  layer->SetBackendId(setBackend);
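 // BroadcastTensor reshapes the lower-rank input so both operands have matching
 // rank before they are connected to the binary layer below.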
3408  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
3409  if (!isReshapeSupported)
3410  {
3411  return false;
3412  }
3413 
3414  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3415 }
3416 
3417 bool Converter::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
3418 {
3419  VLOG(DRIVER) << "Converter::ConvertMean()";
3420 
3421  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3422  if (!input.IsValid())
3423  {
3424  return Fail("%s: Operation has invalid inputs", __func__);
3425  }
3426 
3427  const Operand* output = GetOutputOperand(operation, 0, model);
3428  if (!output)
3429  {
3430  return Fail("%s: Could not read output 0", __func__);
3431  }
3432 
3433  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3434 
3435  const Operand* axisOperand = GetInputOperand(operation, 1, model);
3436  if (!axisOperand)
3437  {
3438  return Fail("%s: Could not read input 1", __func__);
3439  }
3440 
3441  std::vector<int32_t> axis;
3442  if (!GetTensorInt32Values(*axisOperand, axis, model, data))
3443  {
3444  return Fail("%s: Input 1 has invalid values", __func__);
3445  }
3446 
3447  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3448 
3449  // Convert the axis to unsigned int and remove duplicates.
3450  unsigned int rank = inputInfo.GetNumDimensions();
3451  std::set<unsigned int> uniqueAxis;
3452  std::transform(axis.begin(), axis.end(),
3453  std::inserter(uniqueAxis, uniqueAxis.begin()),
3454  [rank](int i) -> unsigned int { return (i + rank) % rank; });
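 // Example: with rank 4, axis -1 maps to (-1 + 4) % 4 = 3, so negative axes count
 // back from the last dimension, and the std::set removes any duplicates.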
3455 
3456  // Get the "keep dims" flag.
3457  int32_t keepDims = 0;
3458  if (!GetInputInt32(operation, 2, keepDims, model, data))
3459  {
3460  return Fail("%s: Could not read input 2", __func__);
3461  }
3462 
3463  armnn::MeanDescriptor descriptor;
3464  descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3465  descriptor.m_KeepDims = keepDims > 0;
3466 
3467  bool isSupported = false;
3468  armnn::BackendId setBackend;
3469  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3470  {
3471  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3472  IsMeanSupported,
3473  data.m_Backends,
3474  isSupported,
3475  setBackend,
3476  inputInfo,
3477  outputInfo,
3478  descriptor);
3479  };
3480 
3481  if(!IsDynamicTensor(outputInfo))
3482  {
3483  validateFunc(outputInfo, isSupported);
3484  }
3485  else
3486  {
3487  isSupported = AreDynamicTensorsSupported();
3488  }
3489 
3490  if (!isSupported)
3491  {
3492  return false;
3493  }
3494 
3495  armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3496  assert(layer != nullptr);
3497  layer->SetBackendId(setBackend);
3498  input.Connect(layer->GetInputSlot(0));
3499 
3500  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3501 }
3502 
3503 bool Converter::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
3504 {
3505  VLOG(DRIVER) << "Converter::ConvertMinimum()";
3506 
3507  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3508  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3509 
3510  if (!input0.IsValid() || !input1.IsValid())
3511  {
3512  return Fail("%s: Operation has invalid inputs", __func__);
3513  }
3514 
3515  const Operand* output = GetOutputOperand(operation, 0, model);
3516  if (!output)
3517  {
3518  return Fail("%s: Could not read output 0", __func__);
3519  }
3520 
3521  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3522 
3523  bool isSupported = false;
3524  armnn::BackendId setBackend;
3525  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3526  {
3527  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3528  IsMinimumSupported,
3529  data.m_Backends,
3530  isSupported,
3531  setBackend,
3532  input0.GetTensorInfo(),
3533  input1.GetTensorInfo(),
3534  outputInfo);
3535  };
3536 
3537  if(IsDynamicTensor(outputInfo))
3538  {
3539  isSupported = AreDynamicTensorsSupported();
3540  }
3541  else
3542  {
3543  validateFunc(outputInfo, isSupported);
3544  }
3545 
3546  if (!isSupported)
3547  {
3548  return false;
3549  }
3550 
3551  IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
3552  assert(layer != nullptr);
3553  layer->SetBackendId(setBackend);
3554  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
3555  if (!isReshapeSupported)
3556  {
3557  return false;
3558  }
3559 
3560  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3561 }
3562 
3563 bool Converter::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
3564 {
3565  VLOG(DRIVER) << "Converter::ConvertMul()";
3566 
3567  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3568  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3569 
3570  if (!input0.IsValid() || !input1.IsValid())
3571  {
3572  return Fail("%s: Operation has invalid inputs", __func__);
3573  }
3574 
3575  // The FuseActivation parameter is always the input index 2
3576  // and it should be optional
3577  ActivationFn activationFunction;
3578  if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
3579  {
3580  return Fail("%s: Operation has invalid inputs", __func__);
3581  }
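 // The fused activation is not added as a separate layer here; it is forwarded to
 // SetupAndTrackLayerOutputSlot at the end of this function, which is responsible
 // for appending it after the multiplication.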
3582 
3583  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
3584 
3585  if (outputOperand == nullptr)
3586  {
3587  return false;
3588  }
3589 
3590  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3591 
3592  bool isSupported = false;
3593  armnn::BackendId setBackend;
3594  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3595  {
3596  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3597  IsMultiplicationSupported,
3598  data.m_Backends,
3599  isSupported,
3600  setBackend,
3601  input0.GetTensorInfo(),
3602  input1.GetTensorInfo(),
3603  outputInfo);
3604  };
3605 
3606  if(!IsDynamicTensor(outputInfo))
3607  {
3608  validateFunc(outputInfo, isSupported);
3609  }
3610  else
3611  {
3612  isSupported = AreDynamicTensorsSupported();
3613  }
3614 
3615  if (!isSupported)
3616  {
3617  return false;
3618  }
3619 
3620  armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
3621  startLayer->SetBackendId(setBackend);
3622 
3623  bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3624  if (!isReshapeSupported)
3625  {
3626  return false;
3627  }
3628 
3629  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
3630  data, nullptr, validateFunc, activationFunction);
3631 }
3632 
3633 bool Converter::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
3634 {
3635  VLOG(DRIVER) << "Converter::ConvertPad()";
3636 
3637  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3638  if (!input.IsValid())
3639  {
3640  return Fail("%s: Operation has invalid inputs", __func__);
3641  }
3642 
3643  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3644  unsigned int rank = inputInfo.GetNumDimensions();
3645 
3646  armnn::PadDescriptor descriptor;
3647  if (!ConvertPaddings(operation, model, data, rank, descriptor))
3648  {
3649  return Fail("%s: Could not convert paddings", __func__);
3650  }
3651 
3652  // For an ANEURALNETWORKS_TENSOR_QUANT8_ASYMM or ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3653  // the scale and zeroPoint must be the same as input0.
3654  // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
3655  // pad value must be "logical zero", so we set it equal to the QuantizationOffset, which effectively makes it
3656  // (QuantizationOffset - QuantizationOffset) * scale = 0.
3657  if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
3658  {
3659  descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3660  }
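 // Example: for a QAsymmU8 input with zeroPoint 128, m_PadValue becomes 128, which
 // dequantizes to (128 - 128) * scale = 0.0f, i.e. the required "logical zero".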
3661 
3662  const Operand* output = GetOutputOperand(operation, 0, model);
3663  if (!output)
3664  {
3665  return Fail("%s: Could not read output", __func__);
3666  }
3667 
3668  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3669 
3670  bool isSupported = false;
3671  armnn::BackendId setBackend;
3672  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3673  {
3674  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3675  IsPadSupported,
3676  data.m_Backends,
3677  isSupported,
3678  setBackend,
3679  inputInfo,
3680  outputInfo,
3681  descriptor);
3682  };
3683 
3684  if(!IsDynamicTensor(outputInfo))
3685  {
3686  validateFunc(outputInfo, isSupported);
3687  }
3688  else
3689  {
3690  isSupported = AreDynamicTensorsSupported();
3691  }
3692 
3693  if (!isSupported)
3694  {
3695  return false;
3696  }
3697 
3698  armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3699  assert(layer != nullptr);
3700  layer->SetBackendId(setBackend);
3701  input.Connect(layer->GetInputSlot(0));
3702 
3703  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3704 }
3705 
3706 bool Converter::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
3707 {
3708  VLOG(DRIVER) << "Converter::ConvertPadV2()";
3709 
3710  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3711  if (!input.IsValid())
3712  {
3713  return Fail("%s: Could not read input 0", __func__);
3714  }
3715 
3716  const Operand* output = GetOutputOperand(operation, 0, model);
3717  if (!output)
3718  {
3719  return Fail("%s: Could not read output", __func__);
3720  }
3721 
3722  const TensorInfo& inputInfo = input.GetTensorInfo();
3723  unsigned int rank = inputInfo.GetNumDimensions();
3724 
3725  PadDescriptor descriptor;
3726  if (!ConvertPaddings(operation, model, data, rank, descriptor))
3727  {
3728  return Fail("%s: Could not convert paddings", __func__);
3729  }
3730 
3731  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3732 
3733  // Determine type of padding value
3734  OperandType operandType0;
3735  OperandType operandType2;
3736 
3737  if (!GetOperandType(operation, 0, model, operandType0) ||
3738  !GetOperandType(operation, 2, model, operandType2))
3739  {
3740  return Fail("%s: Operation has invalid inputs", __func__);
3741  }
3742 
3743  // Read value to use for padding
3744  if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
3745  {
3746  Half f16PadValue;
3747  if (!GetInputScalar(operation, 2, operandType2, f16PadValue, model, data))
3748  {
3749  return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
3750  }
3751 
3752  descriptor.m_PadValue = f16PadValue;
3753  }
3754  else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
3755  {
3756  if (!GetInputFloat32(operation, 2, descriptor.m_PadValue, model, data))
3757  {
3758  return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
3759  }
3760  }
3761  else if (isQuantizedOperand(operandType0) && operandType2 == OperandType::INT32)
3762  {
3763  int32_t intPadValue = 0;
3764  if (!GetInputInt32(operation, 2, intPadValue, model, data))
3765  {
3766  return Fail("%s: Could not read input 2 (INT32)", __func__);
3767  }
3768  descriptor.m_PadValue = intPadValue;
3769  }
3770  else
3771  {
3772  return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
3773  }
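 // At this point descriptor.m_PadValue holds the scalar pad constant in the
 // representation matching the input tensor's type (FP16, FP32, or the integer
 // value used directly for quantized tensors).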
3774 
3775  bool isSupported = false;
3776  armnn::BackendId setBackend;
3777  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3778  {
3779  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3780  IsPadSupported,
3781  data.m_Backends,
3782  isSupported,
3783  setBackend,
3784  inputInfo,
3785  outputInfo,
3786  descriptor);
3787  };
3788 
3789  if(IsDynamicTensor(outputInfo))
3790  {
3791  isSupported = AreDynamicTensorsSupported();
3792  }
3793  else
3794  {
3795  validateFunc(outputInfo, isSupported);
3796  }
3797 
3798  if (!isSupported)
3799  {
3800  return false;
3801  }
3802 
3803  IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3804  assert(layer != nullptr);
3805  layer->SetBackendId(setBackend);
3806  input.Connect(layer->GetInputSlot(0));
3807 
3808  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3809 }
3810 
3811 bool Converter::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
3812 {
3813  VLOG(DRIVER) << "Converter::ConvertPrelu()";
3814 
3815  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3816  LayerInputHandle alpha = ConvertToLayerInputHandle(operation, 1, model, data);
3817 
3818  if (!input.IsValid() || !alpha.IsValid())
3819  {
3820  return Fail("%s: Operation has invalid inputs", __func__);
3821  }
3822 
3823  const Operand* output = GetOutputOperand(operation, 0, model);
3824 
3825  if (!output)
3826  {
3827  return Fail("%s: Could not read output", __func__);
3828  }
3829 
3830  const TensorInfo& inputInfo = input.GetTensorInfo();
3831  const TensorInfo& alphaInfo = alpha.GetTensorInfo();
3832  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3833 
3834  bool isSupported = false;
3835  armnn::BackendId setBackend;
3836  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3837  {
3838  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3839  IsPreluSupported,
3840  data.m_Backends,
3841  isSupported,
3842  setBackend,
3843  inputInfo,
3844  alphaInfo,
3845  outputInfo);
3846  };
3847 
3848  if(IsDynamicTensor(outputInfo))
3849  {
3850  isSupported = AreDynamicTensorsSupported();
3851  }
3852  else
3853  {
3854  validateFunc(outputInfo, isSupported);
3855  }
3856 
3857  if (!isSupported)
3858  {
3859  return false;
3860  }
3861 
3862  IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
3863  if (!layer)
3864  {
3865  return Fail("%s: AddPreluLayer failed", __func__);
3866  }
3867  layer->SetBackendId(setBackend);
3869 
3870  bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
3871  if (!isReshapeSupported)
3872  {
3873  return false;
3874  }
3875 
3876  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3877 }
3878 
3879 bool Converter::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
3880 {
3881  VLOG(DRIVER) << "Converter::ConvertQuantize()";
3882 
3883  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3884  if (!input.IsValid())
3885  {
3886  return Fail("%s: Operation has invalid input", __func__);
3887  }
3888 
3889  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
3890  if (!outputOperand)
3891  {
3892  return Fail("%s: Operation has invalid outputs", __func__);
3893  }
3894 
3895  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3896 
3897  bool isSupported = false;
3898  armnn::BackendId setBackend;
3899  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3900  {
3901  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3902  IsQuantizeSupported,
3903  data.m_Backends,
3904  isSupported,
3905  setBackend,
3906  input.GetTensorInfo(),
3907  outputInfo);
3908  };
3909 
3910  if(IsDynamicTensor(outputInfo))
3911  {
3912  isSupported = AreDynamicTensorsSupported();
3913  }
3914  else
3915  {
3916  validateFunc(outputInfo, isSupported);
3917  }
3918 
3919  if (!isSupported)
3920  {
3921  return false;
3922  }
3923 
3924  IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
3925  assert(layer != nullptr);
3926  layer->SetBackendId(setBackend);
3927  input.Connect(layer->GetInputSlot(0));
3928 
3929  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3930 }
3931 
3932 bool Converter::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
3933 {
3934  VLOG(DRIVER) << "Converter::ConvertQuantizedLstm()";
3935 
3937 
3938  //Inputs:
3939  // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
3940  // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
3941  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3942  if (!input.IsValid())
3943  {
3944  return Fail("%s: Could not read input 0: input", __func__);
3945  }
3946 
3947  // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, of shape [batch_size, output_size].
3948  LayerInputHandle outputStatePrevTimeStep = ConvertToLayerInputHandle(operation, 18, model, data);
3949  if (!outputStatePrevTimeStep.IsValid())
3950  {
3951  return Fail("%s: Could not read input 18: outputStatePrevTimeStep", __func__);
3952  }
3953 
3954  // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
3955  LayerInputHandle cellStatePrevTimeStep = ConvertToLayerInputHandle(operation, 19, model, data);
3956  if (!cellStatePrevTimeStep.IsValid())
3957  {
3958  return Fail("%s: Could not read input 19: cellStatePrevTimeStep", __func__);
3959  }
3960 
3961  // Get the mandatory input tensors:
3962 
3963  // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3964  // [num_units, input_size].
3965  const ConstTensorPin inputToForgetWeightsPin =
3966  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
3967 
3968  // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3969  // [num_units, input_size].
3970  const ConstTensorPin inputToCellWeightsPin =
3971  ConvertOperationInputToConstTensorPin(operation, 3, model, data);
3972 
3973  // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3974  // [num_units, input_size].
3975  const ConstTensorPin inputToOutputWeightsPin =
3976  ConvertOperationInputToConstTensorPin(operation, 4, model, data);
3977 
3978  // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3979  // [num_units, output_size].
3980  const ConstTensorPin recurrentToForgetWeightsPin =
3981  ConvertOperationInputToConstTensorPin(operation, 6, model, data);
3982 
3983  // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3984  // [num_units, output_size].
3985  const ConstTensorPin recurrentToCellWeightsPin =
3986  ConvertOperationInputToConstTensorPin(operation, 7, model, data);
3987 
3988  // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3989  // [num_units, output_size].
3990  const ConstTensorPin recurrentToOutputWeightsPin =
3991  ConvertOperationInputToConstTensorPin(operation, 8, model, data);
3992 
3993  // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3994  const ConstTensorPin forgetGateBiasPin =
3995  ConvertOperationInputToConstTensorPin(operation, 13, model, data);
3996 
3997  // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3998  const ConstTensorPin cellBiasPin =
3999  ConvertOperationInputToConstTensorPin(operation, 14, model, data);
4000 
4001  // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
4002  const ConstTensorPin outputGateBiasPin =
4003  ConvertOperationInputToConstTensorPin(operation, 15, model, data);
4004 
4005  if (!inputToForgetWeightsPin.IsValid() ||
4006  !inputToCellWeightsPin.IsValid() ||
4007  !inputToOutputWeightsPin.IsValid() ||
4008  !recurrentToForgetWeightsPin.IsValid() ||
4009  !recurrentToCellWeightsPin.IsValid() ||
4010  !recurrentToOutputWeightsPin.IsValid() ||
4011  !forgetGateBiasPin.IsValid() ||
4012  !cellBiasPin.IsValid() ||
4013  !outputGateBiasPin.IsValid())
4014  {
4015  return Fail("%s: Operation has invalid tensor inputs", __func__);
4016  }
4017 
4018  // Get the optional input tensors:
4019 
4020  // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4021  // [num_units, input_size], where “num_units” corresponds to the number of cell units.
4022  const ConstTensorPin inputToInputWeightsPin =
4023  ConvertOperationInputToConstTensorPin(operation,
4024  1,
4025  model,
4026  data,
4027  g_DontPermute,
4028  nullptr,
4029  true);
4030 
4031  // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4032  // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
4033  // “num_units”), or the second dimension of the “projection_weights”, if defined.
4034  const ConstTensorPin recurrentToInputWeightsPin =
4035  ConvertOperationInputToConstTensorPin(operation,
4036  5,
4037  model,
4038  data,
4039  g_DontPermute,
4040  nullptr,
4041  true);
4042 
4043  // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4044  // [num_units].
4045  const ConstTensorPin cellToInputWeightsPin =
4046  ConvertOperationInputToConstTensorPin(operation,
4047  9,
4048  model,
4049  data,
4050  g_DontPermute,
4051  nullptr,
4052  true);
4053 
4054  // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4055  // [num_units].
4056  const ConstTensorPin cellToForgetWeightsPin =
4057  ConvertOperationInputToConstTensorPin(operation,
4058  10,
4059  model,
4060  data,
4061  g_DontPermute,
4062  nullptr,
4063  true);
4064 
4065  // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4066  // [num_units].
4067  const ConstTensorPin cellToOutputWeightsPin =
4068  ConvertOperationInputToConstTensorPin(operation,
4069  11,
4070  model,
4071  data,
4072  g_DontPermute,
4073  nullptr,
4074  true);
4075 
4076  // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
4077  const ConstTensorPin inputGateBiasPin =
4078  ConvertOperationInputToConstTensorPin(operation,
4079  12,
4080  model,
4081  data,
4082  g_DontPermute,
4083  nullptr,
4084  true);
4085 
4086  // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4087  // [output_size, num_units].
4088  const ConstTensorPin projectionWeightsPin =
4089  ConvertOperationInputToConstTensorPin(operation,
4090  16,
4091  model,
4092  data,
4093  g_DontPermute,
4094  nullptr,
4095  true);
4096 
4097  // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [output_size].
4098  const ConstTensorPin projectionBiasPin =
4099  ConvertOperationInputToConstTensorPin(operation,
4100  17,
4101  model,
4102  data,
4103  g_DontPermute,
4104  nullptr,
4105  true);
4106 
4107  if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional())
4108  || (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional())
4109  || (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional())
4110  || (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional())
4111  || (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional())
4112  || (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional())
4113  || (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional())
4114  || (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
4115  {
4116  return Fail("%s: Operation has invalid tensor inputs", __func__);
4117  }
4118 
4119 
4120  // Get the optional normalization tensors
4121 
4122  // 20: The input layer normalization weights. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [num_units].
4123  // Used to rescale normalized inputs to activation at input gate.
4124  const ConstTensorPin inputLayerNormWeightsPin =
4125  ConvertOperationInputToConstTensorPin(operation,
4126  20,
4127  model,
4128  data,
4129  g_DontPermute,
4130  nullptr,
4131  true);
4132 
4133  // 21: The forget layer normalization weights. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [num_units].
4134  // Used to rescale normalized inputs to activation at forget gate.
4135  const ConstTensorPin forgetLayerNormWeightsPin =
4136  ConvertOperationInputToConstTensorPin(operation,
4137  21,
4138  model,
4139  data,
4140  g_DontPermute,
4141  nullptr,
4142  true);
4143 
4144  // 22: The cell layer normalization weights. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [num_units].
4145  // Used to rescale normalized inputs to activation at cell gate.
4146  const ConstTensorPin cellLayerNormWeightsPin =
4147  ConvertOperationInputToConstTensorPin(operation,
4148  22,
4149  model,
4150  data,
4151  g_DontPermute,
4152  nullptr,
4153  true);
4154 
4155  // 23: The output layer normalization weights. A 1-D tensor of shape [num_units].
4156  // Used to rescale normalized inputs to activation at output gate.
4157  const ConstTensorPin outputLayerNormWeightsPin =
4158  ConvertOperationInputToConstTensorPin(operation,
4159  23,
4160  model,
4161  data,
4162  g_DontPermute,
4163  nullptr,
4164  true);
4165 
4166  if ((!inputLayerNormWeightsPin.IsValid() && !inputLayerNormWeightsPin.IsOptional())
4167  || (!forgetLayerNormWeightsPin.IsValid() && !forgetLayerNormWeightsPin.IsOptional())
4168  || (!cellLayerNormWeightsPin.IsValid() && !cellLayerNormWeightsPin.IsOptional())
4169  || (!outputLayerNormWeightsPin.IsValid() && !outputLayerNormWeightsPin.IsOptional()))
4170  {
4171  return Fail("%s: Operation has invalid tensor inputs", __func__);
4172  }
4173 
4174  // Get the optional input scalars:
4175  // 24: The cell clip: If provided the cell state is clipped by this value prior to the cell output activation.
4176  // 25: The projection clip: If provided and projection is enabled, this is used for clipping the projected values.
4177 
4178  // Get the mandatory input scalars:
4179  // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
4180  // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
4181  // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
4182  // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
4183  // 30: The zero point of the hidden state, i.e. input to projection.
4184  // 31: The scale of the hidden state, i.e. input to projection.
4185  float cellClip, projClip, matMulInputGate, matMulForgetGate, matMulCellGate, matMulOutputGate, projInputScale;
4186  int projInputZeroPoint;
4187 
4188  if (!GetInputScalar(operation, 24, OperandType::FLOAT32, cellClip, model, data, true) ||
4189  !GetInputScalar(operation, 25, OperandType::FLOAT32, projClip, model, data, true) ||
4190  !GetInputScalar(operation, 26, OperandType::FLOAT32, matMulInputGate, model, data) ||
4191  !GetInputScalar(operation, 27, OperandType::FLOAT32, matMulForgetGate, model, data) ||
4192  !GetInputScalar(operation, 28, OperandType::FLOAT32, matMulCellGate, model, data) ||
4193  !GetInputScalar(operation, 29, OperandType::FLOAT32, matMulOutputGate, model, data) ||
4194  !GetInputScalar(operation, 30, OperandType::INT32, projInputZeroPoint, model, data) ||
4195  !GetInputScalar(operation, 31, OperandType::FLOAT32, projInputScale, model, data))
4196  {
4197  return Fail("%s: Operation has invalid scalar inputs", __func__);
4198  }
4199 
4200  // Outputs:
4201  // 0: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size,
4202  // output_size].
4203  const Operand* outputStateOut = GetOutputOperand(operation, 0, model);
4204  if (!outputStateOut)
4205  {
4206  return Fail("%s: Could not read output 0: outputStateOut", __func__);
4207  }
4208 
4209  // 1: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
4210  const Operand* cellStateOut = GetOutputOperand(operation, 1, model);
4211  if (!cellStateOut)
4212  {
4213  return Fail("%s: Could not read output 1: cellStateOut", __func__);
4214  }
4215 
4216  // 2: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size, output_size].
4217  // This is effectively the same as the current “output state (out)” value.
4218  const Operand* output = GetOutputOperand(operation, 2, model);
4219  if (!output)
4220  {
4221  return Fail("%s: Could not read output 2: output", __func__);
4222  }
4223 
4224  // set the params structure for the AddLstmLayer call
4225  LstmInputParams params;
4226  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
4227  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
4228  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
4229  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
4230  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
4231  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
4232  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
4233  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
4234  params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
4235  params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
4236  params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
4237  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
4238  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
4239  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4240  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4241  params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
4242  params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
4243  params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
4244  params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
4245  params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
4246  params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
4247 
4248  // set the layer descriptor
4249  QLstmDescriptor desc;
4250  desc.m_CellClip = cellClip;
4251  desc.m_ProjectionClip = projClip;
4252  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
4253  params.m_RecurrentToInputWeights == nullptr ||
4254  params.m_InputGateBias == nullptr);
4255  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
4256  params.m_CellToOutputWeights != nullptr);
4257  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
4258  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
4259  params.m_ForgetLayerNormWeights != nullptr ||
4260  params.m_CellLayerNormWeights != nullptr ||
4261  params.m_OutputLayerNormWeights != nullptr);
4262  desc.m_InputIntermediateScale = matMulInputGate;
4263  desc.m_ForgetIntermediateScale = matMulForgetGate;
4264  desc.m_CellIntermediateScale = matMulCellGate;
4265  desc.m_OutputIntermediateScale = matMulOutputGate;
4266  desc.m_HiddenStateScale = projInputScale;
4267  desc.m_HiddenStateZeroPoint = projInputZeroPoint;
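 // The four intermediate scales describe the quantization of the gate matmul
 // results feeding layer normalization (inputs 26-29 above), while the hidden
 // state scale/zero-point (inputs 30-31) describe the cell output ahead of the
 // optional projection.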
4268 
4269  // validate the optional input groups
4270  if (desc.m_CifgEnabled &&
4271  (params.m_InputToInputWeights != nullptr ||
4272  params.m_RecurrentToInputWeights != nullptr ||
4273  params.m_InputGateBias != nullptr))
4274  {
4275  return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
4276  " and input gate bias must be provided", __func__);
4277  }
4278 
4279  if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
4280  {
4281  return Fail("%s: projection bias should not be provided without projection weights", __func__);
4282  }
4283 
4284  if (desc.m_PeepholeEnabled &&
4285  (params.m_CellToForgetWeights == nullptr ||
4286  params.m_CellToOutputWeights == nullptr ||
4287  (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
4288  {
4289  return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
4290  " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
4291  }
4292 
4293  if (desc.m_LayerNormEnabled &&
4294  (params.m_ForgetLayerNormWeights == nullptr ||
4295  params.m_CellLayerNormWeights == nullptr ||
4296  params.m_OutputLayerNormWeights == nullptr ||
4297  (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
4298  {
4299  return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
4300  " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
4301  }
4302 
4303  // Basic parameters
4304  LstmInputParamsInfo paramsInfo;
4305  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4306  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4307  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4308  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4309  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4310  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4311  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4312  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4313  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4314 
4315  // Inputs
4316  const TensorInfo& inputInfo = input.GetTensorInfo();
4317  const TensorInfo& outputStatePrevTimeStepInfo = outputStatePrevTimeStep.GetTensorInfo();
4318  const TensorInfo& cellStatePrevTimeStepInfo = cellStatePrevTimeStep.GetTensorInfo();
4319 
4320  // Outputs
4321  TensorInfo outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
4322  TensorInfo outputInfo = GetTensorInfoForOperand(*output);
4323  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4324 
4325  // Optional parameters
4326  if (!desc.m_CifgEnabled)
4327  {
4328  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4329  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4330  if (desc.m_PeepholeEnabled)
4331  {
4332  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
4333  }
4334  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4335  }
4336 
4337 
4338  if (desc.m_ProjectionEnabled)
4339  {
4340  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
4341  if (params.m_ProjectionBias != nullptr)
4342  {
4343  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
4344  }
4345  }
4346  else
4347  {
4348  // If Projection is disabled, override non-const outputs to change the quant info with hidden params, then
4349  // create a new const TensorInfo based on this
4350  outputStateOutInfo.SetQuantizationScale(projInputScale);
4351  outputStateOutInfo.SetQuantizationOffset(projInputZeroPoint);
4352  outputInfo.SetQuantizationScale(projInputScale);
4353  outputInfo.SetQuantizationOffset(projInputZeroPoint);
4354  }
4355 
4356  const TensorInfo constOutputStateOutInfo(outputStateOutInfo);
4357  const TensorInfo constOutputInfo(outputInfo);
4358 
4359  if (desc.m_PeepholeEnabled)
4360  {
4361  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
4362  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
4363  }
4364 
4365  if (desc.m_LayerNormEnabled)
4366  {
4367  if(!desc.m_CifgEnabled)
4368  {
4369  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
4370  }
4371  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
4372  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
4373  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
4374  }
4375 
4376  // Check if the layer is supported
4377  bool isSupported = false;
4378  armnn::BackendId setBackend;
4379  auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
4380  {
4381  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4382  IsQLstmSupported,
4383  data.m_Backends,
4384  isSupported,
4385  setBackend,
4386  inputInfo,
4387  outputStatePrevTimeStepInfo,
4388  cellStatePrevTimeStepInfo,
4389  constOutputStateOutInfo,
4390  cellStateOutInfo,
4391  constOutputInfo,
4392  desc,
4393  paramsInfo);
4394  };
4395 
4396  bool isDynamic = false;
4397  if (!IsDynamicTensor(constOutputStateOutInfo) &&
4398  !IsDynamicTensor(cellStateOutInfo) &&
4399  !IsDynamicTensor(constOutputInfo))
4400  {
4401  validateFunc(outputInfo, isSupported);
4402  }
4403  else
4404  {
4405  isDynamic = true;
4406  isSupported = AreDynamicTensorsSupported();
4407  }
4408 
4409  if (!isSupported)
4410  {
4411  return false;
4412  }
4413 
4414  // Add the layer
4415  IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
4416  layer->SetBackendId(setBackend);
4417 
4418  input.Connect(layer->GetInputSlot(0));
4419  outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
4420  cellStatePrevTimeStep.Connect(layer->GetInputSlot(2));
4421 
4422  if (!isDynamic)
4423  {
4424  return (SetupAndTrackLayerOutputSlot(
4425  operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4426  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
4427  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4428  }
4429  else
4430  {
4431  return (SetupAndTrackLayerOutputSlot(
4432  operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4433  SetupAndTrackLayerOutputSlot(
4434  operation, 1, *layer, 1, model, data, nullptr, validateFunc,
4435  ActivationFn::kActivationNone, true) &&
4436  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4437  }
4438 }
4439 
4440 bool Converter::ConvertQuantized16BitLstm(const Operation& operation, const Model& model, ConversionData& data)
4441 {
4442  VLOG(DRIVER) << "Converter::ConvertQuantized16BitLstm()";
4444 
4445  //Inputs:
4446  // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
4447  // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
4448  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4449  if (!input.IsValid())
4450  {
4451  return Fail("%s: Could not read input 0: input", __func__);
4452  }
4453 
4454  //13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
4455  // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
4456  // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
4457  LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle(operation, 13, model, data);
4458  if (!previousCellStateIn.IsValid())
4459  {
4460  return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
4461  }
4462 
4463  // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4464  // [numBatches, outputSize] specifying the output of the LSTM cell from the previous time step. Tensor
4465  // is quantized with a fixed quantization range of -1, 127/128.
4466  LayerInputHandle previousOutputIn = ConvertToLayerInputHandle(operation, 14, model, data);
4467  if (!previousOutputIn.IsValid())
4468  {
4469  return Fail("%s: Could not read input 14: previousOutputIn", __func__);
4470  }
4471 
4472  // Get the input tensors:
4473  // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4474  // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
4475  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4476  const ConstTensorPin inputToInputWeightsPin =
4477  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
4478 
4479  // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4480  // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
4481  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4482  const ConstTensorPin inputToForgetWeightsPin =
4483  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
4484 
4485  // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4486  // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
4487  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4488  const ConstTensorPin inputToCellWeightsPin =
4489  ConvertOperationInputToConstTensorPin(operation, 3, model, data);
4490 
4491  // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4492  // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
4493  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4494  const ConstTensorPin inputToOutputWeightsPin =
4495  ConvertOperationInputToConstTensorPin(operation, 4, model, data);
4496 
4497  // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4498  // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
4499  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4500  const ConstTensorPin recurrentToInputWeightsPin =
4501  ConvertOperationInputToConstTensorPin(operation, 5, model, data);
4502 
4503  // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4504  // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
4505  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4506  const ConstTensorPin recurrentToForgetWeightsPin =
4507  ConvertOperationInputToConstTensorPin(operation, 6, model, data);
4508 
4509  // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4510  // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
4511  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4512  const ConstTensorPin recurrentToCellWeightsPin =
4513  ConvertOperationInputToConstTensorPin(operation, 7, model, data);
4514 
4515  // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4516  // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
4517  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4518  const ConstTensorPin recurrentToOutputWeightsPin =
4519  ConvertOperationInputToConstTensorPin(operation, 8, model, data);
4520 
4521  // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
4522  // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4523  // of input and weights scales and zeroPoint equal to 0.
4524  const ConstTensorPin inputGateBiasPin =
4525  ConvertOperationInputToConstTensorPin(operation, 9, model, data);
4526 
4527  // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
4528  // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4529  // of input and weights scales and zeroPoint equal to 0.
4530  const ConstTensorPin forgetGateBiasPin =
4531  ConvertOperationInputToConstTensorPin(operation, 10, model, data);
4532 
4533  // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
4534  // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
4535  // and weights scales and zeroPoint equal to 0.
4536  const ConstTensorPin cellBiasPin =
4537  ConvertOperationInputToConstTensorPin(operation, 11, model, data);
4538 
4539  // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
4540  // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4541  // of input and weights scales and zeroPoint equal to 0.
4542  const ConstTensorPin outputGateBiasPin =
4543  ConvertOperationInputToConstTensorPin(operation, 12, model, data);
4544 
4545  if (!inputToInputWeightsPin.IsValid() ||
4546  !inputToForgetWeightsPin.IsValid() ||
4547  !inputToCellWeightsPin.IsValid() ||
4548  !inputToOutputWeightsPin.IsValid() ||
4549  !recurrentToInputWeightsPin.IsValid() ||
4550  !recurrentToForgetWeightsPin.IsValid() ||
4551  !recurrentToCellWeightsPin.IsValid() ||
4552  !recurrentToOutputWeightsPin.IsValid() ||
4553  !inputGateBiasPin.IsValid() ||
4554  !forgetGateBiasPin.IsValid() ||
4555  !cellBiasPin.IsValid() ||
4556  !outputGateBiasPin.IsValid())
4557  {
4558  return Fail("%s: Operation has invalid tensor inputs", __func__);
4559  }
4560 
4561  // Outputs:
4562  // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
4563  // which contains a cell state from the current time step. Tensor is quantized using a quantization range
4564  // of -2^4, 2^4 * 32767/32768.
4565  const Operand* cellStateOut = GetOutputOperand(operation, 0, model);
4566  if (!cellStateOut)
4567  {
4568  return Fail("%s: Could not read output 0: cellStateOut", __func__);
4569  }
4570 
4571  // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
4572  // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
4573  const Operand* output = GetOutputOperand(operation, 1, model);
4574  if (!output)
4575  {
4576  return Fail("%s: Could not read output 1: output", __func__);
4577  }
4578 
4579  // Inputs
4580  const TensorInfo& inputInfo = input.GetTensorInfo();
4581  const TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
4582  const TensorInfo& previousOutputInInfo = previousOutputIn.GetTensorInfo();
4583 
4584  // Outputs
4585  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4586  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4587 
4588  // Dynamic tensors currently not supported
4589  if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
4590  {
4591  return Fail("%s: Dynamic output tensors are not supported", __func__);
4592  }
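 // Note: dynamic outputs are rejected just above, so the isDynamic fallback later
 // in this function appears to be unreachable.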
4593 
4594  QuantizedLstmInputParams params;
4595 
4596  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
4597  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
4598  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
4599  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
4600  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
4601  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
4602  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
4603  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
4604  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
4605  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
4606  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4607  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4608 
4609  QuantizedLstmInputParamsInfo paramsInfo;
4610  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4611  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4612  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4613  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4614  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4615  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4616  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4617  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4618  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4619  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4620  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4621  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4622 
4623  bool isSupported = false;
4624  armnn::BackendId setBackend;
4625  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4626  {
4627  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4628  IsQuantizedLstmSupported,
4629  data.m_Backends,
4630  isSupported,
4631  setBackend,
4632  inputInfo,
4633  previousCellStateInInfo,
4634  previousOutputInInfo,
4635  cellStateOutInfo,
4636  outputInfo,
4637  paramsInfo);
4638  };
4639 
4640  bool isDynamic = false;
4641  if (!IsDynamicTensor(cellStateOutInfo) &&
4642  !IsDynamicTensor(outputInfo))
4643  {
4644  validateFunc(outputInfo, isSupported);
4645  }
4646  else
4647  {
4648  isDynamic = true;
4649  isSupported = AreDynamicTensorsSupported();
4650  }
4651 
4652  if (!isSupported)
4653  {
4654  return false;
4655  }
4656 
4657  IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
4658  layer->SetBackendId(setBackend);
4659  input.Connect(layer->GetInputSlot(0));
4660  previousCellStateIn.Connect(layer->GetInputSlot(1));
4661  previousOutputIn.Connect(layer->GetInputSlot(2));
4662 
4663  if (!isDynamic)
4664  {
4665  return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
4666  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data));
4667  }
4668  else
4669  {
4670  return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
4671  SetupAndTrackLayerOutputSlot(
4672  operation, 1, *layer, 1, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
4673  }
4674 
4675 }
4676 
4677 bool Converter::ConvertRank(const Operation& operation, const Model& model, ConversionData& data)
4678 {
4679  VLOG(DRIVER) << "Converter::ConvertRank()";
4680 
4681  const Operand* inputOperand = GetInputOperand(operation, 0, model);
4682  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4683 
4684  if (inputOperand == nullptr || outputOperand == nullptr)
4685  {
4686  return Fail("%s: Operation has invalid inputs", __func__);
4687  }
4688 
4689  const Shape inputOperandShape = GetOperandShape(*inputOperand);
4690  const Shape outputOperandShape = GetOperandShape(*outputOperand);
4691 
4692  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4693  if (!input.IsValid())
4694  {
4695  return Fail("%s: Could not read input 0", __func__);
4696  }
4697 
4698  armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
4699  if (IsDynamicTensor(outInfo))
4700  {
4701  return Fail("%s: Dynamic output tensors are not supported", __func__);
4702  }
4703 
4704  bool isSupported = false;
4705  armnn::BackendId setBackend;
4706  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4707  IsRankSupported,
4708  data.m_Backends,
4709  isSupported,
4710  setBackend,
4711  input.GetTensorInfo(),
4712  outInfo);
4713  if (!isSupported)
4714  {
4715  return false;
4716  }
4717 
4718  armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
4719  assert(layer != nullptr);
4720  layer->SetBackendId(setBackend);
4721  input.Connect(layer->GetInputSlot(0));
4722 
4723  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, &outInfo);
4724 }
4725 
4726 bool Converter::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
4727 {
4728  VLOG(DRIVER) << "Converter::ConvertReLu()";
4729  armnn::ActivationDescriptor desc;
4730  desc.m_Function = armnn::ActivationFunction::ReLu;
4731 
4732 
4733  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4734  if (!input.IsValid())
4735  {
4736  return Fail("%s: Input 0 is invalid", __func__);
4737  }
4738 
4739  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4740  if (!outputOperand)
4741  {
4742  return false;
4743  }
4744 
4745  const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
4746 
4747  bool isSupported = false;
4748  armnn::BackendId setBackend;
4749  auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
4750  {
4751  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4752  IsActivationSupported,
4753  data.m_Backends,
4754  isSupported,
4755  setBackend,
4756  input.GetTensorInfo(),
4757  outInfo,
4758  desc);
4759  };
4760 
4761  if(IsDynamicTensor(outInfo))
4762  {
4763  isSupported = AreDynamicTensorsSupported();
4764  }
4765  else
4766  {
4767  validateFunc(outInfo, isSupported);
4768  }
4769 
4770  if (!isSupported)
4771  {
4772  return false;
4773  }
4774 
4775  armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
4776  ARMNN_ASSERT(layer != nullptr);
4777  layer->SetBackendId(setBackend);
4778  input.Connect(layer->GetInputSlot(0));
4779 
4780  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4781 }
4782 
4783 bool Converter::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
4784 {
4785  VLOG(DRIVER) << "Converter::ConvertReLu1()";
4786  armnn::ActivationDescriptor desc;
4787  desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4788  desc.m_A = 1.0f;
4789  desc.m_B = -1.0f;
4790 
4791  return ConvertToActivation(operation, __func__, desc, model, data);
4792 }
4793 
4794 bool Converter::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
4795 {
4796  VLOG(DRIVER) << "Converter::ConvertReLu6()";
4797  armnn::ActivationDescriptor desc;
4798  desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4799  desc.m_A = 6.0f;
4800 
4801  return ConvertToActivation(operation, __func__, desc, model, data);
4802 }
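
// Illustrative sketch (editor addition, not part of Converter.cpp): ArmNN's
// BoundedReLu activation clamps its input to the range [m_B, m_A], which is why
// ConvertReLu1 sets A = 1, B = -1 and ConvertReLu6 sets A = 6 (B defaults to 0).
#include <algorithm>

float BoundedReLu(float x, float a, float b) // a = m_A (upper bound), b = m_B (lower bound)
{
    return std::min(a, std::max(b, x));
}
// BoundedReLu( 7.5f, 6.0f,  0.0f) == 6.0f  -> RELU6
// BoundedReLu(-2.0f, 1.0f, -1.0f) == -1.0f -> RELU1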
4803 
4804 bool Converter::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
4805 {
4806  VLOG(DRIVER) << "Converter::ConvertReshape()";
4807 
4808  const Operand* inputOperand = GetInputOperand(operation, 0, model);
4809  const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
4810  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4811 
4812  if (inputOperand == nullptr
4813  || requestedShapeOperand == nullptr
4814  || outputOperand == nullptr)
4815  {
4816  return Fail("%s: Operation has invalid inputs", __func__);
4817  }
4818 
4819  if (requestedShapeOperand->dimensions.size() != 1)
4820  {
4821  return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
4822  __func__, requestedShapeOperand->dimensions.size());
4823  }
4824 
4825  std::vector<int32_t> targetDimensions;
4826  if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
4827  {
4828  return Fail("%s: Could not read values of input 1", __func__);
4829  }
4830 
4831  const Shape inputOperandShape = GetOperandShape(*inputOperand);
4832 
4833  Shape requestedShape;
4834  // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
4835  // function that resolves these values into a fully specified tensor shape.
4836  if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
4837  {
4838  return Fail("%s: Failed to resolve the requested shape", __func__);
4839  }
4840 
4841  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4842  if (!input.IsValid())
4843  {
4844  return Fail("%s: Could not read input 0", __func__);
4845  }
4846 
4847  armnn::ReshapeDescriptor reshapeDescriptor;
4848  reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
4849  requestedShape.dimensions.data());
4850 
4851  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
4852 
4853  bool isSupported = false;
4854  armnn::BackendId setBackend;
4855  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4856  {
4857  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4858  IsReshapeSupported,
4859  data.m_Backends,
4860  isSupported,
4861  setBackend,
4862  input.GetTensorInfo(),
4863  outputInfo,
4864  reshapeDescriptor);
4865  };
4866 
4867  if(!IsDynamicTensor(outputInfo))
4868  {
4869  validateFunc(outputInfo, isSupported);
4870  }
4871  else
4872  {
4873  isSupported = AreDynamicTensorsSupported();
4874  }
4875 
4876  if (!isSupported)
4877  {
4878  return false;
4879  }
4880 
4881  armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
4882  assert(layer != nullptr);
4883  layer->SetBackendId(setBackend);
4884  input.Connect(layer->GetInputSlot(0));
4885 
4886  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4887 }
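
// Minimal sketch (hypothetical helper, not the AndroidNN reshapePrepare
// implementation) of the special-value handling described above: a single -1
// in the requested shape is resolved so that the element counts match.
#include <cstdint>
#include <vector>

bool ResolveTargetShape(uint32_t numInputElements, std::vector<int32_t>& target)
{
    uint32_t knownProduct = 1;
    int wildcard = -1;
    for (size_t i = 0; i < target.size(); ++i)
    {
        if (target[i] == -1)
        {
            if (wildcard != -1) { return false; } // at most one -1 is allowed
            wildcard = static_cast<int>(i);
        }
        else if (target[i] <= 0) { return false; }
        else { knownProduct *= static_cast<uint32_t>(target[i]); }
    }
    if (wildcard != -1)
    {
        if (numInputElements % knownProduct != 0) { return false; }
        target[wildcard] = static_cast<int32_t>(numInputElements / knownProduct);
        knownProduct *= static_cast<uint32_t>(target[wildcard]);
    }
    return knownProduct == numInputElements; // element counts must match exactly
}
// 24 input elements with target {2, -1, 4} resolve to {2, 3, 4}.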
4888 
4889 bool Converter::ConvertResize(const Operation& operation,
4890  const Model& model,
4891  ConversionData& data,
4892  ResizeMethod resizeMethod)
4893 {
4894  VLOG(DRIVER) << "Converter::ConvertResize()";
4895  VLOG(DRIVER) << "resizeMethod = " << GetResizeMethodAsCString(resizeMethod);
4896 
4897  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4898  if (!input.IsValid())
4899  {
4900  return Fail("%s: Could not read input 0", __func__);
4901  }
4902 
4903  const Operand* output = GetOutputOperand(operation, 0, model);
4904  if (!output)
4905  {
4906  return Fail("%s: Could not read output 0", __func__);
4907  }
4908 
4909  const TensorInfo& inputInfo = input.GetTensorInfo();
4910  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4911 
4912  ResizeDescriptor descriptor;
4913  descriptor.m_Method = resizeMethod;
4914  descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
4915 
4916  OperandType operandType1;
4917  OperandType operandType2;
4918 
4919  if (!GetOperandType(operation, 1, model, operandType1) ||
4920  !GetOperandType(operation, 2, model, operandType2))
4921  {
4922  return Fail("%s: Operation has invalid inputs", __func__);
4923  }
4924 
4925  if (operandType1 != operandType2)
4926  {
4927  return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
4928  }
4929 
4930  if (operandType1 == OperandType::INT32)
4931  {
4932  // Case 1: resizing by shape
4933  int32_t targetWidth = 0;
4934  int32_t targetHeight = 0;
4935 
4936  if (!GetInputInt32(operation, 1, targetWidth, model, data) ||
4937  !GetInputInt32(operation, 2, targetHeight, model, data))
4938  {
4939  return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
4940  }
4941 
4942  if (targetWidth < 0 || targetHeight < 0)
4943  {
4944  return Fail("%s: Operation has invalid inputs for resizing by shape. "
4945  "Target width/height cannot be < 0", __func__);
4946  }
4947 
4948  descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
4949  descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
4950  }
4951  else if (operandType1 == OperandType::FLOAT32)
4952  {
4953  // Case 2: resizing by scale
4954  float widthScale = 1.0f;
4955  float heightScale = 1.0f;
4956 
4957  if (!GetInputFloat32(operation, 1, widthScale, model, data) ||
4958  !GetInputFloat32(operation, 2, heightScale, model, data))
4959  {
4960  return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4961  }
4962 
4963  const TensorShape& inputShape = inputInfo.GetShape();
4964  armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4965 
4966  float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
4967  float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
4968 
4969  descriptor.m_TargetWidth = std::floor(width * widthScale);
4970  descriptor.m_TargetHeight = std::floor(height * heightScale);
4971  }
4972  else if (operandType1 == OperandType::FLOAT16)
4973  {
4974  Half widthScale;
4975  Half heightScale;
4976 
4977  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, widthScale, model, data) ||
4978  !GetInputScalar(operation, 2, OperandType::FLOAT16, heightScale, model, data))
4979  {
4980  return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4981  }
4982 
4983  const TensorShape& inputShape = inputInfo.GetShape();
4984  armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4985 
4986  Half width = static_cast<Half>(inputShape[dataLayoutIndexed.GetWidthIndex()]);
4987  Half height = static_cast<Half>(inputShape[dataLayoutIndexed.GetHeightIndex()]);
4988 
4989  descriptor.m_TargetWidth = std::floor(width * widthScale);
4990  descriptor.m_TargetHeight = std::floor(height * heightScale);
4991  }
4992  else
4993  {
4994  return Fail("%s: Operand has invalid data type for resizing by scale", __func__);
4995  }
4996 
4997  descriptor.m_AlignCorners = GetOptionalBool(operation, 4, model, data);
4998  descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data);
4999 
5000  bool isSupported = false;
5001  armnn::BackendId setBackend;
5002  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5003  {
5004  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5005  IsResizeSupported,
5006  data.m_Backends,
5007  isSupported,
5008  setBackend,
5009  inputInfo,
5010  outputInfo,
5011  descriptor);
5012  };
5013 
5014  if(IsDynamicTensor(outputInfo))
5015  {
5016  isSupported = AreDynamicTensorsSupported();
5017  }
5018  else
5019  {
5020  validateFunc(outputInfo, isSupported);
5021  }
5022 
5023  if (!isSupported)
5024  {
5025  return false;
5026  }
5027 
5028  IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
5029  assert(layer != nullptr);
5030  layer->SetBackendId(setBackend);
5031  input.Connect(layer->GetInputSlot(0));
5032 
5033  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5034 }
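
// Worked example (editor addition): when resizing by scale, the converter
// above computes each target extent as floor(inputExtent * scale).
#include <cmath>
#include <cstdint>

uint32_t ScaledExtent(uint32_t inputExtent, float scale)
{
    return static_cast<uint32_t>(std::floor(static_cast<float>(inputExtent) * scale));
}
// ScaledExtent(10, 1.5f) == 15; ScaledExtent(5, 0.5f) == 2.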
5035 
5036 bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
5037 {
5038  VLOG(DRIVER) << "Converter::ConvertSpaceToBatchNd()";
5039 
5040  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5041  if(!input.IsValid())
5042  {
5043  return Fail("%s: Operation has invalid inputs", __func__);
5044  }
5045 
5046  const armnn::TensorInfo &inputInfo = input.GetTensorInfo();
5047  unsigned int rank = inputInfo.GetNumDimensions();
5048  unsigned int spatialDim = rank - 2;
5049 
5050  if(rank != 4)
5051  {
5052  return Fail("%s: Only inputs with rank 4 are supported", __func__);
5053  }
5054 
5055  const Operand *output = GetOutputOperand(operation, 0, model);
5056  if(!output)
5057  {
5058  return Fail("%s: Could not read output 0", __func__);
5059  }
5060 
5061  const armnn::TensorInfo &outputInfo = GetTensorInfoForOperand(*output);
5062 
5063  const Operand *blockShapeOperand = GetInputOperand(operation, 1, model);
5064  const Operand *paddingsOperand = GetInputOperand(operation, 2, model);
5065 
5066  armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
5067  if(blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
5068  {
5069  return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
5070  }
5071 
5072  std::vector<int32_t> blockShape;
5073  if(!GetTensorInt32Values(*blockShapeOperand, blockShape, model, data))
5074  {
5075  return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
5076  }
5077  if(std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i)
5078  { return i < 1; }))
5079  {
5080  return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
5081  }
5082 
5083  armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
5084  if(paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
5085  {
5086  return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
5087  }
5088 
5089  std::vector<std::pair<unsigned int, unsigned int>> paddingList;
5090  std::vector<int32_t> paddings;
5091  if(!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
5092  {
5093  return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
5094  }
5095  for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
5096  {
5097  int paddingBeforeInput = paddings[i];
5098  int paddingAfterInput = paddings[i + 1];
5099  if(paddingBeforeInput < 0 || paddingAfterInput < 0)
5100  {
5101  return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
5102  }
5103 
5104  paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
5105  }
5106 
5107  armnn::SpaceToBatchNdDescriptor descriptor;
5108  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
5109  descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
5110  descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
5111 
5112  if(Is12OrLaterOperand(*output))
5113  {
5114  descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
5115  }
5116 
5117  bool isSupported = false;
5118  armnn::BackendId setBackend;
5119  auto validateFunc = [&](const armnn::TensorInfo &outputInfo, bool &isSupported)
5120  {
5121  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5122  IsSpaceToBatchNdSupported,
5123  data.m_Backends,
5124  isSupported,
5125  setBackend,
5126  inputInfo,
5127  outputInfo,
5128  descriptor);
5129  };
5130 
5131  if(IsDynamicTensor(outputInfo))
5132  {
5133  isSupported = AreDynamicTensorsSupported();
5134  } else
5135  {
5136  validateFunc(outputInfo, isSupported);
5137  }
5138 
5139  if(!isSupported)
5140  {
5141  return false;
5142  }
5143 
5144  armnn::IConnectableLayer *const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
5145  assert(layer != nullptr);
5146  layer->SetBackendId(setBackend);
5147  input.Connect(layer->GetInputSlot(0));
5148 
5149  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5150 }
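
// Shape sketch (hypothetical helper, assuming the rank-4 NHWC layout the
// converter above requires): SPACE_TO_BATCH_ND multiplies the batch by the
// product of the block shape and divides each padded spatial extent by its
// block value.
#include <cstdint>
#include <utility>
#include <vector>

std::vector<uint32_t> SpaceToBatchOutputShape(
    const std::vector<uint32_t>& nhwc,                      // {N, H, W, C}
    const std::vector<uint32_t>& block,                     // {blockH, blockW}
    const std::vector<std::pair<uint32_t, uint32_t>>& pads) // {{top, bottom}, {left, right}}
{
    uint32_t paddedH = nhwc[1] + pads[0].first + pads[0].second;
    uint32_t paddedW = nhwc[2] + pads[1].first + pads[1].second;
    return { nhwc[0] * block[0] * block[1], paddedH / block[0], paddedW / block[1], nhwc[3] };
}
// {1, 4, 4, 1} with block {2, 2} and zero padding -> {4, 2, 2, 1}.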
5151 
5152 bool Converter::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
5153 {
5154  VLOG(DRIVER) << "Converter::ConvertSpaceToDepth()";
5155 
5156  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5157  if (!input.IsValid() )
5158  {
5159  return Fail("%s: Operation has invalid inputs", __func__);
5160  }
5161 
5162  const TensorInfo& inputInfo = input.GetTensorInfo();
5163  unsigned int rank = inputInfo.GetNumDimensions();
5164  if (rank != 4)
5165  {
5166  return Fail("%s: Only inputs with rank 4 are supported", __func__);
5167  }
5168 
5169  const Operand* output = GetOutputOperand(operation, 0, model);
5170  if (!output)
5171  {
5172  return Fail("%s: Could not read output 0", __func__);
5173  }
5174 
5175  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5176 
5177  SpaceToDepthDescriptor desc;
5178 
5179  GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
5180 
5181  if (desc.m_BlockSize <= 1)
5182  {
5183  return Fail("%s: Block size must be greater than 1", __func__);
5184  }
5185 
5186  desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
5187 
5188  bool isSupported = false;
5189  armnn::BackendId setBackend;
5190  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5191  {
5192  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5193  IsSpaceToDepthSupported,
5194  data.m_Backends,
5195  isSupported,
5196  setBackend,
5197  inputInfo,
5198  outputInfo,
5199  desc);
5200  };
5201 
5202  if(IsDynamicTensor(outputInfo))
5203  {
5204  isSupported = AreDynamicTensorsSupported();
5205  }
5206  else
5207  {
5208  validateFunc(outputInfo, isSupported);
5209  }
5210 
5211  if (!isSupported)
5212  {
5213  return false;
5214  }
5215 
5216  IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
5217  assert(layer != nullptr);
5218  layer->SetBackendId(setBackend);
5219  input.Connect(layer->GetInputSlot(0));
5220 
5221  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5222 }
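
// Shape sketch (editor addition): SPACE_TO_DEPTH with block size b maps an
// NHWC tensor {N, H, W, C} to {N, H/b, W/b, C*b*b}, which is why a block size
// of 1 (rejected above) would leave the tensor unchanged.
#include <array>
#include <cstdint>

std::array<uint32_t, 4> SpaceToDepthOutputShape(std::array<uint32_t, 4> nhwc, uint32_t b)
{
    return {{ nhwc[0], nhwc[1] / b, nhwc[2] / b, nhwc[3] * b * b }};
}
// {1, 4, 4, 3} with b == 2 -> {1, 2, 2, 12}.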
5223 
5224 bool Converter::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
5225 {
5226  VLOG(DRIVER) << "Converter::ConvertSoftmax()";
5227 
5228  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5229  if (!input.IsValid())
5230  {
5231  return Fail("%s: Operation has invalid inputs", __func__);
5232  }
5233 
5234  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
5235  if (!outputOperand)
5236  {
5237  return Fail("%s: Operation has no outputs", __func__);
5238  }
5239 
5240  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
5241 
5242  SoftmaxDescriptor desc;
5243  OperandType outputType = outputOperand->type;
5244 
5245  // Read beta value
5246  if (outputType == OperandType::TENSOR_FLOAT16)
5247  {
5248  Half value;
5249 
5250  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
5251  {
5252  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5253  }
5254 
5255  desc.m_Beta = static_cast<float>(value);
5256  }
5257  else
5258  {
5259  if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
5260  {
5261  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5262  }
5263  }
5264 
5265  if (operation.inputs.size() > 2 && !GetInputScalar(operation,
5266  2,
5267  OperandType::INT32,
5268  desc.m_Axis,
5269  model,
5270  data))
5271  {
5272  return Fail("%s: Operation has invalid inputs", __func__);
5273  }
5274 
5275  bool isSupported = false;
5276  armnn::BackendId setBackend;
5277  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5278  {
5279  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5280  IsSoftmaxSupported,
5281  data.m_Backends,
5282  isSupported,
5283  setBackend,
5284  input.GetTensorInfo(),
5285  outputInfo,
5286  desc);
5287  };
5288 
5289  if(IsDynamicTensor(outputInfo))
5290  {
5291  isSupported = AreDynamicTensorsSupported();
5292  }
5293  else
5294  {
5295  validateFunc(outputInfo, isSupported);
5296  }
5297 
5298  if (!isSupported)
5299  {
5300  return false;
5301  }
5302 
5303  IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
5304  assert(layer != nullptr);
5305  layer->SetBackendId(setBackend);
5306  input.Connect(layer->GetInputSlot(0));
5307 
5308  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5309 }
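
// Numeric sketch (plain C++, not the ArmNN backend implementation) of the
// beta parameter read above: softmax(x)_i = exp(beta*x_i) / sum_j exp(beta*x_j),
// so beta = 1 is the standard softmax and larger betas sharpen the distribution.
#include <algorithm>
#include <cmath>
#include <vector>

std::vector<float> SoftmaxWithBeta(const std::vector<float>& x, float beta)
{
    if (x.empty()) { return {}; }
    float maxVal = *std::max_element(x.begin(), x.end()); // subtract max for numerical stability
    std::vector<float> out(x.size());
    float sum = 0.0f;
    for (size_t i = 0; i < x.size(); ++i)
    {
        out[i] = std::exp(beta * (x[i] - maxVal));
        sum += out[i];
    }
    for (float& v : out) { v /= sum; }
    return out;
}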
5310 
5311 bool Converter::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
5312 {
5313  VLOG(DRIVER) << "Converter::ConvertSub()";
5314 
5315  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
5316  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
5317 
5318  if (!input0.IsValid() || !input1.IsValid())
5319  {
5320  return Fail("%s: Operation has invalid inputs", __func__);
5321  }
5322 
5323  // The FuseActivation parameter is always the input at index 2
5324  // and it is optional
5325  ActivationFn activationFunction;
5326  if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
5327  {
5328  return Fail("%s: Operation has invalid inputs", __func__);
5329  }
5330 
5331  const Operand* output = GetOutputOperand(operation, 0, model);
5332  if (!output)
5333  {
5334  return Fail("%s: Could not read output 0", __func__);
5335  }
5336 
5337  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5338 
5339  bool isSupported = false;
5340  armnn::BackendId setBackend;
5341  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5342  {
5343  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5344  IsSubtractionSupported,
5345  data.m_Backends,
5346  isSupported,
5347  setBackend,
5348  input0.GetTensorInfo(),
5349  input1.GetTensorInfo(),
5350  outputInfo);
5351  };
5352 
5353  if(IsDynamicTensor(outputInfo))
5354  {
5355  isSupported = AreDynamicTensorsSupported();
5356  }
5357  else
5358  {
5359  validateFunc(outputInfo, isSupported);
5360  }
5361 
5362  if (!isSupported)
5363  {
5364  return false;
5365  }
5366 
5367  armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
5368  startLayer->SetBackendId(setBackend);
5369 
5370  bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
5371  if (!isReshapeSupported)
5372  {
5373  return false;
5374  }
5375  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
5376  data, nullptr, validateFunc, activationFunction);
5377 }
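
// Broadcasting sketch (hypothetical helper): BroadcastTensor aligns the two
// inputs by (conceptually) prepending size-1 dimensions to the lower-rank
// shape, which is what NNAPI's numpy-style broadcasting requires.
#include <cstdint>
#include <vector>

std::vector<uint32_t> PrependOnes(std::vector<uint32_t> shape, size_t targetRank)
{
    if (shape.size() < targetRank)
    {
        shape.insert(shape.begin(), targetRank - shape.size(), 1u);
    }
    return shape;
}
// PrependOnes({3}, 4) -> {1, 1, 1, 3}, ready to broadcast against {2, 4, 4, 3}.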
5378 
5379 bool Converter::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
5380 {
5381  VLOG(DRIVER) << "Converter::ConvertTanH()";
5382 
5383  armnn::ActivationDescriptor desc;
5384  desc.m_Function = armnn::ActivationFunction::TanH;
5385  desc.m_A = 1.0f; // android nn does not support tanH parameters
5386  desc.m_B = 1.0f; // set to 1.0f for unity scaling
5387 
5388  return ConvertToActivation(operation, __func__, desc, model, data);
5389 }
5390 
5391 bool Converter::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
5392 {
5393  VLOG(DRIVER) << "Converter::ConvertTransposeConv2d()";
5394 
5395  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5396 
5397  if (!input.IsValid())
5398  {
5399  return Fail("%s: Operation has invalid inputs", __func__);
5400  }
5401 
5402  const Operand* output = GetOutputOperand(operation, 0, model);
5403 
5404  if (!output)
5405  {
5406  return Fail("%s: Could not read output 0", __func__);
5407  }
5408 
5409  const TensorInfo& inputInfo = input.GetTensorInfo();
5410  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5411 
5412  // ArmNN does not currently support non-fixed weights or bias
5413  // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
5414  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
5415 
5416  if (weightsOperand == nullptr)
5417  {
5418  return Fail("%s: Operand is invalid", __func__);
5419  }
5420  TransposeConvolution2dDescriptor desc;
5421  desc.m_DataLayout = DataLayout::NHWC;
5422 
5423  // Determine whether padding is implicit or explicit
5424  bool implicitPadding = operation.inputs.size() == 9;
5425 
5426  if (implicitPadding )
5427  {
5428  desc.m_DataLayout = OptionalDataLayout(operation, 8, model, data);
5429  }
5430  else
5431  {
5432  desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
5433  }
5434 
5435  armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
5436  unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
5437  unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
5438 
5439  const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
5440 
5441  // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
5442  // We have to permute it to OIHW if the data layout is NCHW.
5443  const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
5444  ConvertOperationInputToConstTensorPin(operation, 1,
5445  model, data, OHWIToOIHW) :
5446  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
5447 
5448  // Bias is a 1D tensor
5449  const ConstTensorPin biasPin =
5450  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
5451 
5452  if (!weightsPin.IsValid())
5453  {
5454  return Fail("%s: Operation has invalid weights", __func__);
5455  }
5456 
5457  if (!biasPin.IsValid())
5458  {
5459  return Fail("%s: Operation has invalid biases", __func__);
5460  }
5461 
5462  ConstTensor weights = weightsPin.GetConstTensor();
5463  ConstTensor bias = biasPin.GetConstTensor();
5464  SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
5465 
5466  ActivationFn activation;
5467 
5468  if (implicitPadding)
5469  {
5470  int32_t strideX{0};
5471  int32_t strideY{0};
5472  int32_t padLeft{0};
5473  int32_t padRight{0};
5474  int32_t padTop{0};
5475  int32_t padBottom{0};
5476 
5477  ::android::nn::PaddingScheme paddingScheme;
5478  if (!GetInputPaddingScheme(operation, 4, paddingScheme, model, data) ||
5479  !GetInputScalar(operation, 5, OperandType::INT32, strideX, model, data) ||
5480  !GetInputScalar(operation, 6, OperandType::INT32, strideY, model, data) ||
5481  !GetInputActivationFunction(operation, 7, activation, model, data))
5482  {
5483  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
5484  }
5485 
5486  const uint32_t kernelX = weights.GetShape()[widthIndex];
5487  const uint32_t kernelY = weights.GetShape()[heightIndex];
5488 
5489  // If output shape has been specified as a parameter then extract it and make it available.
5490  const Operand* outputShapeOperand = GetInputOperand(operation, 3, model, false);
5491  std::vector<int32_t> outputShape;
5492  if ((outputShapeOperand) && (GetTensorInt32Values(*outputShapeOperand, outputShape, model, data)))
5493  {
5494  // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
5495  for (int dimension : outputShape)
5496  {
5497  desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
5498  }
5499  desc.m_OutputShapeEnabled = true;
5500  }
5501 
5502  uint32_t outputX;
5503  uint32_t outputY;
5504 
5505  if (IsDynamicTensor(outputInfo))
5506  {
5507  if (outputShape.size() == 0)
5508  {
5509  return Fail("%s: Padding sizes cannot be inferred", __func__);
5510  }
5511 
5512  outputX = outputShape[widthIndex];
5513  outputY = outputShape[heightIndex];
5514  }
5515  else
5516  {
5517  outputX = outputInfo.GetShape()[widthIndex];
5518  outputY = outputInfo.GetShape()[heightIndex];
5519  }
5520 
5521  CalcPaddingTransposeConv(outputX, kernelX, strideX, padLeft, padRight, paddingScheme);
5522  CalcPaddingTransposeConv(outputY, kernelY, strideY, padTop, padBottom, paddingScheme);
5523 
5524  // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
5525  // but Arm NN only supports values >= 0
5526  if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
5527  {
5528  return Fail("%s: Negative padding values are not supported", __func__);
5529  }
5530 
5531  desc.m_StrideX = armnn::numeric_cast<uint32_t>(strideX);
5532  desc.m_StrideY = armnn::numeric_cast<uint32_t>(strideY);
5533  desc.m_PadLeft = armnn::numeric_cast<uint32_t>(padLeft);
5534  desc.m_PadRight = armnn::numeric_cast<uint32_t>(padRight);
5535  desc.m_PadTop = armnn::numeric_cast<uint32_t>(padTop);
5536  desc.m_PadBottom = armnn::numeric_cast<uint32_t>(padBottom);
5537  }
5538  else if (operation.inputs.size() == 11)
5539  {
5540  // explicit padding
5541  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
5542  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
5543  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
5544  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
5545  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
5546  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
5547  !GetInputActivationFunction(operation, 9, activation, model, data))
5548  {
5549  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
5550  }
5551  }
5552  else
5553  {
5554  return Fail("%s: Unsupported number of operation inputs", __func__);
5555  }
5556 
5557  desc.m_BiasEnabled = true;
5558  Optional<TensorInfo> biases(bias.GetInfo());
5559 
5560  bool isSupported = false;
5561  armnn::BackendId setBackend;
5562  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5563  {
5564  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5565  IsTransposeConvolution2dSupported,
5566  data.m_Backends,
5567  isSupported,
5568  setBackend,
5569  inputInfo,
5570  outputInfo,
5571  desc,
5572  weights.GetInfo(),
5573  biases);
5574  };
5575 
5576  if(IsDynamicTensor(outputInfo))
5577  {
5578  isSupported = AreDynamicTensorsSupported();
5579  }
5580  else
5581  {
5582  validateFunc(outputInfo, isSupported);
5583  }
5584  if (!isSupported)
5585  {
5586  return false;
5587  }
5588 
5589  IConnectableLayer* startLayer =
5590  data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
5591  if (!startLayer)
5592  {
5593  return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
5594  }
5595  startLayer->SetBackendId(setBackend);
5596 
5597  input.Connect(startLayer->GetInputSlot(0));
5598 
5599  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
5600  data, nullptr, validateFunc, activation);
5601 }
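
// Index sketch (editor addition): DataLayoutIndexed, used above to find the
// kernel and output extents, resolves which dimension holds height and width
// for the chosen layout.
#include <cstdint>

struct LayoutIndices { uint32_t height; uint32_t width; uint32_t channels; };

LayoutIndices IndicesFor(bool isNhwc)
{
    // NHWC -> {N=0, H=1, W=2, C=3}; NCHW -> {N=0, C=1, H=2, W=3}
    return isNhwc ? LayoutIndices{1, 2, 3} : LayoutIndices{2, 3, 1};
}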
5602 
5603 bool Converter::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
5604 {
5605  VLOG(DRIVER) << "Converter::ConvertSqrt()";
5606  ActivationDescriptor desc;
5607  desc.m_Function = ActivationFunction::Sqrt;
5608 
5609  return ::ConvertToActivation(operation, __func__, desc, model, data);
5610 }
5611 
5612 bool Converter::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
5613 {
5614  VLOG(DRIVER) << "Converter::ConvertSqueeze()";
5615 
5616  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5617  if (!input.IsValid())
5618  {
5619  return Fail("%s: Operation has invalid inputs", __func__);
5620  }
5621 
5622  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5623  unsigned int rank = inputInfo.GetNumDimensions();
5624  if (rank > 4)
5625  {
5626  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5627  }
5628 
5629  const Operand* output = GetOutputOperand(operation, 0, model);
5630  if (!output)
5631  {
5632  return Fail("%s: Could not read output 0", __func__);
5633  }
5634 
5635  if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
5636  {
5637  return Fail("%s: Dynamic output tensors are not supported", __func__);
5638  }
5639 
5640  // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
5641  // if the operand index is out of bounds.
5642  const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
5643 
5644  const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
5645 
5646  std::vector<int32_t> axis;
5647  if (!axisOperand)
5648  {
5649  axis.assign(dimensionSequence,
5650  dimensionSequence + rank);
5651  }
5652  else if (!GetTensorInt32Values(*axisOperand, axis, model, data))
5653  {
5654  return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
5655  }
5656 
5657  std::vector<uint32_t> outputDims;
5658  for (unsigned int i = 0; i < rank; i++)
5659  {
5660  bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
5661  auto currentDimension = inputInfo.GetShape()[i];
5662  if (skipSqueeze || currentDimension != 1)
5663  {
5664  outputDims.push_back(currentDimension);
5665  }
5666  }
5667 
5668  armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
5669 
5670  armnn::TensorInfo outputInfo = inputInfo;
5671  outputInfo.SetShape(outShape);
5672 
5673  armnn::ReshapeDescriptor reshapeDesc;
5674  reshapeDesc.m_TargetShape = outputInfo.GetShape();
5675 
5676  bool isSupported = false;
5677  armnn::BackendId setBackend;
5678  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5679  IsReshapeSupported,
5680  data.m_Backends,
5681  isSupported,
5682  setBackend,
5683  inputInfo,
5684  outputInfo,
5685  reshapeDesc);
5686 
5687  if (!isSupported)
5688  {
5689  return false;
5690  }
5691 
5692  armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
5693  assert(layer != nullptr);
5694  layer->SetBackendId(setBackend);
5695  input.Connect(layer->GetInputSlot(0));
5696 
5697  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
5698 }
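
// Worked example (editor addition) mirroring the output-dimension loop above:
// only size-1 dimensions listed in (or defaulted into) the axis set are dropped.
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<uint32_t> SqueezeDims(const std::vector<uint32_t>& in, const std::vector<int32_t>& axis)
{
    std::vector<uint32_t> out;
    for (uint32_t i = 0; i < in.size(); ++i)
    {
        bool listed = std::find(axis.begin(), axis.end(), static_cast<int32_t>(i)) != axis.end();
        if (!listed || in[i] != 1) { out.push_back(in[i]); }
    }
    return out;
}
// SqueezeDims({1, 4, 1, 3}, {0, 2}) -> {4, 3}; SqueezeDims({1, 4, 1, 3}, {0}) -> {4, 1, 3}.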
5699 
5700 bool Converter::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
5701 {
5702  VLOG(DRIVER) << "Converter::ConvertStridedSlice()";
5703 
5704  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5705  if (!input.IsValid())
5706  {
5707  return Fail("%s: Operation has invalid inputs", __func__);
5708  }
5709 
5710  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5711  unsigned int rank = inputInfo.GetNumDimensions();
5712  if (rank > 4)
5713  {
5714  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5715  }
5716 
5717  const Operand* output = GetOutputOperand(operation, 0, model);
5718  if (!output)
5719  {
5720  return Fail("%s: Could not read output 0", __func__);
5721  }
5722 
5723  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5724 
5725  const Operand* beginOperand = GetInputOperand(operation, 1, model);
5726  const Operand* endOperand = GetInputOperand(operation, 2, model);
5727  const Operand* stridesOperand = GetInputOperand(operation, 3, model);
5728 
5729  std::vector<int32_t> beginValues;
5730  std::vector<int32_t> endValues;
5731  std::vector<int32_t> stridesValues;
5732 
5733  // The beginOperand, endOperand and stridesOperand must each have a length equal to rank(input)
5734  auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
5735  {
5736  if (!GetTensorInt32Values(operand, operandValues, model, data))
5737  {
5738  return false;
5739  }
5740 
5741  if (operandValues.size() != rank)
5742  {
5743  return false;
5744  }
5745 
5746  return true;
5747  };
5748 
5749  if (!ValidateInputOperands(*beginOperand, beginValues)
5750  || !ValidateInputOperands(*endOperand, endValues)
5751  || !ValidateInputOperands(*stridesOperand, stridesValues))
5752  {
5753  return Fail("%s: Operation has invalid input operand", __func__);
5754  }
5755 
5756  // Stride cannot have value '0'
5757  if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
5758  {
5759  return Fail("%s: Stride must be non-zero value.", __func__);
5760  }
5761 
5762  armnn::StridedSliceDescriptor descriptor;
5763  descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
5764  descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
5765  descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
5766  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
5767 
5768  // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
5769  if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data) ||
5770  !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data) ||
5771  !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
5772  {
5773  return Fail("%s: Operation has invalid inputs", __func__);
5774  }
5775 
5776  bool isSupported = false;
5777  armnn::BackendId setBackend;
5778  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5779  {
5780  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5781  IsStridedSliceSupported,
5782  data.m_Backends,
5783  isSupported,
5784  setBackend,
5785  inputInfo,
5786  outputInfo,
5787  descriptor);
5788  };
5789 
5790  if(IsDynamicTensor(outputInfo))
5791  {
5792  isSupported = AreDynamicTensorsSupported();
5793  }
5794  else
5795  {
5796  validateFunc(outputInfo, isSupported);
5797  }
5798 
5799  if (!isSupported)
5800  {
5801  return false;
5802  }
5803 
5804  // Check if the slice can fit in an inferred output
5805  armnn::TensorShape inputShape = inputInfo.GetShape();
5806  for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
5807  {
5808  int stride = descriptor.m_Stride[i];
5809 
5810  if (descriptor.m_ShrinkAxisMask & (1 << i))
5811  {
5812  // If the difference between the start point and the end point of the slice on an axis being shrunk
5813  // is greater than 1 then throw an error as the output will not be large enough to hold the slice
5814  if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
5815  || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
5816  {
5817  return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
5818  }
5819 
5820  if(stride < 0)
5821  {
5822  return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
5823  }
5824  }
5825  }
5826 
5827  armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
5828  assert(layer != nullptr);
5829  layer->SetBackendId(setBackend);
5830  input.Connect(layer->GetInputSlot(0));
5831 
5832  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5833 }
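
// Mask sketch (editor addition): bit i of m_BeginMask / m_EndMask tells the
// backend to ignore begin[i] / end[i] and take the full range on that axis,
// while bit i of m_ShrinkAxisMask drops axis i from the output, which is why
// the loop above insists a shrunk axis selects at most one element.
#include <cstdint>

bool IgnoresBegin(int32_t beginMask, uint32_t axis) { return (beginMask  >> axis) & 1; }
bool ShrinksAxis(int32_t shrinkMask, uint32_t axis) { return (shrinkMask >> axis) & 1; }
// With beginMask == 0b10, axis 1 starts from index 0 regardless of begin[1].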
5834 
5835 bool Converter::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
5836 {
5837  VLOG(DRIVER) << "Converter::ConvertTranspose()";
5838 
5839  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5840  if (!input.IsValid())
5841  {
5842  return Fail("%s: Operation has invalid inputs", __func__);
5843  }
5844 
5845  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5846  unsigned int rank = inputInfo.GetNumDimensions();
5847  if (rank > 4)
5848  {
5849  Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5850  }
5851 
5852  // NOTE: The permutation operand is optional for TRANSPOSE, therefore we do not want to generate
5853  // a failure if the operand index is out of bounds.
5854  const Operand* permOperand = GetInputOperand(operation, 1, model, false);
5855 
5856  std::vector<int32_t> perm(rank);
5857  if (!permOperand || (permOperand->lifetime == OperandLifeTime::NO_VALUE))
5858  {
5859  for (unsigned int i = rank; i > 0; i--)
5860  {
5861  perm[rank - i] = armnn::numeric_cast<int> (i - 1);
5862  }
5863  }
5864  else if (!GetTensorInt32Values(*permOperand, perm, model, data))
5865  {
5866  return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
5867  }
5868 
5869  std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
5870 
5871  armnn::TransposeDescriptor transposeDesc;
5872  transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
5873 
5874  const Operand* output = GetOutputOperand(operation, 0, model);
5875  if (!output)
5876  {
5877  return Fail("%s: Could not read output 0", __func__);
5878  }
5879 
5880  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5881 
5882  bool isSupported = false;
5883  armnn::BackendId setBackend;
5884  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5885  {
5886  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5887  IsTransposeSupported,
5888  data.m_Backends,
5889  isSupported,
5890  setBackend,
5891  inputInfo,
5892  outputInfo,
5893  transposeDesc);
5894  };
5895 
5896  if(IsDynamicTensor(outputInfo))
5897  {
5898  isSupported = AreDynamicTensorsSupported();
5899  }
5900  else
5901  {
5902  validateFunc(outputInfo, isSupported);
5903  }
5904 
5905  if (!isSupported)
5906  {
5907  return false;
5908  }
5909 
5910  armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
5911  assert(layer != nullptr);
5912  layer->SetBackendId(setBackend);
5913  input.Connect(layer->GetInputSlot(0));
5914 
5915  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5916 }
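
// Default-permutation sketch (editor addition): with no permutation operand,
// the loop above builds the rank-reversing mapping.
#include <cstdint>
#include <vector>

std::vector<int32_t> ReversePermutation(uint32_t rank)
{
    std::vector<int32_t> perm(rank);
    for (uint32_t i = rank; i > 0; --i)
    {
        perm[rank - i] = static_cast<int32_t>(i - 1); // mirrors the converter's loop
    }
    return perm;
}
// ReversePermutation(4) -> {3, 2, 1, 0}.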
5917 
5918 } // namespace armnn_driver
armnn_driver::GetOutputOperand
const Operand * GetOutputOperand(const Operation &operation, uint32_t outputIndex, const Model &model)
Definition: ConversionUtils.hpp:662
armnn::QLstmDescriptor::m_CellClip
float m_CellClip
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1361
armnn::QLstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1371
armnn_driver::IsWeightsValid
bool IsWeightsValid(const Operation &operation, uint32_t inputIndex, const Model &model)
Utility functions.
Definition: ConversionUtils.cpp:134
armnn_driver::Operation
::android::nn::Operation Operation
Definition: ConversionUtils.hpp:47
armnn::TransposeConvolution2dDescriptor::m_OutputShapeEnabled
bool m_OutputShapeEnabled
Output shape if it has been specified.
Definition: Descriptors.hpp:1432
armnn::LstmInputParams::m_CellToForgetWeights
const ConstTensor * m_CellToForgetWeights
Definition: LstmParams.hpp:49
armnn::InstanceNormalizationDescriptor::m_Eps
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
Definition: Descriptors.hpp:837
armnn::IsSoftmaxSupported
bool IsSoftmaxSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SoftmaxDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:552
armnn::BackendId
Definition: BackendId.hpp:75
armnnUtils::ExpandDims
armnn::TensorShape ExpandDims(const armnn::TensorShape &tensorShape, int axis)
Definition: TensorUtils.cpp:140
armnn::QuantizedLstmInputParamsInfo::m_OutputGateBias
const TensorInfo * m_OutputGateBias
Definition: QuantizedLstmParams.hpp:152
armnn::IsConvolution2dSupported
bool IsConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const Convolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::Converter::Operation
::android::nn::Operation Operation
Definition: Converter.hpp:28
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
Definition: Descriptors.hpp:1559
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1422
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:550
armnn::LstmInputParamsInfo::m_InputToCellWeights
const TensorInfo * m_InputToCellWeights
Definition: LstmParams.hpp:91
armnn_driver::IsConnectedToDequantize
bool IsConnectedToDequantize(armnn::IOutputSlot *ioutputSlot)
Definition: ConversionUtils.cpp:1040
armnn::QLstmDescriptor::m_ProjectionClip
float m_ProjectionClip
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1363
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1416
armnn::GetArgMinMaxFunctionAsCString
constexpr char const * GetArgMinMaxFunctionAsCString(ArgMinMaxFunction function)
Definition: TypesUtils.hpp:51
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:494
armnn_driver
Helper classes.
Definition: ArmnnDevice.cpp:37
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:912
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:737
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1437
armnn_driver::ConvertPaddings
bool ConvertPaddings(const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor)
Definition: ConversionUtils.cpp:338
armnn::QLstmDescriptor::m_ForgetIntermediateScale
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
Definition: Descriptors.hpp:1375
armnn::IsTransposeConvolution2dSupported
bool IsTransposeConvolution2dSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TransposeConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1420
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:109
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1143
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:157
armnn::IsL2NormalizationSupported
bool IsL2NormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const L2NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
Connect
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:14
armnn::LstmInputParamsInfo::m_CellToInputWeights
const TensorInfo * m_CellToInputWeights
Definition: LstmParams.hpp:97
armnn_driver::isQuantizedOperand
bool isQuantizedOperand(const OperandType &operandType)
Definition: CanonicalUtils.cpp:505
armnn::Convolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:548
armnn::TensorInfo::SetQuantizationScale
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:473
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:540
armnn::LstmInputParamsInfo::m_InputGateBias
const TensorInfo * m_InputGateBias
Definition: LstmParams.hpp:100
armnn::BatchToSpaceNdDescriptor::m_Crops
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
Definition: Descriptors.hpp:868
armnn::QuantizedLstmInputParams::m_RecurrentToInputWeights
const ConstTensor * m_RecurrentToInputWeights
Definition: QuantizedLstmParams.hpp:38
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::LstmInputParams::m_RecurrentToOutputWeights
const ConstTensor * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:47
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1049
armnn::TensorShape::GetNumElements
unsigned int GetNumElements() const
Function that calculates the tensor elements by multiplying all dimension size which are Specified.
Definition: Tensor.cpp:181
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:475
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1297
armnn_driver::ConvertToLayerInputHandle
LayerInputHandle ConvertToLayerInputHandle(const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle)
Definition: ConversionUtils.cpp:204
armnn_driver::ConstTensorPin::GetConstTensorPtr
const armnn::ConstTensor * GetConstTensorPtr() const
Definition: ConversionUtils.cpp:120
armnn::LstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: LstmParams.hpp:90
armnn::TransposeConvolution2dDescriptor::m_OutputShape
std::vector< unsigned int > m_OutputShape
Definition: Descriptors.hpp:1433
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::IsFullyConnectedSupported
bool IsFullyConnectedSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const TensorInfo &weights, const TensorInfo &biases, const FullyConnectedDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::LstmInputParamsInfo::m_InputToOutputWeights
const TensorInfo * m_InputToOutputWeights
Definition: LstmParams.hpp:92
armnn::IsStridedSliceSupported
bool IsStridedSliceSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const StridedSliceDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::SpaceToBatchNdDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left,...
Definition: Descriptors.hpp:1016
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1531
armnn::LstmInputParamsInfo::m_ProjectionBias
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:932
armnn::FullyConnectedDescriptor::m_ConstantWeights
bool m_ConstantWeights
Enable/disable constant weights and biases.
Definition: Descriptors.hpp:498
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1250
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1289
armnn_driver::GetOptionalBool
bool GetOptionalBool(const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:900
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
armnn::LstmInputParams::m_CellBias
const ConstTensor * m_CellBias
Definition: LstmParams.hpp:53
armnn::QuantizedLstmInputParams::m_InputToOutputWeights
const ConstTensor * m_InputToOutputWeights
Definition: QuantizedLstmParams.hpp:36
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1428
armnn::LstmInputParamsInfo::m_RecurrentToForgetWeights
const TensorInfo * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:94
armnn::QuantizedLstmInputParams::m_InputToCellWeights
const ConstTensor * m_InputToCellWeights
Definition: QuantizedLstmParams.hpp:35
TensorUtils.hpp
armnn::OriginsDescriptor::SetConcatAxis
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
Definition: Descriptors.cpp:158
armnn::LayerType::Shape
@ Shape
armnn_driver::GetTensorInt32Values
bool GetTensorInt32Values(const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data)
Definition: ConversionUtils.cpp:822
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:546
armnn::LstmInputParams::m_OutputLayerNormWeights
const ConstTensor * m_OutputLayerNormWeights
Definition: LstmParams.hpp:60
armnn::QuantizedLstmInputParams::m_InputToInputWeights
const ConstTensor * m_InputToInputWeights
Definition: QuantizedLstmParams.hpp:33
armnn::LstmInputParamsInfo::m_CellBias
const TensorInfo * m_CellBias
Definition: LstmParams.hpp:102
armnn_driver::Model
::android::nn::Model Model
Helper classes.
Definition: ConversionUtils.hpp:43
armnn_driver::AreDynamicTensorsSupported
bool AreDynamicTensorsSupported()
Checks for ArmNN support of dynamic tensors.
Definition: CanonicalUtils.cpp:500
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1093
armnn::QuantizedLstmInputParams::m_RecurrentToCellWeights
const ConstTensor * m_RecurrentToCellWeights
Definition: QuantizedLstmParams.hpp:40
armnn::QuantizedLstmInputParams::m_RecurrentToOutputWeights
const ConstTensor * m_RecurrentToOutputWeights
Definition: QuantizedLstmParams.hpp:41
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToCellWeights
const TensorInfo * m_RecurrentToCellWeights
Definition: QuantizedLstmParams.hpp:146
armnn::GetUnaryOperationAsCString
constexpr char const * GetUnaryOperationAsCString(UnaryOperation operation)
Definition: TypesUtils.hpp:75
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1099
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:224
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:676
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1137
armnn::Convolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:538
armnn::IsLstmSupported
bool IsLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &outputStateIn, const TensorInfo &cellStateIn, const TensorInfo &scratchBuffer, const TensorInfo &outputStateOut, const TensorInfo &cellStateOut, const TensorInfo &output, const LstmDescriptor &descriptor, const LstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:536
armnn_driver::GetInputActivationFunctionFromTensor
bool GetInputActivationFunctionFromTensor(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:837
armnn_driver::DequantizeAndMakeConstTensorPin
ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
Definition: ConversionUtils.cpp:731
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::QuantizedLstmInputParamsInfo::m_ForgetGateBias
const TensorInfo * m_ForgetGateBias
Definition: QuantizedLstmParams.hpp:150
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:668
armnn::ArgMinMaxFunction
ArgMinMaxFunction
Definition: Types.hpp:102
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:672
armnn::LstmInputParamsInfo::m_InputToInputWeights
const TensorInfo * m_InputToInputWeights
Definition: LstmParams.hpp:89
armnnUtils::TransposeTensorShape
armnn::TensorShape TransposeTensorShape(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Transpose.cpp:98
armnn::LstmInputParams::m_InputToOutputWeights
const ConstTensor * m_InputToOutputWeights
Definition: LstmParams.hpp:43
armnn_driver::ConversionData
Definition: ConversionUtils.hpp:51
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::IOutputSlot::Connect
virtual int Connect(IInputSlot &destination)=0
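For illustration, a minimal sketch of how an output slot is given a tensor info and connected to a downstream input slot (the layers chosen here are arbitrary examples, not taken from this file):

 armnn::INetworkPtr network = armnn::INetwork::Create();
 armnn::IConnectableLayer* input  = network->AddInputLayer(0);
 armnn::IConnectableLayer* floorL = network->AddFloorLayer();
 armnn::TensorInfo info(armnn::TensorShape({1, 4}), armnn::DataType::Float32);
 input->GetOutputSlot(0).SetTensorInfo(info);             // describe the producer's output
 input->GetOutputSlot(0).Connect(floorL->GetInputSlot(0)); // wire producer -> consumer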
armnn::LstmInputParams::m_ForgetGateBias
const ConstTensor * m_ForgetGateBias
Definition: LstmParams.hpp:52
armnn::LstmInputParamsInfo::m_CellToForgetWeights
const TensorInfo * m_CellToForgetWeights
Definition: LstmParams.hpp:98
armnn::NormalizationDescriptor::m_NormMethodType
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
Definition: Descriptors.hpp:763
armnn::QLstmDescriptor::m_CellIntermediateScale
float m_CellIntermediateScale
Cell intermediate quantization scale.
Definition: Descriptors.hpp:1377
armnn::IsReshapeSupported
bool IsReshapeSupported(const BackendId &backend, const TensorInfo &input, const ReshapeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::GetDataTypeSize
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:155
armnn::DepthwiseConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:660
armnn::LstmInputParams::m_CellLayerNormWeights
const ConstTensor * m_CellLayerNormWeights
Definition: LstmParams.hpp:59
armnn::LstmInputParams::m_CellToOutputWeights
const ConstTensor * m_CellToOutputWeights
Definition: LstmParams.hpp:50
armnn::FillDescriptor::m_Value
float m_Value
Definition: Descriptors.hpp:908
armnn::QLstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1379
armnn::QuantizedLstmInputParamsInfo::m_InputToInputWeights
const TensorInfo * m_InputToInputWeights
Definition: QuantizedLstmParams.hpp:139
armnn::NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:773
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:664
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:843
armnn_driver::Operand
::android::nn::Operand Operand
Definition: ConversionUtils.hpp:44
armnn::OptionalReferenceSwitch::value
const T & value() const
Definition: Optional.hpp:146
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn_driver::Half
half_float::half Half
Definition: Converter.cpp:14
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1022
armnn::ReduceOperation::Min
@ Min
armnn::IsSplitterSupported
bool IsSplitterSupported(const BackendId &backend, const TensorInfo &input, const std::vector< std::reference_wrapper< TensorInfo >> &outputs, const ViewsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1291
armnn::TensorInfo::SetQuantizationOffset
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:489
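A hedged example of how the quantization setters compose on a TensorInfo (the scale, offset, and shape are illustrative values, not from the converter):

 armnn::TensorInfo qInfo(armnn::TensorShape({1, 2, 2, 1}), armnn::DataType::QAsymmS8);
 qInfo.SetQuantizationScale(0.05f); // real value = scale * (quantized - offset)
 qInfo.SetQuantizationOffset(-10);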
armnn_driver::ConversionData::m_Network
armnn::INetworkPtr m_Network
Definition: ConversionUtils.hpp:60
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:963
armnn::IsSubtractionSupported
bool IsSubtractionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::LogicalBinaryOperation
LogicalBinaryOperation
Definition: Types.hpp:118
armnn::LstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: LstmParams.hpp:41
armnn::GetLogicalBinaryOperationAsCString
constexpr char const * GetLogicalBinaryOperationAsCString(LogicalBinaryOperation operation)
Definition: TypesUtils.hpp:91
armnn::ConcatDescriptor
OriginsDescriptor ConcatDescriptor
Definition: DescriptorsFwd.hpp:55
armnn::QuantizedLstmInputParamsInfo::m_InputToCellWeights
const TensorInfo * m_InputToCellWeights
Definition: QuantizedLstmParams.hpp:141
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:893
armnn::Half
half_float::half Half
Definition: Half.hpp:22
armnn_driver::ConstTensorPin
Definition: ConversionUtils.hpp:90
armnn::ResizeMethod
ResizeMethod
Definition: Types.hpp:152
armnn::ReduceOperation::Sum
@ Sum
armnn::ResizeDescriptor::m_TargetHeight
uint32_t m_TargetHeight
Target height value.
Definition: Descriptors.hpp:956
armnn_driver::LayerInputHandle
Definition: ConversionUtils.hpp:66
armnn::SpaceToDepthDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1042
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
armnn::DepthwiseConvolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation factor value for height dimension.
Definition: Descriptors.hpp:674
armnn::LstmInputParams::m_RecurrentToForgetWeights
const ConstTensor * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:45
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params to gather indices from.
Definition: Descriptors.hpp:928
armnn_driver::Converter::Model
::android::nn::Model Model
Definition: Converter.hpp:24
armnn::ReduceOperation::Max
@ Max
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
armnn::DepthwiseConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:678
armnn::QuantizedLstmInputParamsInfo::m_InputGateBias
const TensorInfo * m_InputGateBias
Definition: QuantizedLstmParams.hpp:149
armnn::SoftmaxDescriptor::m_Axis
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
Definition: Descriptors.hpp:172
armnn_driver::ConvertPooling2d
bool ConvertPooling2d(const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:380
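The pooling conversions in Converter.cpp plausibly reduce to one-line dispatches through this helper; a sketch (the exact call sites may differ):

 // inside a hypothetical ConvertAveragePool2d-style wrapper
 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);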
armnn::NormalizationAlgorithmChannel::Across
@ Across
armnn::Convolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:534
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:627
armnn::GetResizeMethodAsCString
constexpr const char * GetResizeMethodAsCString(ResizeMethod method)
Definition: TypesUtils.hpp:258
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1119
armnn::NormalizationAlgorithmMethod::LocalBrightness
@ LocalBrightness
Krichevsky 2012: Local Brightness Normalization.
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnn::NormalizationDescriptor::m_NormSize
uint32_t m_NormSize
Depth radius value.
Definition: Descriptors.hpp:765
armnn_driver::GetInputInt32
bool GetInputInt32(const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:782
armnn_driver::LayerInputHandle::GetOutputSlot
armnn::IOutputSlot * GetOutputSlot() const
Definition: ConversionUtils.cpp:70
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:870
armnn::QuantizedLstmInputParams::m_CellBias
const ConstTensor * m_CellBias
Definition: QuantizedLstmParams.hpp:45
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:777
armnn::LstmInputParams::m_InputToCellWeights
const ConstTensor * m_InputToCellWeights
Definition: LstmParams.hpp:42
armnn::QuantizedLstmInputParamsInfo::m_CellBias
const TensorInfo * m_CellBias
Definition: QuantizedLstmParams.hpp:151
armnn::IsResizeSupported
bool IsResizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ResizeDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::QuantizedLstmInputParams::m_RecurrentToForgetWeights
const ConstTensor * m_RecurrentToForgetWeights
Definition: QuantizedLstmParams.hpp:39
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1095
armnn::ComparisonOperation
ComparisonOperation
Definition: Types.hpp:108
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1560
armnn_driver::GetOptionalConvolutionDilationParams
bool GetOptionalConvolutionDilationParams(const Operation &operation, uint32_t dilationXIndex, ConvolutionDescriptor &descriptor, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:874
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn_driver::GetOperandType
bool GetOperandType(const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type)
Definition: ConversionUtils.hpp:683
armnn::SpaceToBatchNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1018
armnn::ResizeDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:961
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1039
armnn::IsDivisionSupported
bool IsDivisionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:502
Converter.hpp
armnn::LstmInputParamsInfo::m_OutputGateBias
const TensorInfo * m_OutputGateBias
Definition: LstmParams.hpp:103
armnn::UnaryOperation
UnaryOperation
Definition: Types.hpp:124
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1418
armnn_driver::LayerInputHandle::Connect
void Connect(armnn::IInputSlot &inputSlot)
Definition: ConversionUtils.cpp:32
armnn_driver::LayerInputHandle::SanitizeQuantizationScale
void SanitizeQuantizationScale(LayerInputHandle &weight, LayerInputHandle &input)
Definition: ConversionUtils.cpp:55
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1327
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
armnn_driver::GetOptionalInputActivation
bool GetOptionalInputActivation(const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:853
armnn::InstanceNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:839
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
armnn::TensorInfo::HasPerAxisQuantization
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
armnn::GetComparisonOperationAsCString
constexpr char const * GetComparisonOperationAsCString(ComparisonOperation operation)
Definition: TypesUtils.hpp:61
armnn_driver::GetInputOperand
const Operand * GetInputOperand(const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true)
Definition: ConversionUtils.hpp:643
armnn::SoftmaxDescriptor::m_Beta
float m_Beta
Exponentiation value.
Definition: Descriptors.hpp:170
armnn::ResizeDescriptor::m_Method
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
Definition: Descriptors.hpp:959
armnn_driver::Converter::ConvertOperation
static bool ConvertOperation(const Operation &operation, const Model &model, ConversionData &data)
Definition: Converter.cpp:21
armnn::QLstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1383
armnn_driver::GetInputPaddingScheme
bool GetInputPaddingScheme(const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data)
Definition: ConversionUtils.cpp:761
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::TransposeDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination,...
Definition: Descriptors.hpp:1461
armnn::QLstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1381
armnn::LstmInputParams::m_RecurrentToInputWeights
const ConstTensor * m_RecurrentToInputWeights
Definition: LstmParams.hpp:44
armnn::LstmInputParamsInfo::m_ForgetLayerNormWeights
const TensorInfo * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:107
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToForgetWeights
const TensorInfo * m_RecurrentToForgetWeights
Definition: QuantizedLstmParams.hpp:145
armnn::QuantizedLstmInputParams
Definition: QuantizedLstmParams.hpp:13
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1293
armnn_driver::IsDynamicTensor
bool IsDynamicTensor(const armnn::TensorInfo &tensorInfo)
Checks if a tensor info represents a dynamic tensor.
Definition: CanonicalUtils.cpp:486
armnn::DataLayout::NHWC
@ NHWC
armnn::IsDequantizeSupported
bool IsDequantizeSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::LstmInputParamsInfo::m_InputLayerNormWeights
const TensorInfo * m_InputLayerNormWeights
Definition: LstmParams.hpp:106
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn::LstmInputParamsInfo::m_OutputLayerNormWeights
const TensorInfo * m_OutputLayerNormWeights
Definition: LstmParams.hpp:109
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:542
armnn::LstmInputParamsInfo::m_ProjectionWeights
const TensorInfo * m_ProjectionWeights
Definition: LstmParams.hpp:104
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:771
armnn::PadDescriptor::m_PadValue
float m_PadValue
Optional value to use for padding, defaults to 0.
Definition: Descriptors.hpp:1168
armnn::PermutationVector
Definition: Types.hpp:295
armnn_driver::LayerInputHandle::IsValid
bool IsValid() const
Definition: ConversionUtils.cpp:27
armnn::BatchToSpaceNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:866
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1302
armnn::SpaceToBatchNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:1013
armnn::NormalizationDescriptor::m_Beta
float m_Beta
Beta value for the normalization equation.
Definition: Descriptors.hpp:769
armnn::LstmInputParams::m_RecurrentToCellWeights
const ConstTensor * m_RecurrentToCellWeights
Definition: LstmParams.hpp:46
armnn::IsDepthwiseConvolutionSupported
bool IsDepthwiseConvolutionSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const DepthwiseConvolution2dDescriptor &descriptor, const TensorInfo &weights, const Optional< TensorInfo > &biases, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1089
armnn::IConnectableLayer::SetBackendId
virtual void SetBackendId(const BackendId &id)=0
Set the backend of the IConnectableLayer.
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:181
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:970
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1430
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::LstmInputParams::m_ProjectionWeights
const ConstTensor * m_ProjectionWeights
Definition: LstmParams.hpp:55
armnn::LstmInputParams::m_InputToInputWeights
const ConstTensor * m_InputToInputWeights
Definition: LstmParams.hpp:40
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1387
armnn::FullyConnectedDescriptor::m_TransposeWeightMatrix
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Definition: Descriptors.hpp:496
armnn::L2NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:792
armnn::TensorInfo::GetQuantizationDim
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:494
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToOutputWeights
const TensorInfo * m_RecurrentToOutputWeights
Definition: QuantizedLstmParams.hpp:147
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn_driver::ConstTensorPin::IsOptional
bool IsOptional() const
Definition: ConversionUtils.cpp:110
armnn::IOutputSlot::SetTensorInfo
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::QuantizedLstmInputParams::m_ForgetGateBias
const ConstTensor * m_ForgetGateBias
Definition: QuantizedLstmParams.hpp:44
armnnUtils::DataLayoutIndexed
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
Definition: DataLayoutIndexed.hpp:17
armnn::ActivationFunction::TanH
@ TanH
armnn::IsMaximumSupported
bool IsMaximumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnSupported=nullptr, size_t reasonIfUnSupportedMaxLength=0)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToInputWeights
const TensorInfo * m_RecurrentToInputWeights
Definition: QuantizedLstmParams.hpp:144
armnn::DepthwiseConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:662
armnn_driver::LayerInputHandle::GetTensorInfo
const armnn::TensorInfo & GetTensorInfo() const
Definition: ConversionUtils.cpp:50
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1465
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1097
armnn::IsBatchToSpaceNdSupported
bool IsBatchToSpaceNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const BatchToSpaceNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::ConvertOperationInputToConstTensorPin
ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings=g_DontPermute, const armnn::TensorShape *overrideTensorShape=nullptr, bool optional=false)
Definition: ConversionUtils.hpp:718
armnn::IsMinimumSupported
bool IsMinimumSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::QLstmDescriptor::m_InputIntermediateScale
float m_InputIntermediateScale
Input intermediate quantization scale.
Definition: Descriptors.hpp:1373
armnn::StridedSliceDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1310
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1139
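A small illustrative configuration combining m_Axis and m_KeepDims (the axis values assume an NHWC tensor and are not taken from this file):

 armnn::MeanDescriptor meanDesc;
 meanDesc.m_Axis = { 1, 2 };  // reduce over the spatial dimensions
 meanDesc.m_KeepDims = true;  // reduced dimensions are kept with length 1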
armnn::Optional
Definition: Optional.hpp:270
armnn_driver::g_DontPermute
const armnn::PermutationVector g_DontPermute
Definition: CanonicalUtils.cpp:38
armnn::IsMeanSupported
bool IsMeanSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const MeanDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1424
armnn::IsNormalizationSupported
bool IsNormalizationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const NormalizationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::OptionalDataLayout
armnn::DataLayout OptionalDataLayout(const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:853
armnn::QLstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1365
armnn_driver::GetInputActivationFunction
bool GetInputActivationFunction(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:823
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1300
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:666
armnn_driver::ConstTensorPin::GetConstTensor
const armnn::ConstTensor & GetConstTensor() const
Definition: ConversionUtils.cpp:115
armnn::InstanceNormalizationDescriptor::m_Gamma
float m_Gamma
Gamma, the scale scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:833
armnn_driver::ConstTensorPin::IsValid
bool IsValid() const
Definition: ConversionUtils.cpp:105
armnn::QuantizedLstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: QuantizedLstmParams.hpp:34
armnn::NormalizationDescriptor::m_NormChannelType
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Definition: Descriptors.hpp:761
armnn::LstmInputParams::m_CellToInputWeights
const ConstTensor * m_CellToInputWeights
Definition: LstmParams.hpp:48
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1087
armnn::CreateDescriptorForConcatenation
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing...
Definition: Descriptors.hpp:268
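A minimal usage sketch of the convenience template, concatenating two tensors along dimension 1 (the shapes are assumed for illustration):

 std::vector<armnn::TensorShape> shapes = { armnn::TensorShape({1, 8}),
                                            armnn::TensorShape({1, 4}) };
 armnn::OriginsDescriptor concatDesc =
     armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), 1);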
armnn::LstmDescriptor::m_ClippingThresProj
float m_ClippingThresProj
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1091
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn::TensorInfo::SetShape
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
armnn::QLstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1367
armnn::LstmInputParamsInfo::m_CellLayerNormWeights
const TensorInfo * m_CellLayerNormWeights
Definition: LstmParams.hpp:108
armnn::BaseTensor::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:297
armnn::QuantizedLstmInputParamsInfo::m_InputToOutputWeights
const TensorInfo * m_InputToOutputWeights
Definition: QuantizedLstmParams.hpp:142
armnn::IsMultiplicationSupported
bool IsMultiplicationSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::ConvertReduce
bool ConvertReduce(const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
Definition: ConversionUtils.cpp:508
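As with the pooling helper above, reduce-style operations presumably forward to this function with the matching armnn::ReduceOperation; a sketch:

 // hypothetical REDUCE_SUM dispatch
 return ConvertReduce(operation, model, data, armnn::ReduceOperation::Sum);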
armnn::LstmInputParamsInfo::m_ForgetGateBias
const TensorInfo * m_ForgetGateBias
Definition: LstmParams.hpp:101
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions. (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
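Taken together with m_A, BoundedReLu computes min(m_A, max(m_B, input)); for example, a ReLu6-style configuration (values follow the standard ReLu6 definition, not this file):

 armnn::ActivationDescriptor desc;
 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
 desc.m_A = 6.0f; // upper bound
 desc.m_B = 0.0f; // lower bound, i.e. min(6, max(0, x))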
armnn::IsPreluSupported
bool IsPreluSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &alpha, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::DepthwiseConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:670
armnn::ActivationFunction::ReLu
@ ReLu
armnn::LstmInputParams::m_InputLayerNormWeights
const ConstTensor * m_InputLayerNormWeights
Definition: LstmParams.hpp:57
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1426
armnn::LstmInputParams
Definition: LstmParams.hpp:13
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::LstmInputParamsInfo::m_RecurrentToOutputWeights
const TensorInfo * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:96
armnn::IsActivationSupported
bool IsActivationSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const ActivationDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::Convolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:544
armnn::IsFloorSupported
bool IsFloorSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn_driver::SetupAndTrackLayerOutputSlot
bool SetupAndTrackLayerOutputSlot(const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes)
Definition: ConversionUtils.cpp:963
armnn::TensorInfo::GetQuantizationScales
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
armnn::ResizeDescriptor::m_HalfPixelCenters
bool m_HalfPixelCenters
Half Pixel Centers.
Definition: Descriptors.hpp:965
armnn::IsQuantizedLstmSupported
bool IsQuantizedLstmSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &previousCellStateIn, const TensorInfo &previousOutputIn, const TensorInfo &cellStateOut, const TensorInfo &output, const QuantizedLstmInputParamsInfo &paramsInfo, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:986
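A brief sketch of setting the target shape and adding the corresponding layer (the network pointer and shape are assumptions for illustration):

 armnn::ReshapeDescriptor reshapeDesc;
 reshapeDesc.m_TargetShape = armnn::TensorShape({1, 16});
 armnn::IConnectableLayer* reshape =
     network->AddReshapeLayer(reshapeDesc); // network: armnn::INetwork* (assumed)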
armnn::LstmInputParamsInfo::m_RecurrentToInputWeights
const TensorInfo * m_RecurrentToInputWeights
Definition: LstmParams.hpp:93
armnn::QuantizedLstmInputParams::m_InputGateBias
const ConstTensor * m_InputGateBias
Definition: QuantizedLstmParams.hpp:43
armnn::LstmInputParams::m_InputGateBias
const ConstTensor * m_InputGateBias
Definition: LstmParams.hpp:51
armnn::QuantizedLstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: QuantizedLstmParams.hpp:46
armnn::BaseTensor::GetMemoryArea
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
armnn_driver::GetTensorInfoForOperand
armnn::TensorInfo GetTensorInfoForOperand(const Operand &operand)
Definition: CanonicalUtils.cpp:97
armnn_driver::GetInputFloat32
bool GetInputFloat32(const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:791
armnn::LstmInputParamsInfo::m_CellToOutputWeights
const TensorInfo * m_CellToOutputWeights
Definition: LstmParams.hpp:99
armnn_driver::OperandType
::android::nn::OperandType OperandType
Definition: ConversionUtils.hpp:46
armnn_driver::IsOperandConstant
bool IsOperandConstant(const Operand &operand)
Definition: ConversionUtils.hpp:698
armnn::LstmInputParams::m_ForgetLayerNormWeights
const ConstTensor * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:58
armnn::BoostLogSeverityMapping::error
@ error
armnn::QLstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1369
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:815
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:767
armnn_driver::GetInputScalar
bool GetInputScalar(const Operation &operation, uint32_t inputIndex, OperandType type, OutputType &outValue, const Model &model, const ConversionData &data, bool optional=false)
Definition: ConversionUtils.hpp:742
armnn::LstmInputParams::m_ProjectionBias
const ConstTensor * m_ProjectionBias
Definition: LstmParams.hpp:56
armnn::LstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: LstmParams.hpp:54
FORWARD_LAYER_SUPPORT_FUNC
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend,...)
Definition: ConversionUtils.hpp:153
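A hedged sketch of the call pattern used throughout the converter (the trailing arguments mirror whichever Is*Supported signature is being queried; the tensor infos here are assumptions):

 bool isSupported = false;
 armnn::BackendId setBackend;
 FORWARD_LAYER_SUPPORT_FUNC(__func__,
                            IsFloorSupported,
                            data.m_Backends,
                            isSupported,
                            setBackend,
                            inputInfo,   // assumed armnn::TensorInfo
                            outputInfo); // assumed armnn::TensorInfo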
armnn::QuantizedLstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: QuantizedLstmParams.hpp:140
armnn::LstmInputParamsInfo::m_RecurrentToCellWeights
const TensorInfo * m_RecurrentToCellWeights
Definition: LstmParams.hpp:95
armnn::OriginsDescriptor::SetViewOriginCoord
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
Definition: Descriptors.cpp:167
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn_driver::ConversionData::m_Backends
const std::vector< armnn::BackendId > m_Backends
Definition: ConversionUtils.hpp:59
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
armnn::IsSpaceToDepthSupported
bool IsSpaceToDepthSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToDepthDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::IsConcatSupported
bool IsConcatSupported(const BackendId &backend, const std::vector< const TensorInfo * > inputs, const TensorInfo &output, const OriginsDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::InstanceNormalizationDescriptor::m_Beta
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 0.0.
Definition: Descriptors.hpp:835
armnn_driver::ConvertToActivation
bool ConvertToActivation(const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:592
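Dedicated activation conversions likely wrap this helper with a pre-filled descriptor; an assumed example for a hard-swish style call:

 armnn::ActivationDescriptor desc;
 desc.m_Function = armnn::ActivationFunction::HardSwish;
 return ConvertToActivation(operation, __func__, desc, model, data);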
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:954
armnn::IsAdditionSupported
bool IsAdditionSupported(const BackendId &backend, const TensorInfo &input0, const TensorInfo &input1, const TensorInfo &output, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:990
armnn::IsPadSupported
bool IsPadSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const PadDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.
android::nn
Definition: support_library_service.cpp:10
armnn::IsSpaceToBatchNdSupported
bool IsSpaceToBatchNdSupported(const BackendId &backend, const TensorInfo &input, const TensorInfo &output, const SpaceToBatchNdDescriptor &descriptor, char *reasonIfUnsupported=nullptr, size_t reasonIfUnsupportedMaxLength=1024)
Deprecated in favor of IBackend and ILayerSupport interfaces.