ArmNN 23.08
Converter.cpp
1 //
2 // Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Converter.hpp"
7 #include <half/half.hpp>
9 
10 namespace armnn_driver
11 {
12 
13 using namespace android::nn;
14 using Half = half_float::half;
15 
16 namespace
17 {
18 
19 } // anonymous namespace
20 
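// ConvertOperation maps each NNAPI operation type onto the matching ArmNN
// conversion routine below; returning false reports the operation as
// unsupported so the NNAPI runtime can schedule it on another driver.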
21 bool Converter::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
22 {
23  switch (operation.type)
24  {
25  case OperationType::ABS:
26  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
27  case OperationType::ADD:
28  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Add);
29  case OperationType::ARGMAX:
30  return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
31  case OperationType::ARGMIN:
32  return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
33  case OperationType::AVERAGE_POOL_2D:
34  return ConvertAveragePool2d(operation, model, data);
35  case OperationType::BATCH_MATMUL:
36  return ConvertBatchMatMul(operation, model, data);
37  case OperationType::BATCH_TO_SPACE_ND:
38  return ConvertBatchToSpaceNd(operation, model, data);
39  case OperationType::CAST:
40  return ConvertCast(operation, model, data);
41  case OperationType::CONCATENATION:
42  return ConvertConcatenation(operation, model, data);
43  case OperationType::CONV_2D:
44  return ConvertConv2d(operation, model, data);
45  case OperationType::DEPTH_TO_SPACE:
46  return ConvertDepthToSpace(operation, model, data);
47  case OperationType::DEPTHWISE_CONV_2D:
48  return ConvertDepthwiseConv2d(operation, model, data);
49  case OperationType::DEQUANTIZE:
50  return ConvertDequantize(operation, model, data);
51  case OperationType::DIV:
52  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Div);
53  case OperationType::ELU:
54  return ConvertElu(operation, model, data);
55  case OperationType::EQUAL:
56  return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
57  case OperationType::EXP:
58  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Exp);
59  case OperationType::EXPAND_DIMS:
60  return ConvertExpandDims(operation, model, data);
61  case OperationType::FILL:
62  return ConvertFill(operation, model, data);
63  case OperationType::FLOOR:
64  return ConvertFloor(operation, model, data);
65  case OperationType::FULLY_CONNECTED:
66  return ConvertFullyConnected(operation, model, data);
67  case OperationType::GATHER:
68  return ConvertGather(operation, model, data);
69  case OperationType::GREATER:
70  return ConvertComparison(operation, model, data, ComparisonOperation::Greater);
71  case OperationType::GREATER_EQUAL:
72  return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
73  case OperationType::GROUPED_CONV_2D:
74  return ConvertGroupedConv2d(operation, model, data);
75  case OperationType::HARD_SWISH:
76  return ConvertHardSwish(operation, model, data);
77  case OperationType::INSTANCE_NORMALIZATION:
78  return ConvertInstanceNormalization(operation, model, data);
79  case OperationType::L2_NORMALIZATION:
80  return ConvertL2Normalization(operation, model, data);
81  case OperationType::L2_POOL_2D:
82  return ConvertL2Pool2d(operation, model, data);
83  case OperationType::LESS:
84  return ConvertComparison(operation, model, data, ComparisonOperation::Less);
85  case OperationType::LESS_EQUAL:
86  return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
87  case OperationType::LOCAL_RESPONSE_NORMALIZATION:
88  return ConvertLocalResponseNormalization(operation, model, data);
89  case OperationType::LOG:
90  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Log);
91  case OperationType::LOGICAL_AND:
92  return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
93  case OperationType::LOGICAL_NOT:
94  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
95  case OperationType::LOGICAL_OR:
96  return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
97  case OperationType::LOGISTIC:
98  return ConvertLogistic(operation, model, data);
99  case OperationType::LOG_SOFTMAX:
100  return ConvertLogSoftmax(operation, model, data);
101  case OperationType::LSTM:
102  return ConvertLstm(operation, model, data);
103  case OperationType::MAX_POOL_2D:
104  return ConvertMaxPool2d(operation, model, data);
105  case OperationType::MAXIMUM:
106  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Maximum);
107  case OperationType::MEAN:
108  return ConvertMean(operation, model, data);
109  case OperationType::MINIMUM:
110  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Minimum);
111  case OperationType::MUL:
112  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Mul);
113  case OperationType::NEG:
114  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
115  case OperationType::NOT_EQUAL:
116  return ConvertComparison(operation, model, data, ComparisonOperation::NotEqual);
117  case OperationType::PAD:
118  return ConvertPad(operation, model, data);
119  case OperationType::PAD_V2:
120  return ConvertPadV2(operation, model, data);
121  case OperationType::PRELU:
122  return ConvertPrelu(operation, model, data);
123  case OperationType::POW:
124  return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Power);
125  case OperationType::QUANTIZE:
126  return ConvertQuantize(operation, model, data);
127  case OperationType::QUANTIZED_LSTM:
128  return ConvertQuantizedLstm(operation, model, data);
129  case OperationType::QUANTIZED_16BIT_LSTM:
130  return ConvertQuantized16BitLstm(operation, model, data);
131  case OperationType::RANK:
132  return ConvertRank(operation, model, data);
133  case OperationType::REDUCE_MAX:
134  return ConvertReduce(operation, model, data, armnn::ReduceOperation::Max);
135  case OperationType::REDUCE_MIN:
136  return ConvertReduce(operation, model, data, armnn::ReduceOperation::Min);
137  case OperationType::REDUCE_PROD:
138  return ConvertReduce(operation, model, data, armnn::ReduceOperation::Prod);
139  case OperationType::REDUCE_SUM:
140  return ConvertReduce(operation, model, data, armnn::ReduceOperation::Sum);
141  case OperationType::RELU:
142  return ConvertReLu(operation, model, data);
143  case OperationType::RELU1:
144  return ConvertReLu1(operation, model, data);
145  case OperationType::RELU6:
146  return ConvertReLu6(operation, model, data);
147  case OperationType::RESHAPE:
148  return ConvertReshape(operation, model, data);
149  case OperationType::RESIZE_BILINEAR:
150  return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
151  case OperationType::RESIZE_NEAREST_NEIGHBOR:
152  return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
153  case OperationType::REVERSE:
154  return ConvertReverseV2(operation, model, data);
155  case OperationType::RSQRT:
156  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
157  case OperationType::SIN:
158  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Sin);
159  case OperationType::SOFTMAX:
160  return ConvertSoftmax(operation, model, data);
161  case OperationType::SPACE_TO_BATCH_ND:
162  return ConvertSpaceToBatchNd(operation, model, data);
163  case OperationType::SPACE_TO_DEPTH:
164  return ConvertSpaceToDepth(operation, model, data);
165  case OperationType::SQRT:
166  return ConvertSqrt(operation, model, data);
167  case OperationType::SQUEEZE:
168  return ConvertSqueeze(operation, model, data);
169  case OperationType::STRIDED_SLICE:
170  return ConvertStridedSlice(operation, model, data);
171  case OperationType::SUB:
172  return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Sub);
173  case OperationType::TILE:
174  return ConvertTile(operation, model, data);
175  case OperationType::TRANSPOSE:
176  return ConvertTranspose(operation, model, data);
177  case OperationType::TRANSPOSE_CONV_2D:
178  return ConvertTransposeConv2d(operation, model, data);
179  case OperationType::TANH:
180  return ConvertTanH(operation, model, data);
181  default:
182  VLOG(DRIVER) << "Operation type: " << operation.type << " is not supported in ArmnnDriver";
183  return false;
184  }
185 }
186 
187 bool Converter::ConvertArgMinMax(const Operation& operation,
188  const Model& model,
189  ConversionData& data,
190  armnn::ArgMinMaxFunction argMinMaxFunction)
191 {
192  VLOG(DRIVER) << "Converter::ConvertArgMinMax()";
193  VLOG(DRIVER) << "argMinMaxFunction = " << GetArgMinMaxFunctionAsCString(argMinMaxFunction);
194 
195  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
196 
197  if (!input0.IsValid())
198  {
199  return Fail("%s: Operation has invalid inputs", __func__);
200  }
201 
202  int32_t axis;
203  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
204  {
205  return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
206  }
207 
208  const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
209  int rank = static_cast<int>(inputInfo.GetNumDimensions());
210 
211  if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
212  {
213  // Square brackets denote an inclusive bound, parentheses an exclusive one
214  // E.g. a rank 4 tensor can have axis in range [-4, 4)
215  // -1 == 3, -2 == 2, -3 == 1, -4 == 0
216  return Fail("%s: Axis must be in range [-n, n)", __func__);
217  }
218 
219  const Operand* output = GetOutputOperand(operation, 0, model);
220  if (!output)
221  {
222  return Fail("%s: Could not read output 0", __func__);
223  }
224 
225  const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
226 
227  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
228 
229  armnn::ArgMinMaxDescriptor descriptor;
230  descriptor.m_Function = argMinMaxFunction;
231  descriptor.m_Axis = axis;
232 
233  bool isSupported = false;
234  armnn::BackendId setBackend;
235  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
236  {
237  FORWARD_LAYER_SUPPORT_FUNC(__func__,
238  IsArgMinMaxSupported,
239  data.m_Backends,
240  isSupported,
241  setBackend,
242  inputInfo0,
243  outputInfo,
244  descriptor);
245  };
246 
247  if(IsDynamicTensor(outputInfo))
248  {
249  isSupported = AreDynamicTensorsSupported();
250  }
251  else
252  {
253  validateFunc(outputInfo, isSupported);
254  }
255 
256  if (!isSupported)
257  {
258  return false;
259  }
260 
261  armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
262  assert(layer != nullptr);
263  layer->SetBackendId(setBackend);
264 
265  input0.Connect(layer->GetInputSlot(0));
266 
267  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
268 }
269 
270 bool Converter::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
271 {
272  VLOG(DRIVER) << "Converter::ConvertAveragePool2d()";
273  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Average, model, data);
274 }
275 
276 bool Converter::ConvertBatchMatMul(const Operation& operation, const Model& model, ConversionData& data)
277 {
278  VLOG(DRIVER) << "Converter::ConvertBatchMatMul()";
279  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
280  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
281 
282  if (!input0.IsValid() || !input1.IsValid())
283  {
284  return Fail("%s: Operation has invalid inputs", __func__);
285  }
286 
287  const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
288  const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
289 
290  unsigned int rankInput0 = inputInfo0.GetNumDimensions();
291  if (rankInput0 > 4 || rankInput0 < 2)
292  {
293  return Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
294  }
295 
296  unsigned int rankInput1 = inputInfo1.GetNumDimensions();
297  if (rankInput1 > 4 || rankInput1 < 2)
298  {
299  return Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
300  }
301 
302  // Determine data type of input tensor 0
303  OperandType input0Type;
304  if (!GetOperandType(operation, 0, model, input0Type))
305  {
306  return Fail("%s: Operation has invalid inputs", __func__);
307  }
308 
309  // Determine data type of input tensor 1
310  OperandType input1Type;
311  if (!GetOperandType(operation, 1, model, input1Type))
312  {
313  return Fail("%s: Operation has invalid inputs", __func__);
314  }
315 
316  if (input0Type != input1Type)
317  {
318  return Fail("%s: Operation has invalid inputs (Inputs must have same OperandCode)", __func__);
319  }
320 
321  const Operand* output = GetOutputOperand(operation, 0, model);
322  if (!output)
323  {
324  return Fail("%s: Could not read output 0", __func__);
325  }
326 
327  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
328 
329  armnn::BatchMatMulDescriptor batchMatMulDesc;
330 
331  // Inputs 2 and 3 are named adjoint in Android NeuralNetworks, but they actually perform a transpose.
332  // This is why they are mapped to the transpose parameters in the descriptor.
333  batchMatMulDesc.m_TransposeX = GetOptionalBool(operation, 2, model, data);
334  batchMatMulDesc.m_TransposeY = GetOptionalBool(operation, 3, model, data);
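// E.g. with input0 of shape [B, M, K], m_TransposeX == true makes the matmul
// consume it as [B, K, M]; only the last two dimensions are swapped.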
335 
336  bool isSupported = false;
337  armnn::BackendId setBackend;
338  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
339  {
340  FORWARD_LAYER_SUPPORT_FUNC(__func__,
341  IsBatchMatMulSupported,
342  data.m_Backends,
343  isSupported,
344  setBackend,
345  inputInfo0,
346  inputInfo1,
347  outputInfo,
348  batchMatMulDesc);
349  };
350 
351  if(!IsDynamicTensor(outputInfo))
352  {
353  validateFunc(outputInfo, isSupported);
354  }
355  else
356  {
357  isSupported = AreDynamicTensorsSupported();
358  }
359 
360 
361  if (!isSupported)
362  {
363  return false;
364  }
365 
366  armnn::IConnectableLayer* const layer = data.m_Network->AddBatchMatMulLayer(batchMatMulDesc);
367  assert(layer != nullptr);
368  layer->SetBackendId(setBackend);
369  input0.Connect(layer->GetInputSlot(0));
370  input1.Connect(layer->GetInputSlot(1));
371 
372  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
373 }
374 
375 bool Converter::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
376 {
377  VLOG(DRIVER) << "Converter::ConvertBatchToSpaceNd()";
378  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
379  if (!input.IsValid())
380  {
381  return Fail("%s: Operation has invalid inputs", __func__);
382  }
383 
384  const Operand* output = GetOutputOperand(operation, 0, model);
385  if (!output)
386  {
387  return Fail("%s: Could not read output 0", __func__);
388  }
389 
390  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
391 
392  const Operand* blockOperand = GetInputOperand(operation, 1, model);
393  if (!blockOperand)
394  {
395  return Fail("%s: Could not read input 1", __func__);
396  }
397 
398  // Convert the block operand to int32
399  std::vector<int32_t> block;
400  if (!GetTensorInt32Values(*blockOperand, block, model, data))
401  {
402  return Fail("%s: Input 1 has invalid values", __func__);
403  }
404 
405  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
406 
407  unsigned int rank = inputInfo.GetNumDimensions();
408  if (rank != 4)
409  {
410  return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
411  }
412 
413  if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
414  {
415  return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
416  " greater than or equal to 1", __func__);
417  }
418 
419  armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
420  batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
421  batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
422 
423  if (Is12OrLaterOperand(*output))
424  {
425  batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
426  }
427  // Setting crops to 0,0 0,0 as it is not supported in Android NN API
428  batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
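// BatchToSpaceNd moves the batch dimension back into the spatial ones: e.g. an
// NHWC input of shape [4, 1, 1, 1] with block shape {2, 2} becomes [1, 2, 2, 1]
// (batch divided by 2*2, height and width each multiplied by 2).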
429 
430  bool isSupported = false;
431  armnn::BackendId setBackend;
432  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
433  {
434  FORWARD_LAYER_SUPPORT_FUNC(__func__,
435  IsBatchToSpaceNdSupported,
436  data.m_Backends,
437  isSupported,
438  setBackend,
439  inputInfo,
440  outputInfo,
441  batchToSpaceNdDesc);
442  };
443 
444  if(!IsDynamicTensor(outputInfo))
445  {
446  validateFunc(outputInfo, isSupported);
447  }
448  else
449  {
450  isSupported = AreDynamicTensorsSupported();
451  }
452 
453 
454  if (!isSupported)
455  {
456  return false;
457  }
458 
459  armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
460  assert(layer != nullptr);
461  layer->SetBackendId(setBackend);
462  input.Connect(layer->GetInputSlot(0));
463 
464  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
465 }
466 
467 bool Converter::ConvertCast(const Operation& operation, const Model& model, ConversionData& data)
468 {
469  VLOG(DRIVER) << "Converter::ConvertCast()";
470 
471  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
472 
473  if (!input.IsValid())
474  {
475  return Fail("%s: Operation has invalid inputs", __func__);
476  }
477 
478  const Operand* output = GetOutputOperand(operation, 0, model);
479  if (!output)
480  {
481  return Fail("%s: Could not read output 0", __func__);
482  }
483 
484  const TensorInfo& inputInfo = input.GetTensorInfo();
485  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
486 
487  bool isSupported = false;
488  armnn::BackendId setBackend;
489  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
490  {
491  FORWARD_LAYER_SUPPORT_FUNC(__func__,
492  IsCastSupported,
493  data.m_Backends,
494  isSupported,
495  setBackend,
496  inputInfo,
497  outputInfo);
498  };
499 
500  if(!IsDynamicTensor(outputInfo))
501  {
502  validateFunc(outputInfo, isSupported);
503  }
504  else
505  {
506  isSupported = AreDynamicTensorsSupported();
507  }
508 
509  if (!isSupported)
510  {
511  return false;
512  }
513 
514  IConnectableLayer* layer = data.m_Network->AddCastLayer();
515  assert(layer != nullptr);
516  layer->SetBackendId(setBackend);
517  input.Connect(layer->GetInputSlot(0));
518 
519  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
520 }
521 
522 bool Converter::ConvertComparison(const Operation& operation,
523  const Model& model,
524  ConversionData& data,
525  ComparisonOperation comparisonOperation)
526 {
527  VLOG(DRIVER) << "Converter::ConvertComparison()";
528  VLOG(DRIVER) << "comparisonOperation = " << GetComparisonOperationAsCString(comparisonOperation);
529 
530  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
531  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
532 
533  if (!(input0.IsValid() && input1.IsValid()))
534  {
535  return Fail("%s: Operation has invalid inputs", __func__);
536  }
537 
538  const Operand* output = GetOutputOperand(operation, 0, model);
539  if (!output)
540  {
541  return Fail("%s: Could not read output 0", __func__);
542  }
543 
544  const TensorInfo& inputInfo0 = input0.GetTensorInfo();
545  const TensorInfo& inputInfo1 = input1.GetTensorInfo();
546  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
547 
548  ComparisonDescriptor descriptor(comparisonOperation);
549 
550  bool isSupported = false;
551  armnn::BackendId setBackend;
552  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
553  {
554  FORWARD_LAYER_SUPPORT_FUNC(__func__,
555  IsComparisonSupported,
556  data.m_Backends,
557  isSupported,
558  setBackend,
559  inputInfo0,
560  inputInfo1,
561  outputInfo,
562  descriptor);
563  };
564 
565  if(!IsDynamicTensor(outputInfo))
566  {
567  validateFunc(outputInfo, isSupported);
568  }
569  else
570  {
571  isSupported = AreDynamicTensorsSupported();
572  }
573 
574  if (!isSupported)
575  {
576  return false;
577  }
578 
579  IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
580  assert(layer != nullptr);
581  layer->SetBackendId(setBackend);
582 
583  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
584  if (!isReshapeSupported)
585  {
586  return false;
587  }
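// BroadcastTensor equalises the ranks of the two inputs (inserting a reshape
// that prepends 1-sized dimensions to the lower-rank one where needed) and
// connects both of them to the layer's input slots.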
588 
589  if(IsDynamicTensor(outputInfo))
590  {
591  input0.Connect(layer->GetInputSlot(0));
592  input1.Connect(layer->GetInputSlot(1));
593  }
594 
595  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
596 }
597 
598 
599 bool Converter::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
600 {
601  VLOG(DRIVER) << "Converter::ConvertConcatenation()";
602 
603  // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
604  if (operation.inputs.size() <= 1)
605  {
606  return Fail("%s: Operation has insufficient arguments", __func__);
607  }
608 
609  // Get inputs and outputs
610  const std::size_t numInputTensors = operation.inputs.size() - 1;
611 
612  int32_t concatDim;
613  if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
614  {
615  return Fail("%s: Operation has invalid inputs", __func__);
616  }
617 
618  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
619  if (!outputOperand)
620  {
621  return Fail("%s: Operation has no outputs", __func__);
622  }
623 
624  armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
625  armnn::TensorShape outputShape = outputInfo.GetShape();
626  const bool isDynamicTensor = IsDynamicTensor(outputInfo);
627  //
628  // handle negative concat dims along the lines of tensorflow as described here:
629  // https://www.tensorflow.org/api_docs/python/tf/concat
630  // "negative axis refers to axis + rank(values)-th dimension"
631  //
632  if (concatDim < 0)
633  {
634  concatDim += outputShape.GetNumDimensions();
635  }
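// E.g. for a rank 4 output, concatDim == -1 resolves to axis 3.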
636 
637  if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
638  {
639  return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
640  }
641 
642  std::vector<LayerInputHandle> inputHandles;
643  std::vector<armnn::TensorShape> inputShapes;
644 
645  inputHandles.reserve(numInputTensors);
646  inputShapes.reserve(numInputTensors);
647 
648  bool inputsHaveBeenReshaped = false;
649  unsigned int tensorDimensionsAdded = 0;
650  for (uint32_t i = 0; i < numInputTensors; ++i)
651  {
652  const Operand* operand = GetInputOperand(operation, i, model);
653  if (!operand)
654  {
655  return Fail("%s: Operation has invalid inputs", __func__);
656  }
657 
658  LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
659  if (!operandInputHandle.IsValid())
660  {
661  return Fail("%s: Operation has invalid inputs", __func__);
662  }
663 
664  armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
665  if (operandShape.GetNumDimensions() == 0)
666  {
667  return Fail("%s: Operands with rank 0 are not supported", __func__);
668  }
669 
670  if (RequiresReshape(operandShape))
671  {
672  inputsHaveBeenReshaped = true;
673 
674  armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
675 
676  // Expand the tensor to three dimensions
677  if (operandShape.GetNumDimensions() == 2)
678  {
679  reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
680  tensorDimensionsAdded = 1;
681  }
682  else
683  {
684  reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
685  tensorDimensionsAdded = 2;
686  }
687 
688  armnn::ReshapeDescriptor reshapeDescriptor;
689  reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
690 
691  bool isSupported = false;
692  armnn::BackendId setBackendReshape;
693  FORWARD_LAYER_SUPPORT_FUNC(__func__,
694  IsReshapeSupported,
695  data.m_Backends,
696  isSupported,
697  setBackendReshape,
698  operandInputHandle.GetTensorInfo(),
699  reshapeInfo,
700  reshapeDescriptor);
701 
702  if (!isSupported)
703  {
704  return false;
705  }
706  armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
707  newReshape.SetBackendId(setBackendReshape);
708 
709  // Point to the reshape operation rather than the input operation
710  operandShape = reshapeInfo.GetShape();
711  operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
712  }
713 
714  inputShapes.emplace_back(operandShape);
715  inputHandles.emplace_back(operandInputHandle);
716 
717  if (!inputHandles.back().IsValid())
718  {
719  return Fail("%s: Operation has invalid inputs", __func__);
720  }
721  }
722 
723  if (inputShapes.size() != inputHandles.size())
724  {
725  return Fail("%s: invalid model: input shapes size doesn't match input handles size: %zu != %zu", __func__,
726  inputShapes.size(), inputHandles.size());
727  }
728 
729  if (inputsHaveBeenReshaped)
730  {
731  // Adjust the concatenation dimension by the amount of dimensions added (if any)
732  concatDim += tensorDimensionsAdded;
733 
734  // Add extra dimensions to the output shape to reflect the addition of the reshape layers
735  if (tensorDimensionsAdded == 1)
736  {
737  if (IsDynamicTensor(outputInfo))
738  {
739  outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
740  }
741  else
742  {
743  outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
744  }
745  }
746  else if (tensorDimensionsAdded == 2)
747  {
748  if (IsDynamicTensor(outputInfo))
749  {
750  outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
751  }
752  else
753  {
754  outputShape = armnn::TensorShape({1, 1, outputShape[0]});
755  }
756  }
757  }
758 
759  // Check if a permutation is required and get the pair of permutations needed for the concatenation.
760  // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
761  std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
762  std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
763  bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
764  concatDim,
765  permutationPair);
766 
767  // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
768  if (!isDynamicTensor)
769  {
770  if (needPermute)
771  {
772  outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
773  }
774 
775  outputInfo.SetShape(outputShape);
776  }
777  // This is a no-op for identity swizzles; otherwise it replaces both
778  // the handles and shapes with the swizzled layer output handles and shapes
779  if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
780  {
781  return false;
782  }
783 
784  // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
785  armnn::OriginsDescriptor concatDescriptor;
786 
787  try
788  {
789  // The concat descriptor is always created across the only supported concat dimension
790  // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
791  concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
792  inputShapes.end(),
793  concatDim);
794  } catch (std::exception& error)
795  {
796  return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
797  }
798 
799  // Validate the output shape is correct given the input shapes based on the
800  // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
801  if (!isDynamicTensor)
802  {
803  if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
804  {
805  return Fail("%s: Error validating the output shape for concat", __func__);
806  }
807  }
808 
809  std::vector<const armnn::TensorInfo*> inputTensorInfos;
810  std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
811  [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
812 
813  bool isSupported = false;
814  armnn::BackendId setBackendConcat;
815  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
816  FORWARD_LAYER_SUPPORT_FUNC(__func__,
817  IsConcatSupported,
818  data.m_Backends,
819  isSupported,
820  setBackendConcat,
821  inputTensorInfos,
822  outputInfo,
823  concatDescriptor);
824  };
825 
826  if (!isDynamicTensor)
827  {
828  validateFunc(outputInfo, isSupported);
829  }
830  else
831  {
832  isSupported = AreDynamicTensorsSupported();
833  }
834 
835  if (!isSupported)
836  {
837  return false;
838  }
839 
840  armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
841  assert(layer != nullptr);
842  layer->SetBackendId(setBackendConcat);
843  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
844  // Connect inputs to the layer
845  const int numInputSlots = layer->GetNumInputSlots();
846  assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
847  for (int i = 0; i < numInputSlots; ++i)
848  {
849  // connect the input directly to the merge (concat) layer
850  inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
851  }
852 
853  // Transpose the output shape
854  auto transposeOutputShape = [&](){
855  armnn::TransposeDescriptor transposeDesc;
856  transposeDesc.m_DimMappings = permutationPair.second;
857  armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
858  armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
859  permutationPair.second);
860  isSupported = false;
861  armnn::BackendId setBackendTranspose;
862  FORWARD_LAYER_SUPPORT_FUNC(__func__,
863  IsTransposeSupported,
864  data.m_Backends,
865  isSupported,
866  setBackendTranspose,
867  inputTransposeInfo,
868  outputTransposeInfo,
869  transposeDesc);
870  if (!isSupported)
871  {
872  return false;
873  }
874  // Add a permutation layer and connect the output to it; the permutation becomes the output layer
875  armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
876  permutationPair.second);
877  deswizzleLayer.SetBackendId(setBackendTranspose);
878  layer = &deswizzleLayer;
879 
880  return true;
881  };
882 
883  if (needPermute && !isDynamicTensor)
884  {
885  transposeOutputShape();
886  }
887 
888  if (inputsHaveBeenReshaped)
889  {
890  if (isDynamicTensor)
891  {
892  // Infer the output shapes of concat if outputs are type 1 dynamic
894  if (!ValidateConcatOutputShape(inputShapes,
895  layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
896  concatDim))
897  {
898  return Fail("%s: Error validating the output shape for concat", __func__);
899  }
900  transposeOutputShape();
901  }
902 
903  armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
904  // Undo the reshape knowing the number of dimensions added
905  if (tensorDimensionsAdded == 1)
906  {
907  afterConcatInfo.SetShape(
908  armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
909  }
910  else if (tensorDimensionsAdded == 2)
911  {
912  afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
913  }
914 
915  armnn::ReshapeDescriptor reshapeDescriptor;
916  reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
917  armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
918 
919  isSupported = false;
920  armnn::BackendId setBackendReshape2;
921  auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
922  FORWARD_LAYER_SUPPORT_FUNC(__func__,
923  IsReshapeSupported,
924  data.m_Backends,
925  isSupported,
926  setBackendReshape2,
927  concatInfo,
928  afterConcatInfo,
929  reshapeDescriptor);
930  };
931 
932  if (!IsDynamicTensor(afterConcatInfo))
933  {
934  validateReshapeFunc(afterConcatInfo, isSupported);
935  }
936  else
937  {
938  isSupported = AreDynamicTensorsSupported();
939  }
940 
941  if (!isSupported)
942  {
943  return false;
944  }
945  layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
946  layer->SetBackendId(setBackendReshape2);
947  return SetupAndTrackLayerOutputSlot(operation,
948  0,
949  *layer,
950  model,
951  data,
952  nullptr,
953  validateReshapeFunc);
954  }
955 
956  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
957 }
958 
959 bool Converter::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
960 {
961  VLOG(DRIVER) << "Converter::ConvertConv2d()";
962 
963  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
964  if (!input.IsValid())
965  {
966  return Fail("%s: Operation has invalid inputs", __func__);
967  }
968 
969  const Operand* output = GetOutputOperand(operation, 0, model);
970  if (!output)
971  {
972  return Fail("%s: Could not read output 0", __func__);
973  }
974 
975  const TensorInfo& inputInfo = input.GetTensorInfo();
976  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
977 
978  Convolution2dDescriptor desc;
979  desc.m_DataLayout = DataLayout::NHWC;
980 
981  // Determine whether padding is implicit or explicit
982  bool implicitPadding = operation.inputs.size() == 7
983  || (operation.inputs.size() >= 8
984  && GetInputOperand(operation, 7, model)->type == OperandType::BOOL);
985 
986  if (implicitPadding)
987  {
988  desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
989  }
990  else if (operation.inputs.size() >= 10)
991  {
992  desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
993  }
994 
995  const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
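// In an armnn::PermutationVector, entry i gives the destination index of source
// dimension i: O stays at 0, H moves to 2, W to 3 and I to 1, i.e. OHWI -> OIHW.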
996 
997  // ArmNN does not currently support non-fixed weights or bias
998  // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
999  // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
1000  // the DataLayout is NCHW
1001 
1002  if (!IsWeightsValid(operation, 1, model) && desc.m_DataLayout == DataLayout::NCHW)
1003  {
1004  return Fail("%s: Operation has unsupported weights OperandLifeTime", __func__);
1005  }
1006 
1007  LayerInputHandle weightsInput = (desc.m_DataLayout == DataLayout::NCHW)
1008  ? ConvertToLayerInputHandle(operation, 1, model, data, OHWIToOIHW, &input)
1009  : ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);
1010 
1011  if (!weightsInput.IsValid())
1012  {
1013  return Fail("%s: Operation has invalid inputs", __func__);
1014  }
1015 
1016  LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
1017  if (!biasInput.IsValid())
1018  {
1019  return Fail("%s: Operation has invalid inputs", __func__);
1020  }
1021 
1022  biasInput.SanitizeQuantizationScale(weightsInput, input);
1023  armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1024  armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1025 
1026  ActivationFn activation;
1027  if (implicitPadding)
1028  {
1029  ::android::nn::PaddingScheme paddingScheme;
1030  if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
1031  || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
1032  || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
1033  || !GetInputActivationFunction(operation, 6, activation, model, data)
1034  || !GetOptionalConvolutionDilationParams(operation, 8, desc, model, data))
1035  {
1036  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1037  }
1038 
1039  armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1040  unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1041  unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1042  const uint32_t kernelX = weightsInfo.GetShape()[widthIndex];
1043  const uint32_t kernelY = weightsInfo.GetShape()[heightIndex];
1044  const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1045  const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1046 
1047  CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1048  CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
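// CalcPadding resolves the Android padding scheme. Roughly, for kPaddingSame:
//   outputSize = ceil(inputSize / stride)
//   totalPadding = max(0, (outputSize - 1) * stride + dilatedKernel - inputSize)
// where dilatedKernel = (kernel - 1) * dilation + 1, split between the begin
// and end pads; kPaddingValid adds no padding at all.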
1049 
1050  }
1051  else if (operation.inputs.size() >= 10)
1052  {
1053  // explicit padding
1054  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
1055  || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
1056  || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
1057  || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
1058  || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
1059  || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
1060  || !GetInputActivationFunction(operation, 9, activation, model, data)
1061  || !GetOptionalConvolutionDilationParams(operation, 11, desc, model, data))
1062  {
1063  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
1064  }
1065  }
1066  else
1067  {
1068  return Fail("%s: Unsupported number of operation inputs", __func__);
1069  }
1070 
1071  desc.m_BiasEnabled = true;
1072  Optional<TensorInfo> biases(biasInfo);
1073 
1074  bool requiresValidation = true;
1075  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1076  const Operand* biasOperand = GetInputOperand(operation, 2, model);
1077  if (IsConnectedToDequantize(weightsInput.GetOutputSlot())
1078  || IsConnectedToDequantize(biasInput.GetOutputSlot()))
1079  {
1080  // Do not require validation for now. There will be an optimization step
1081  // [ConvertConstDequantisationLayersToConstLayers] that will convert these layers to Constant layers,
1082  // and at the end of the optimization there will be layer support validation.
1083  requiresValidation = false;
1084  VLOG(DRIVER) << "Converter::ConvertConv2d(): Weights and Biases are as INPUTS.";
1085  }
1086 
1087  armnn::BackendId setBackend;
1088  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
1089  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1090  IsConvolution2dSupported,
1091  data.m_Backends,
1092  isSupported,
1093  setBackend,
1094  inputInfo,
1095  outputInfo,
1096  desc,
1097  weightsInfo,
1098  biases);
1099  };
1100 
1101  if (requiresValidation)
1102  {
1103  VLOG(DRIVER) << "Converter::ConvertConv2d(): Requires Validation!";
1104  bool isSupported = false;
1105  if (!IsDynamicTensor(outputInfo))
1106  {
1107  validateFunc(outputInfo, isSupported);
1108  }
1109  else
1110  {
1111  isSupported = AreDynamicTensorsSupported();
1112  }
1113 
1114  if (!isSupported)
1115  {
1116  return false;
1117  }
1118  }
1119 
1120  armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
1121  if (!startLayer)
1122  {
1123  return Fail("%s: AddConvolution2dLayer failed", __func__);
1124  }
1125  startLayer->SetBackendId(setBackend);
1126 
1127 
1128  input.Connect(startLayer->GetInputSlot(0));
1129  weightsInput.Connect(startLayer->GetInputSlot(1));
1130  biasInput.Connect(startLayer->GetInputSlot(2));
1131 
1132  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
1133 }
1134 
1135 bool Converter::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
1136 {
1137  VLOG(DRIVER) << "Converter::ConvertDepthToSpace()";
1138 
1139  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1140  if (!input.IsValid())
1141  {
1142  return Fail("%s: Operation has invalid inputs", __func__);
1143  }
1144 
1145  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1146  unsigned int rank = inputInfo.GetNumDimensions();
1147  if (rank != 4)
1148  {
1149  return Fail("%s: Only inputs with rank 4 are supported", __func__);
1150  }
1151 
1152  const Operand* output = GetOutputOperand(operation, 0, model);
1153  if (!output)
1154  {
1155  return Fail("%s: Could not read output 0", __func__);
1156  }
1157 
1158  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1159 
1160  armnn::DepthToSpaceDescriptor descriptor;
1161 
1162  GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_BlockSize, model, data);
1163  if (descriptor.m_BlockSize <= 1)
1164  {
1165  return Fail("%s: Block size must be greater than 1", __func__);
1166  }
1167 
1168  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
1169  if (Is12OrLaterOperand(*output))
1170  {
1171  descriptor.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
1172  }
1173 
1174  bool isSupported = false;
1175  armnn::BackendId setBackend;
1176  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1177  {
1178  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1179  IsDepthToSpaceSupported,
1180  data.m_Backends,
1181  isSupported,
1182  setBackend,
1183  inputInfo,
1184  outputInfo,
1185  descriptor);
1186  };
1187 
1188  if(!IsDynamicTensor(outputInfo))
1189  {
1190  validateFunc(outputInfo, isSupported);
1191  }
1192  else
1193  {
1194  isSupported = AreDynamicTensorsSupported();
1195  }
1196 
1197  if (!isSupported)
1198  {
1199  return false;
1200  }
1201 
1202  armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
1203  assert(layer != nullptr);
1204  layer->SetBackendId(setBackend);
1205  input.Connect(layer->GetInputSlot(0));
1206 
1207  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1208 }
1209 
1210 bool Converter::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
1211 {
1212  VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d()";
1213 
1214  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1215 
1216  if (!input.IsValid())
1217  {
1218  return Fail("%s: Operation has invalid inputs", __func__);
1219  }
1220 
1221  const Operand* output = GetOutputOperand(operation, 0, model);
1222 
1223  if (!output)
1224  {
1225  return Fail("%s: Could not read output 0", __func__);
1226  }
1227 
1228  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1229  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1230 
1231  // ArmNN does not currently support non-fixed weights or bias
1232  // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
1233  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1234 
1235  if (!weightsOperand)
1236  {
1237  return Fail("%s: Could not read weights", __func__);
1238  }
1239  // Basic sanity check on the weights shape.
1240  // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
1241  // [1, filter_height, filter_width, depth_out]
1242  if (weightsOperand->dimensions[0] != 1)
1243  {
1244  return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
1245  }
1246 
1247  DepthwiseConvolution2dDescriptor desc;
1248  desc.m_DataLayout = DataLayout::NHWC;
1249 
1250  // Determine whether padding is implicit or explicit
1251  bool implicitPadding = operation.inputs.size() == 8
1252  || (operation.inputs.size() >= 9
1253  && GetInputOperand(operation, 8, model)->type == OperandType::BOOL);
1254 
1255  // Look ahead to find the optional DataLayout, if present
1256  const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
1257  desc.m_DataLayout = OptionalDataLayout(operation, dataLayoutFlagIndex, model, data);
1258 
1259  armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1260  unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1261  unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1262 
1263  LayerInputHandle weightsInput = ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);
1264  if (!weightsInput.IsValid())
1265  {
1266  return Fail("%s: Operation has invalid inputs", __func__);
1267  }
1268 
1269  const Operand* biasOperand = GetInputOperand(operation, 2, model);
1270  if (!biasOperand)
1271  {
1272  return Fail("%s: Could not read bias", __func__);
1273  }
1274 
1275  LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
1276  if (!biasInput.IsValid())
1277  {
1278  return Fail("%s: Operation has invalid inputs", __func__);
1279  }
1280 
1281  biasInput.SanitizeQuantizationScale(weightsInput, input);
1282  armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1283  armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1284 
1285  ActivationFn activation;
1286  if (implicitPadding)
1287  {
1288  ::android::nn::PaddingScheme paddingScheme;
1289  if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
1290  || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
1291  || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
1292  || !GetInputActivationFunction(operation, 7, activation, model, data)
1293  || !GetOptionalConvolutionDilationParams(operation, 9, desc, model, data))
1294  {
1295  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1296  }
1297 
1298  const uint32_t kernelX = weightsInfo.GetShape()[2];
1299  const uint32_t kernelY = weightsInfo.GetShape()[1];
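// With the [1, H, W, I * M] weights layout noted above, the kernel height is at
// index 1 and the kernel width at index 2, regardless of the input data layout.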
1300  const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1301  const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1302 
1303  CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1304  CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
1305  }
1306  else if (operation.inputs.size() >= 11)
1307  {
1308  // explicit padding
1309  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
1310  || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
1311  || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
1312  || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
1313  || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
1314  || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
1315  || !GetInputActivationFunction(operation, 10, activation, model, data)
1316  || !GetOptionalConvolutionDilationParams(operation, 12, desc, model, data))
1317  {
1318  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
1319  }
1320  }
1321  else
1322  {
1323  return Fail("%s: Unsupported number of operation inputs", __func__);
1324  }
1325 
1326  desc.m_BiasEnabled = true;
1327  Optional<TensorInfo> biases(biasInfo);
1328 
1329  bool requiresValidation = true;
1330  if (IsConnectedToDequantize(weightsInput.GetOutputSlot()) || IsConnectedToDequantize(biasInput.GetOutputSlot()))
1331  {
1332  // Do not require validation for now. There will be an optimization step
1333  // [ConvertConstDequantisationLayersToConstLayers] that will convert these layers to Constant layers,
1334  // and at the end of the optimization there will be layer support validation.
1335  requiresValidation = false;
1336  VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Weights and Biases are as INPUTS.";
1337  }
1338 
1339  armnn::BackendId setBackend;
1340  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
1341  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1342  IsDepthwiseConvolutionSupported,
1343  data.m_Backends,
1344  isSupported,
1345  setBackend,
1346  inputInfo,
1347  outputInfo,
1348  desc,
1349  weightsInfo,
1350  biases);
1351  };
1352 
1353  if (requiresValidation)
1354  {
1355  VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Requires Validation!";
1356  bool isSupported = false;
1357  if (!IsDynamicTensor(outputInfo))
1358  {
1359  validateFunc(outputInfo, isSupported);
1360  }
1361  else
1362  {
1363  isSupported = AreDynamicTensorsSupported();
1364  }
1365 
1366  if (!isSupported)
1367  {
1368  return false;
1369  }
1370  }
1371 
1372  armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
1373  if (!startLayer)
1374  {
1375  return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
1376  }
1377  startLayer->SetBackendId(setBackend);
1378 
1379 
1380  input.Connect(startLayer->GetInputSlot(0));
1381 
1382  // Connect weights and bias inputs
1383  weightsInput.Connect(startLayer->GetInputSlot(1));
1384  biasInput.Connect(startLayer->GetInputSlot(2));
1385 
1386  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
1387 }
1388 
1389 bool Converter::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
1390 {
1391  VLOG(DRIVER) << "Converter::ConvertDequantize()";
1392 
1393  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1394  if (!input.IsValid())
1395  {
1396  return Fail("%s: Operation has invalid input", __func__);
1397  }
1398 
1399  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1400  const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
1401  if (quantizationDim.has_value() && quantizationDim.value() != 0)
1402  {
1403  return Fail("%s: Operation has quantization dimension different than 0", __func__);
1404  }
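// I.e. per-channel (per-axis) quantization is only accepted when the quantized
// dimension is the outermost one.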
1405 
1406  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
1407  if (!outputOperand)
1408  {
1409  return Fail("%s: Operation has invalid outputs", __func__);
1410  }
1411 
1412  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1413 
1414  bool isSupported = false;
1415  armnn::BackendId setBackend;
1416  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1417  {
1418  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1419  IsDequantizeSupported,
1420  data.m_Backends,
1421  isSupported,
1422  setBackend,
1423  inputInfo,
1424  outputInfo);
1425  };
1426 
1427  if(IsDynamicTensor(outputInfo))
1428  {
1429  isSupported = AreDynamicTensorsSupported();
1430  }
1431  else
1432  {
1433  validateFunc(outputInfo, isSupported);
1434  }
1435 
1436  if (!isSupported)
1437  {
1438  return false;
1439  }
1440 
1441  armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
1442  assert(layer != nullptr);
1443  layer->SetBackendId(setBackend);
1444  input.Connect(layer->GetInputSlot(0));
1445 
1446  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1447 }
1448 
1449 bool Converter::ConvertElementwiseBinary(const Operation& operation,
1450  const Model& model,
1451  ConversionData& data,
1452  armnn::BinaryOperation binaryOperation)
1453 {
1454  VLOG(DRIVER) << "Converter::ConvertElementwiseBinary()";
1455  VLOG(DRIVER) << "binaryOperation = " << GetBinaryOperationAsCString(binaryOperation);
1456 
1457  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1458  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
1459 
1460  if (!input0.IsValid() || !input1.IsValid())
1461  {
1462  return Fail("%s: Operation has invalid inputs", __func__);
1463  }
1464 
1465  // The FuseActivation parameter is always at input index 2 and is optional
1466  ActivationFn activationFunction;
1467  if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
1468  {
1469  return Fail("%s: Operation has invalid optional input: activation function", __func__);
1470  }
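// The resolved activationFunction is forwarded to SetupAndTrackLayerOutputSlot
// below, which appends the fused activation (e.g. RELU) after this layer.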
1471 
1472  const Operand* output = GetOutputOperand(operation, 0, model);
1473  if (!output)
1474  {
1475  return Fail("%s: Could not read output", __func__);
1476  }
1477 
1478  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1479 
1480  armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);
1481 
1482  bool isSupported = false;
1483  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1484  {
1485  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1486  IsElementwiseBinarySupported,
1487  data.m_Backends,
1488  isSupported,
1489  armnn::BackendId(),
1490  input0.GetTensorInfo(),
1491  input1.GetTensorInfo(),
1492  outputInfo,
1493  binaryOperation);
1494  };
1495 
1496  if (!IsDynamicTensor(outputInfo))
1497  {
1498  validateFunc(outputInfo, isSupported);
1499  }
1500  else
1501  {
1502  isSupported = AreDynamicTensorsSupported();
1503  }
1504 
1505  if (!isSupported)
1506  {
1507  return false;
1508  }
1509 
1510  armnn::IConnectableLayer* layer = data.m_Network->AddElementwiseBinaryLayer(descriptor);
1511  if (!layer)
1512  {
1513  return Fail("%s: Could not add the ElementwiseBinaryLayer", __func__);
1514  }
1515  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
1516  if (!isReshapeSupported)
1517  {
1518  return false;
1519  }
1520 
1521  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model,
1522  data, nullptr, validateFunc, activationFunction);
1523 }
1524 
1525 bool Converter::ConvertElementwiseUnary(const Operation& operation,
1526  const Model& model,
1527  ConversionData& data,
1528  UnaryOperation unaryOperation)
1529 {
1530  VLOG(DRIVER) << "Converter::ConvertElementwiseUnary()";
1531  VLOG(DRIVER) << "unaryOperation = " << GetUnaryOperationAsCString(unaryOperation);
1532 
1533  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1534 
1535  if (!input.IsValid())
1536  {
1537  return Fail("%s: Operation has invalid input", __func__);
1538  }
1539 
1540  const Operand* output = GetOutputOperand(operation, 0, model);
1541  if (!output)
1542  {
1543  return Fail("%s: Could not read output 0", __func__);
1544  }
1545 
1546  const TensorInfo& inputInfo = input.GetTensorInfo();
1547  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1548 
1549  ElementwiseUnaryDescriptor descriptor(unaryOperation);
1550 
1551  bool isSupported = false;
1552  armnn::BackendId setBackend;
1553  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1554  {
1555  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1556  IsElementwiseUnarySupported,
1557  data.m_Backends,
1558  isSupported,
1559  setBackend,
1560  inputInfo,
1561  outputInfo,
1562  descriptor);
1563  };
1564 
1565  if(!IsDynamicTensor(outputInfo))
1566  {
1567  validateFunc(outputInfo, isSupported);
1568  }
1569  else
1570  {
1571  isSupported = AreDynamicTensorsSupported();
1572  }
1573 
1574  if (!isSupported)
1575  {
1576  return false;
1577  }
1578 
1579  IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
1580  assert(layer != nullptr);
1581  layer->SetBackendId(setBackend);
1582  input.Connect(layer->GetInputSlot(0));
1583 
1584  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1585 }
1586 
1587 bool Converter::ConvertElu(const Operation& operation, const Model& model, ConversionData& data)
1588 {
1589  VLOG(DRIVER) << "Converter::ConvertElu()";
1590 
1591  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1592  if (!input0.IsValid())
1593  {
1594  return Fail("%s: Operation has invalid inputs", __func__);
1595  }
1596 
1597  // Determine data type of input tensor
1598  OperandType inputType;
1599  if (!GetOperandType(operation, 0, model, inputType))
1600  {
1601  return Fail("%s: Operation has invalid inputs", __func__);
1602  }
1603 
1604  ActivationDescriptor desc;
1605  desc.m_Function = ActivationFunction::Elu;
1606 
1607  // Read alpha
1608  if (inputType == OperandType::TENSOR_FLOAT16)
1609  {
1610  Half alpha;
1611 
1612  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, alpha, model, data))
1613  {
1614  return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
1615  }
1616 
1617  desc.m_A = static_cast<float>(alpha);
1618  }
1619  else if (inputType == OperandType::TENSOR_FLOAT32)
1620  {
1621  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_A, model, data))
1622  {
1623  return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
1624  }
1625  }
1626  else
1627  {
1628  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
1629  }
1630 
1631  return ::ConvertToActivation(operation, __func__, desc, model, data);
1632 }
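// NOTE (illustrative, not in the original source): desc.m_A carries the ELU alpha,
// so the backend evaluates f(x) = x for x > 0 and f(x) = alpha * (exp(x) - 1)
// otherwise; the scalar is widened to float even when the operand was FLOAT16,
// as the cast above shows.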
1633 
1634 bool Converter::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
1635 {
1636  VLOG(DRIVER) << "Converter::ConvertExpandDims()";
1637 
1638  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1639 
1640  if (!input.IsValid())
1641  {
1642  return Fail("%s: Operation has invalid input", __func__);
1643  }
1644 
1645  const Operand* output = GetOutputOperand(operation, 0, model);
1646  if (!output)
1647  {
1648  return Fail("%s: Operation has invalid output", __func__);
1649  }
1650 
1651  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1652 
1653  int32_t axis;
1654  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
1655  {
1656  return Fail("%s: failed to get axis input value", __func__);
1657  }
1658 
1659  TensorShape targetShape;
1660 
1661  try
1662  {
1663  targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
1664  }
1665  catch (const std::exception& e)
1666  {
1667  return Fail("%s: %s", __func__, e.what());
1668  }
1669 
1670  ReshapeDescriptor reshapeDescriptor;
1671  reshapeDescriptor.m_TargetShape = targetShape;
1672 
1673  bool isSupported = false;
1674  armnn::BackendId setBackend;
1675  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1676  {
1677  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1678  IsReshapeSupported,
1679  data.m_Backends,
1680  isSupported,
1681  setBackend,
1682  input.GetTensorInfo(),
1683  outputInfo,
1684  reshapeDescriptor);
1685  };
1686 
1687  if(!IsDynamicTensor(outputInfo))
1688  {
1689  if (targetShape != outputInfo.GetShape())
1690  {
1691  return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
1692  }
1693  validateFunc(outputInfo, isSupported);
1694  }
1695  else
1696  {
1697  isSupported = AreDynamicTensorsSupported();
1698  }
1699 
1700  if (!isSupported)
1701  {
1702  return false;
1703  }
1704 
1705  IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
 1706  assert(layer != nullptr);
 1707  layer->SetBackendId(setBackend);
1708  input.Connect(layer->GetInputSlot(0));
1709 
1710  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1711 }
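// NOTE (illustrative, not in the original source): armnnUtils::ExpandDims only
// inserts a singleton dimension, which is why the operation reduces to a Reshape.
// For an input of shape [2, 3]:
//
//     axis  0 -> [1, 2, 3]
//     axis  1 -> [2, 1, 3]
//     axis -1 -> [2, 3, 1]
//
// Negative axes count from the end, matching the NNAPI EXPAND_DIMS definition.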
1712 
1713 bool Converter::ConvertFill(const Operation& operation, const Model& model, ConversionData& data)
1714 {
1715  VLOG(DRIVER) << "Converter::ConvertFill()";
1716  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1717  if (!input.IsValid())
1718  {
1719  return Fail("%s: Operation has invalid inputs", __func__);
1720  }
1721 
1722  const Operand* output = GetOutputOperand(operation, 0, model);
1723  if (!output)
1724  {
1725  return Fail("%s: Could not read output", __func__);
1726  }
1727 
1728  const TensorInfo& inputInfo = input.GetTensorInfo();
1729  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1730  if (IsDynamicTensor(outputInfo))
1731  {
1732  return Fail("%s: Dynamic output tensors are not supported", __func__);
1733  }
1734 
1735  // Determine data type of output tensor
1736  OperandType outputType = output->type;
1737  FillDescriptor descriptor;
1738  // Read the scalar fill value
1739  if (outputType == OperandType::TENSOR_FLOAT16)
1740  {
1741  Half value;
1742 
1743  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
1744  {
1745  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1746  }
1747 
1748  descriptor.m_Value = static_cast<float>(value);
1749  }
1750  else if (outputType == OperandType::TENSOR_FLOAT32)
1751  {
1752  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Value, model, data))
1753  {
1754  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1755  }
1756  }
1757  else if (outputType == OperandType::TENSOR_INT32)
1758  {
1759  int32_t value;
1760 
1761  if (!GetInputScalar(operation, 1, OperandType::INT32, value, model, data))
1762  {
1763  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1764  }
1765 
1766  descriptor.m_Value = static_cast<float>(value);
1767  }
1768  else
1769  {
1770  return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
1771  }
1772 
1773  bool isSupported = false;
1774  armnn::BackendId setBackend;
1775  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1776  IsFillSupported,
1777  data.m_Backends,
1778  isSupported,
1779  setBackend,
1780  inputInfo,
1781  outputInfo,
1782  descriptor);
1783  if (!isSupported)
1784  {
1785  return false;
1786  }
1787 
1788  IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
 1789  assert(layer != nullptr);
 1790  layer->SetBackendId(setBackend);
1791  input.Connect(layer->GetInputSlot(0));
1792 
1793  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1794 }
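// NOTE (illustrative, not in the original source): FillDescriptor stores the fill
// value as a float for every output type, which is why the INT32 branch above
// casts. A standalone sketch against a hypothetical 'network':
//
//     armnn::FillDescriptor fillDesc;
//     fillDesc.m_Value = 42.0f;   // every element of the output becomes 42
//     armnn::IConnectableLayer* fillLayer = network->AddFillLayer(fillDesc, "fill");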
1795 
1796 bool Converter::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
1797 {
1798  VLOG(DRIVER) << "Converter::ConvertFloor()";
1799  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1800  if (!input.IsValid())
1801  {
1802  return Fail("%s: Operation has invalid inputs", __func__);
1803  }
1804 
1805  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
1806  if (!outputOperand)
1807  {
1808  return Fail("%s: Operation has invalid outputs", __func__);
1809  }
1810 
1811  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1812 
1813  bool isSupported = false;
1814  armnn::BackendId setBackend;
1815  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1816  {
1817  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1818  IsFloorSupported,
1819  data.m_Backends,
1820  isSupported,
1821  setBackend,
1822  input.GetTensorInfo(),
1823  outputInfo);
1824  };
1825 
1826  if(!IsDynamicTensor(outputInfo))
1827  {
1828  validateFunc(outputInfo, isSupported);
1829  }
1830  else
1831  {
1832  isSupported = AreDynamicTensorsSupported();
1833  }
1834 
1835  if (!isSupported)
1836  {
1837  return false;
1838  }
1839 
1840  armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
 1841  assert(layer != nullptr);
 1842  layer->SetBackendId(setBackend);
1843  input.Connect(layer->GetInputSlot(0));
1844 
1845  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1846 }
1847 
1848 bool Converter::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
1849 {
1850  VLOG(DRIVER) << "Converter::ConvertFullyConnected()";
1851  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1852  if (!input.IsValid())
1853  {
1854  return Fail("%s: Operation has invalid inputs", __func__);
1855  }
1856 
1857  const Operand* output = GetOutputOperand(operation, 0, model);
1858  if (!output)
1859  {
1860  return Fail("%s: Could not read output 0", __func__);
1861  }
1862 
1863  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1864  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1865 
1866  LayerInputHandle weightsInput = LayerInputHandle();
1867  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1868  if (!weightsOperand)
1869  {
1870  return Fail("%s: Could not read weights", __func__);
1871  }
1872 
1873  // If weights are constant a separate constant layer will be created to store data.
1874  // Otherwise handle non const weights as inputs.
1875  weightsInput = ConvertToLayerInputHandle(operation, 1, model, data);
1876  if (!weightsInput.IsValid())
1877  {
1878  return Fail("%s: Operation has invalid inputs", __func__);
1879  }
1880 
1881  LayerInputHandle biasInput = LayerInputHandle();
1882  const Operand* biasOperand = GetInputOperand(operation, 2, model);
1883  if (!biasOperand)
1884  {
1885  return Fail("%s: Could not read bias", __func__);
1886  }
1887 
1888  // If bias are constant a separate constant layer will be created to store data.
1889  // Otherwise handle non const bias as inputs.
1890  biasInput = ConvertToLayerInputHandle(operation, 2, model, data); // 1D
1891  if (!biasInput.IsValid())
1892  {
1893  return Fail("%s: Operation has invalid inputs", __func__);
1894  }
1895 
1896  armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1897  armnn::TensorInfo reshapedInfo = inputInfo;
1898  try
1899  {
1900  reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
1901  }
1902  catch (const std::exception& e)
1903  {
1904  return Fail("%s: %s", __func__, e.what());
1905  }
1906 
1907  // Ensuring that the bias value is within 1% of the weights input (small float differences can exist)
1908  armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1909  SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
1910 
1911  ActivationFn activationFunction;
1912  if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
1913  {
1914  return Fail("%s: Operation has invalid inputs", __func__);
1915  }
1916 
 1917  FullyConnectedDescriptor desc;
 1918  desc.m_TransposeWeightMatrix = true;
1919  desc.m_BiasEnabled = true;
1920  desc.m_ConstantWeights = IsOperandConstant(*weightsOperand);
1921 
1922  bool isSupported = false;
1923  armnn::BackendId setBackend;
1924  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1925  {
1926  if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
1927  weightsInfo.GetShape(),
1928  outputInfo.GetShape(),
 1929  desc.m_TransposeWeightMatrix))
 1930  {
1931  isSupported = false;
1932  Fail("%s: Expected outputShape does not match actual outputShape", __func__);
1933  return;
1934  }
1935 
1936  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1937  IsFullyConnectedSupported,
1938  data.m_Backends,
1939  isSupported,
1940  setBackend,
1941  reshapedInfo,
1942  outputInfo,
1943  weightsInfo,
1944  biasInfo,
1945  desc);
1946  };
1947 
1948  if(!IsDynamicTensor(outputInfo))
1949  {
1950  validateFunc(outputInfo, isSupported);
1951  }
1952  else
1953  {
1954  isSupported = AreDynamicTensorsSupported();
1955  }
1956 
1957  if (!isSupported)
1958  {
1959  return false;
1960  }
1961 
1962  // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
1963  armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
1964  startLayer->SetBackendId(setBackend);
1965 
1966  if (inputInfo.GetNumDimensions() > 2U)
1967  {
1968  armnn::ReshapeDescriptor reshapeDescriptor;
1969  reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
1970 
1971  armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1972  assert(reshapeLayer != nullptr);
1973  input.Connect(reshapeLayer->GetInputSlot(0));
1974  reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
1975  reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
1976  }
1977  else
1978  {
1979  input.Connect(startLayer->GetInputSlot(0));
1980  }
1981 
1982  // Connect weights and bias inputs
1983  weightsInput.Connect(startLayer->GetInputSlot(1));
1984  biasInput.Connect(startLayer->GetInputSlot(2));
1985 
1986  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
1987  data, nullptr, validateFunc, activationFunction);
1988 }
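// NOTE (illustrative, not in the original source): FlattenFullyConnectedInput
// collapses the N-D input to the 2-D shape [total_elements / input_size, input_size],
// where input_size is the second dimension of the [num_units, input_size] weights.
// For example:
//
//     input [2, 2, 8] with weights [16, 8]  ->  reshaped input [4, 8]
//
// which is the reshape inserted above whenever the input has more than two dimensions.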
1989 
1990 bool Converter::ConvertGather(const Operation& operation, const Model& model, ConversionData& data)
1991 {
1992  VLOG(DRIVER) << "Converter::ConvertGather()";
1993 
1994  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1995  if (!input.IsValid())
1996  {
1997  return Fail("%s: Operation has invalid input", __func__);
1998  }
1999  auto inputDimensions = input.GetTensorInfo().GetNumDimensions();
2000 
2001  LayerInputHandle indices = ConvertToLayerInputHandle(operation, 2, model, data);
2002  if (!indices.IsValid())
2003  {
2004  return Fail("%s: Operation has invalid indices", __func__);
2005  }
2006  auto indicesDimensions = indices.GetTensorInfo().GetNumDimensions();
2007 
2008  const Operand* output = GetOutputOperand(operation, 0, model);
2009  if (!output)
2010  {
2011  return Fail("%s: Operation has invalid output", __func__);
2012  }
2013  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2014  auto outputDimensions = outputInfo.GetNumDimensions();
2015  if (outputDimensions != inputDimensions + indicesDimensions - 1)
2016  {
2017  return Fail("%s: Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
2018  __func__, outputDimensions, inputDimensions, indicesDimensions);
2019  }
2020 
2021  int32_t axis;
2022  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
2023  {
2024  return Fail("%s: Operation has invalid or unsupported axis operand", __func__);
2025  }
2026  int32_t inputDimensions_int = static_cast<int32_t>(inputDimensions);
2027  if ((axis < -inputDimensions_int) || (inputDimensions_int <= axis))
2028  {
 2029  return Fail("%s: Operation has invalid axis: %d. It is out of bounds [-%d, %d)", __func__, axis,
2030  inputDimensions, inputDimensions);
2031  }
2032 
2033  GatherDescriptor desc;
2034  desc.m_Axis = axis;
2035 
2036  bool isSupported = false;
2037  armnn::BackendId setBackend;
2038  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2039  {
2040  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2041  IsGatherSupported,
2042  data.m_Backends,
2043  isSupported,
2044  setBackend,
2045  input.GetTensorInfo(),
2046  indices.GetTensorInfo(),
2047  outputInfo,
2048  desc);
2049  };
2050 
2051  if(!IsDynamicTensor(outputInfo))
2052  {
2053  validateFunc(outputInfo, isSupported);
2054  }
2055  else
2056  {
2057  isSupported = AreDynamicTensorsSupported();
2058  }
2059 
2060  if (!isSupported)
2061  {
2062  return false;
2063  }
2064 
2065  IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
 2066  assert(layer != nullptr);
 2067  layer->SetBackendId(setBackend);
2068  input.Connect(layer->GetInputSlot(0));
2069  indices.Connect(layer->GetInputSlot(1));
2070 
2071  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2072 }
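// NOTE (illustrative, not in the original source): the rank check above is the
// GATHER shape rule output_rank = input_rank + indices_rank - 1. For instance,
// gathering with indices of shape [5] along axis 1 of an input of shape [2, 3, 4]
// produces an output of shape [2, 5, 4].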
2073 
2074 bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data)
2075 {
2076  VLOG(DRIVER) << "Converter::ConvertGroupedConv2d()";
2077  //
2078  // Parse data
2079  //
2080  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2081  if (!input.IsValid())
2082  {
2083  return Fail("%s: Operation has invalid inputs", __func__);
2084  }
2085  const TensorInfo& inputInfo = input.GetTensorInfo();
2086 
2087  const Operand* output = GetOutputOperand(operation, 0, model);
2088  if (!output)
2089  {
2090  return Fail("%s: Could not read output 0", __func__);
2091  }
2092  TensorInfo outputInfo = GetTensorInfoForOperand(*output);
2093 
2094  // Look ahead to determine data layout
2095  DataLayout dataLayout = DataLayout::NHWC;
2096  if (operation.inputs.size() == 12)
2097  {
2098  dataLayout = OptionalDataLayout(operation, 11, model, data);
2099  }
2100  else
2101  {
2102  dataLayout = OptionalDataLayout(operation, 8, model, data);
2103  }
2104 
2105  // NOTE:
2106  // NNAPI weights are always OHWI, i.e. [depth_out, filter_height, filter_width, depth_group],
2107  // but Arm NN expects the filter's height and width indices to match the input's height and
2108  // width indices so when the DataLayout is NCHW, we need to permute the weights to OIHW
2109  const PermutationVector ohwiToOihw = { 0u, 2u, 3u, 1u };
2110  const ConstTensorPin weightsPin = (dataLayout == DataLayout::NCHW) ?
 2111  ConvertOperationInputToConstTensorPin(operation, 1,
 2112  model, data, ohwiToOihw) :
2113  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
2114  const ConstTensorPin biasesPin =
2115  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
2116  if (!weightsPin.IsValid() || !biasesPin.IsValid())
2117  {
2118  return Fail("%s: Operation has invalid inputs", __func__);
2119  }
2120 
2121  ConstTensor weights = weightsPin.GetConstTensor();
2122  ConstTensor biases = biasesPin.GetConstTensor();
2123  SanitizeBiasQuantizationScale(biases.GetInfo(), weights.GetInfo(), inputInfo);
2124 
2125  const TensorShape& inputShape = inputInfo.GetShape();
2126  const TensorShape& outputShape = outputInfo.GetShape();
2127  const TensorShape& weightsShape = weights.GetShape();
2128  const TensorShape& biasesShape = biases.GetShape();
2129 
2130  armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
2131  const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
2132  const unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
2133  const unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
2134 
 2135  Convolution2dDescriptor desc;
 2136  desc.m_DataLayout = dataLayout;
2137  desc.m_BiasEnabled = true;
2138 
2139  int numGroups;
2140  ActivationFn activation;
2141 
2142  if (operation.inputs.size() == 12)
2143  {
2144  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
2145  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
2146  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
2147  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
2148  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
2149  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
2150  !GetInputScalar(operation, 9, OperandType::INT32, numGroups, model, data) ||
2151  !GetInputActivationFunction(operation, 10, activation, model, data))
2152  {
2153  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
2154  }
2155 
2156  }
2157  else if (operation.inputs.size() == 9)
2158  {
2159  ::android::nn::PaddingScheme paddingScheme;
2160  if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
2161  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
2162  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
2163  !GetInputScalar(operation, 6, OperandType::INT32, numGroups, model, data) ||
2164  !GetInputActivationFunction(operation, 7, activation, model, data))
2165  {
2166  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
2167  }
2168 
2169  const uint32_t inputX = inputInfo.GetShape()[widthIndex];
2170  const uint32_t inputY = inputInfo.GetShape()[heightIndex];
2171 
2172  const uint32_t kernelX = weightsShape[widthIndex];
2173  const uint32_t kernelY = weightsShape[heightIndex];
2174 
2175  CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2176  CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2177  }
2178  else
2179  {
2180  return Fail("%s: Unsupported number of operation inputs", __func__);
2181  }
2182 
 2183  // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
 2184  const unsigned int outputChannels = weightsShape[0];
 2185 
 2186  //
 2187  // Validate all relevant inputs (numGroups is used as a divisor below)
 2188  //
 2189  if (numGroups <= 0)
 2190  {
 2191  return Fail("%s: Number of groups must be greater than 0. Got: %d", __func__, numGroups);
 2192  }
 2193 
 2194  if (outputChannels % numGroups != 0u)
 2195  {
 2196  return Fail("%s: Output channels must be divisible by the number of groups", __func__);
 2197  }
 2198 
 2199  const unsigned int channelsPerGroup = weightsShape[channelsIndex];
 2200  const unsigned int channelMultiplier = outputChannels / numGroups;
2201 
2202  //
2203  // Set up Splitter layer
2204  //
2205  unsigned int splitterDimSizes[4] = { inputShape[0], inputShape[1], inputShape[2], inputShape[3] };
2206  splitterDimSizes[channelsIndex] /= numGroups; // split in depth
2207 
2208  TensorInfo splitterOutputInfo(4,
2209  splitterDimSizes,
2210  inputInfo.GetDataType(),
2211  inputInfo.GetQuantizationScale(),
2212  inputInfo.GetQuantizationOffset());
2213 
2214  std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(numGroups, std::ref(splitterOutputInfo));
2215 
2216  ViewsDescriptor splitterDesc(numGroups);
2217  for (unsigned int group = 0u; group < numGroups; ++group)
2218  {
2219  splitterDesc.SetViewOriginCoord(group, channelsIndex, splitterDimSizes[channelsIndex] * group);
2220  for (unsigned int dimIdx = 0u; dimIdx < 4u; dimIdx++)
2221  {
2222  splitterDesc.SetViewSize(group, dimIdx, splitterDimSizes[dimIdx]);
2223  }
2224  }
2225 
2226  bool isSupported = false;
2227  armnn::BackendId setBackendSplit;
2228  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2229  IsSplitterSupported,
2230  data.m_Backends,
2231  isSupported,
2232  setBackendSplit,
2233  inputInfo,
2234  splitterOutputInfos,
2235  splitterDesc);
2236  if (!isSupported)
2237  {
2238  return false;
2239  }
2240 
2241  IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
 2242  if (!splitterLayer)
 2243  {
 2244  return Fail("%s: Failed to add SplitterLayer", __func__);
 2245  }
 2246  splitterLayer->SetBackendId(setBackendSplit);
2247 
2248  input.Connect(splitterLayer->GetInputSlot(0));
2249  for (unsigned int group = 0u; group < splitterLayer->GetNumOutputSlots(); ++group)
2250  {
2251  splitterLayer->GetOutputSlot(group).SetTensorInfo(splitterOutputInfo);
2252  }
2253 
2254  //
2255  // Set up Convolution2d layers for each group
2256  //
2257 
2258  // Set up group tensor shapes
2259  TensorShape groupInputShape(inputShape);
2260  groupInputShape[channelsIndex] = channelsPerGroup;
2261 
2262  TensorShape groupWeightsShape(weightsShape);
2263  groupWeightsShape[0] /= channelMultiplier * numGroups;
2264 
2265  TensorShape groupBiasesShape({ 1 });
2266 
2267  // Set up group tensor infos
2268  TensorInfo groupInputInfo(inputInfo);
2269  groupInputInfo.SetShape(groupInputShape);
2270 
2271  const TensorInfo& weightsInfo = weights.GetInfo();
2272  TensorInfo groupWeightsInfo(weightsInfo);
2273  groupWeightsInfo.SetShape(groupWeightsShape);
2274 
2275  const TensorInfo& biasesInfo = biases.GetInfo();
2276  TensorInfo groupBiasesInfo(biasesInfo);
2277  groupBiasesInfo.SetShape(groupBiasesShape);
2278 
2279  TensorInfo groupOutputInfo(outputInfo);
2280 
2281  TensorShape groupOutputShape(outputShape);
2282  const bool isDynamic = IsDynamicTensor(outputInfo);
2283  if (!isDynamic)
2284  {
2285  groupOutputShape[channelsIndex] = 1;
2286  }
2287  groupOutputInfo.SetShape(groupOutputShape);
2288 
2289  const unsigned int weightsDataTypeSize = GetDataTypeSize(groupWeightsInfo.GetDataType());
2290  const unsigned int biasesDataTypeSize = GetDataTypeSize(groupBiasesInfo.GetDataType());
2291 
2292  std::vector<IConnectableLayer*> convLayers(numGroups * channelMultiplier, nullptr);
2293  for (unsigned int group = 0u; group < numGroups; ++group)
2294  {
2295  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2296  {
2297  auto index = group * channelMultiplier + m;
2298 
2299  const unsigned int weightsDataOffset = groupWeightsShape.GetNumElements() * index * weightsDataTypeSize;
2300  const unsigned int biasesDataOffset = groupBiasesShape.GetNumElements() * index * biasesDataTypeSize;
2301 
2302  if (weightsInfo.HasPerAxisQuantization())
2303  {
2304  // Extract per-axis quantization scales for group weights
2305  const std::vector<float>& weightsQuantScales = weightsInfo.GetQuantizationScales();
2306  groupWeightsInfo.SetQuantizationScales(
2307  std::vector<float>(weightsQuantScales.begin() + index,
2308  weightsQuantScales.begin() + index + groupWeightsShape[0]));
2309 
2310  // Extract per-axis quantization scales for group biases
2311  const std::vector<float>& biasesQuantScales = biasesInfo.GetQuantizationScales();
2312  groupBiasesInfo.SetQuantizationScales(
2313  std::vector<float>(biasesQuantScales.begin() + index,
2314  biasesQuantScales.begin() + index + groupWeightsShape[0]));
2315  }
2316 
2317  // Extract weights and biases data for current group convolution
2318  ConstTensor groupWeights(groupWeightsInfo,
2319  static_cast<const void *>(reinterpret_cast<const char *>(weights.GetMemoryArea()) +
2320  weightsDataOffset));
2321  ConstTensor groupBiases(groupBiasesInfo,
2322  static_cast<const void *>(reinterpret_cast<const char *>(biases.GetMemoryArea()) +
2323  biasesDataOffset));
2324 
2325  isSupported = false;
2326  armnn::BackendId setBackendConv;
2327  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2328  {
2329  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2330  IsConvolution2dSupported,
2331  data.m_Backends,
2332  isSupported,
2333  setBackendConv,
2334  groupInputInfo,
2335  outputInfo,
2336  desc,
2337  groupWeightsInfo,
2338  Optional<TensorInfo>(groupBiasesInfo));
2339  };
2340 
2341  if(!isDynamic)
2342  {
2343  validateFunc(groupOutputInfo, isSupported);
2344  }
2345  else
2346  {
2347  isSupported = AreDynamicTensorsSupported();
2348  }
2349 
2350  if (!isSupported)
2351  {
2352  return false;
2353  }
2354 
2355  IConnectableLayer* weightsLayer = data.m_Network->AddConstantLayer(groupWeights);
2356  IConnectableLayer* biasLayer = data.m_Network->AddConstantLayer(groupBiases);
2357  IConnectableLayer* convLayer = data.m_Network->AddConvolution2dLayer(desc);
2358 
 2359  if (!convLayer)
 2360  {
 2361  return Fail("%s: AddConvolution2dLayer failed", __func__);
 2362  }
 2363 
 2364  convLayer->SetBackendId(setBackendConv);
2365 
2366  splitterLayer->GetOutputSlot(group).Connect(convLayer->GetInputSlot(0));
2367  weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
2368  biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
2369 
2370  weightsLayer->GetOutputSlot(0).SetTensorInfo(groupWeightsInfo);
2371  biasLayer->GetOutputSlot(0).SetTensorInfo(groupBiasesInfo);
2372  convLayer->GetOutputSlot(0).SetTensorInfo(groupOutputInfo);
2373 
2374  if(isDynamic)
2375  {
2376  convLayer->GetOutputSlot(0).IsTensorInfoSet();
2377 
2378  validateFunc(convLayer->GetOutputSlot(0).GetTensorInfo(), isSupported);
2379 
2380  outputInfo = convLayer->GetOutputSlot(0).GetTensorInfo();
2381 
2382  if (!isSupported)
2383  {
2384  return false;
2385  }
2386  }
2387 
2388  convLayers[index] = convLayer;
2389  }
2390  }
2391 
2392  //
2393  // Set up Concat layer
2394  //
 2395  // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
 2396  ConcatDescriptor concatDescriptor(weightsShape[0]);
2398  for (unsigned int group = 0u; group < numGroups; ++group)
2399  {
2400  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2401  {
2402  auto index = group * channelMultiplier + m;
2403  concatDescriptor.SetViewOriginCoord(index, channelsIndex, index);
2404  concatDescriptor.SetConcatAxis(channelsIndex);
2405  }
2406  }
2407 
2408  isSupported = false;
2409  armnn::BackendId setBackendConcat;
2410  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2411  IsConcatSupported,
2412  data.m_Backends,
2413  isSupported,
2414  setBackendConcat,
2415  std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
2416  outputInfo,
2417  concatDescriptor);
2418 
2419  if (!isSupported)
2420  {
2421  return false;
2422  }
2423 
2424  IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
 2425  if (!concatLayer)
 2426  {
 2427  return Fail("%s: AddConcatLayer failed", __func__);
 2428  }
 2429  concatLayer->SetBackendId(setBackendConcat);
2430 
2431  for (unsigned int group = 0u; group < numGroups; ++group)
2432  {
2433  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2434  {
2435  auto index = group * channelMultiplier + m;
2436  convLayers[index]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(index));
2437  }
2438  }
2439  concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2440 
2441  return SetupAndTrackLayerOutputSlot(operation, 0, *concatLayer, model,
2442  data, nullptr, nullptr, activation);
2443 }
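// NOTE (illustrative, not in the original source): Arm NN has no dedicated
// grouped-convolution layer, so the conversion above emits the equivalent subgraph
//
//     input -> Splitter (numGroups views along the channel axis)
//           -> Convolution2d per group (constant weight/bias slices)
//           -> Concat (re-joined along the channel axis)
//
// e.g. two groups over an NHWC input of shape [1, 8, 8, 4] run two convolutions
// on [1, 8, 8, 2] slices whose outputs are concatenated back together.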
2444 
2445 bool Converter::ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data)
2446 {
2447  VLOG(DRIVER) << "Converter::ConvertHardSwish()";
2448  ActivationDescriptor desc;
2449  desc.m_Function = ActivationFunction::HardSwish;
2450 
2451  return ::ConvertToActivation(operation, __func__, desc, model, data);
2452 }
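// NOTE (illustrative, not in the original source): HARD_SWISH carries no scalar
// parameters because the function is fixed: f(x) = x * ReLU6(x + 3) / 6.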
2453 
2454 bool Converter::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
2455 {
2456  VLOG(DRIVER) << "Converter::ConvertInstanceNormalization()";
2457 
2458  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2459  if (!input.IsValid())
2460  {
2461  return Fail("%s: Operation has an invalid input 0", __func__);
2462  }
2463 
2464  const Operand* output = GetOutputOperand(operation, 0, model);
2465  if (!output)
2466  {
2467  return Fail("%s: Operation has an invalid output", __func__);
2468  }
2469 
2470  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2471 
2472  // Determine data type of input tensor
2473  OperandType inputType;
2474  if (!GetOperandType(operation, 0, model, inputType))
2475  {
2476  return Fail("%s: Operation has invalid inputs", __func__);
2477  }
2478 
 2479  InstanceNormalizationDescriptor desc;
 2480 
2481  // Read gamma, beta & epsilon
2482  if (inputType == OperandType::TENSOR_FLOAT16)
2483  {
2484  Half fp16Gamma;
2485  Half fp16Beta;
2486  Half fp16Epsilon;
2487 
2488  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Gamma, model, data) ||
2489  !GetInputScalar(operation, 2, OperandType::FLOAT16, fp16Beta, model, data) ||
2490  !GetInputScalar(operation, 3, OperandType::FLOAT16, fp16Epsilon, model, data))
2491  {
2492  return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
2493  }
2494 
2495  desc.m_Gamma = static_cast<float>(fp16Gamma);
2496  desc.m_Beta = static_cast<float>(fp16Beta);
2497  desc.m_Eps = static_cast<float>(fp16Epsilon);
2498  }
2499  else if (inputType == OperandType::TENSOR_FLOAT32)
2500  {
2501  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_Gamma, model, data) ||
2502  !GetInputScalar(operation, 2, OperandType::FLOAT32, desc.m_Beta, model, data) ||
2503  !GetInputScalar(operation, 3, OperandType::FLOAT32, desc.m_Eps, model, data))
2504  {
2505  return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
2506  }
2507  }
2508  else
2509  {
2510  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2511  }
2512 
2513  desc.m_DataLayout = OptionalDataLayout(operation, 4, model, data);
2514 
2515  bool isSupported = false;
2516  armnn::BackendId setBackend;
2517  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2518  {
2519  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2520  IsInstanceNormalizationSupported,
2521  data.m_Backends,
2522  isSupported,
2523  setBackend,
2524  input.GetTensorInfo(),
2525  outputInfo,
2526  desc);
2527  };
2528 
2529  if(IsDynamicTensor(outputInfo))
2530  {
2531  isSupported = AreDynamicTensorsSupported();
2532  }
2533  else
2534  {
2535  validateFunc(outputInfo, isSupported);
2536  }
2537 
2538  if (!isSupported)
2539  {
2540  return false;
2541  }
2542 
2543  IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
2544  layer->SetBackendId(setBackend);
2545  input.Connect(layer->GetInputSlot(0));
2546 
2547  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2548 }
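// NOTE (illustrative, not in the original source): the descriptor fields map onto
// instance normalisation y = gamma * (x - mean) / sqrt(variance + epsilon) + beta,
// with mean and variance computed per example and per channel over the spatial
// dimensions.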
2549 
2550 bool Converter::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2551 {
2552  VLOG(DRIVER) << "Converter::ConvertL2Normalization()";
2553 
2554  if (operation.inputs.size() != 1)
2555  {
2556  return Fail("%s: Optional inputs are not supported", __func__);
2557  }
2558 
2559  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2560  if (!input.IsValid())
2561  {
2562  return Fail("%s: Operation has invalid inputs", __func__);
2563  }
2564 
2565  const Operand* output = GetOutputOperand(operation, 0, model);
2566  if (!output)
2567  {
2568  return Fail("%s: Could not read output 0", __func__);
2569  }
2570 
2571  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2572  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2573 
2574  if (outputInfo.GetNumDimensions() != 4u)
2575  {
2576  return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2577  }
2578 
 2579  armnn::L2NormalizationDescriptor desc;
 2580  desc.m_DataLayout = armnn::DataLayout::NHWC;
 2581 
2582  bool isSupported = false;
2583  armnn::BackendId setBackend;
2584  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2585  {
2586  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2587  IsL2NormalizationSupported,
2588  data.m_Backends,
2589  isSupported,
2590  setBackend,
2591  inputInfo,
2592  outputInfo,
2593  desc);
2594  };
2595 
2596  if(!IsDynamicTensor(outputInfo))
2597  {
2598  validateFunc(outputInfo, isSupported);
2599  }
2600  else
2601  {
2602  isSupported = AreDynamicTensorsSupported();
2603  }
2604 
2605  if (!isSupported)
2606  {
2607  return false;
2608  }
2609 
2610  armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
 2611  assert(layer != nullptr);
 2612  layer->SetBackendId(setBackend);
2613  input.Connect(layer->GetInputSlot(0));
2614 
2615  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2616 }
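// NOTE (illustrative, not in the original source): with the optional axis input
// rejected above, the layer applies the NNAPI default of normalising along the
// channel dimension: y_c = x_c / sqrt(sum_k x_k^2), evaluated independently at
// each spatial position of the 4-D tensor.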
2617 
2618 bool Converter::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
2619 {
2620  VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
2621  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::L2, model, data);
2622 }
2623 
2624 bool Converter::ConvertLocalResponseNormalization(const Operation& operation,
2625  const Model& model,
2626  ConversionData& data)
2627 {
2628  VLOG(DRIVER) << "Converter::ConvertLocalResponseNormalization()";
2629 
2630  if (operation.inputs.size() != 5)
2631  {
2632  return Fail("%s: Optional inputs are not supported", __func__);
2633  }
2634 
2635  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2636  if (!input.IsValid())
2637  {
2638  return Fail("%s: Operation has invalid inputs", __func__);
2639  }
2640 
2641  const Operand* output = GetOutputOperand(operation, 0, model);
2642  if (!output)
2643  {
2644  return Fail("%s: Could not read output 0", __func__);
2645  }
2646 
2647  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2648  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2649 
2650  if (outputInfo.GetNumDimensions() != 4u)
2651  {
2652  return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2653  }
2654 
2655  armnn::NormalizationDescriptor descriptor;
 2656  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
 2657  descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
 2658  descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
 2659 
2660  if (!input.IsValid() ||
2661  !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2662  !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
2663  !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
2664  !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
2665  {
2666  return Fail("%s: Operation has invalid inputs", __func__);
2667  }
2668 
2669  // ArmNN expects normSize to be the full size of the normalization
2670  // window rather than the radius as in AndroidNN.
2671  descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2672 
2673  bool isSupported = false;
2674  armnn::BackendId setBackend;
2675  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2676  {
2677  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2678  IsNormalizationSupported,
2679  data.m_Backends,
2680  isSupported,
2681  setBackend,
2682  inputInfo,
2683  outputInfo,
2684  descriptor);
2685  };
2686 
2687  if(!IsDynamicTensor(outputInfo))
2688  {
2689  validateFunc(outputInfo, isSupported);
2690  }
2691  else
2692  {
2693  isSupported = AreDynamicTensorsSupported();
2694  }
2695 
2696  if (!isSupported)
2697  {
2698  return false;
2699  }
2700 
2701 
2702  armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
 2703  assert(layer != nullptr);
 2704  layer->SetBackendId(setBackend);
2705  input.Connect(layer->GetInputSlot(0));
2706 
2707  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2708 }
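// NOTE (illustrative, not in the original source): the descriptor maps onto the
// LRN definition y = x / (k + alpha * sum(x^2))^beta, where the sum runs over a
// window of normSize neighbouring channels; an AndroidNN radius of 2, for
// example, becomes a normSize of 5 after the adjustment above.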
2709 
2710 bool Converter::ConvertLogicalBinary(const Operation& operation,
2711  const Model& model,
2712  ConversionData& data,
2713  armnn::LogicalBinaryOperation logicalOperation)
2714 {
 2715  VLOG(DRIVER) << "Converter::ConvertLogicalBinary()";
2717  VLOG(DRIVER) << "logicalOperation = " << GetLogicalBinaryOperationAsCString(logicalOperation);
2718 
2719  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
2720  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
2721 
2722  if (!(input0.IsValid() && input1.IsValid()))
2723  {
2724  return Fail("%s: Operation has invalid inputs", __func__);
2725  }
2726 
2727  const Operand* output = GetOutputOperand(operation, 0, model);
2728  if (!output)
2729  {
2730  return Fail("%s: Could not read output 0", __func__);
2731  }
2732 
2733  const TensorInfo& inputInfo0 = input0.GetTensorInfo();
2734  const TensorInfo& inputInfo1 = input1.GetTensorInfo();
2735  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2736 
2737  LogicalBinaryDescriptor descriptor(logicalOperation);
2738 
2739  bool isSupported = false;
2740  armnn::BackendId setBackend;
2741  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2742  {
2743  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2744  IsLogicalBinarySupported,
2745  data.m_Backends,
2746  isSupported,
2747  setBackend,
2748  inputInfo0,
2749  inputInfo1,
2750  outputInfo,
2751  descriptor);
2752  };
2753 
2754  if(!IsDynamicTensor(outputInfo))
2755  {
2756  validateFunc(outputInfo, isSupported);
2757  }
2758  else
2759  {
2760  isSupported = AreDynamicTensorsSupported();
2761  }
2762 
2763  if (!isSupported)
2764  {
2765  return false;
2766  }
2767 
2768  IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
 2769  assert(layer != nullptr);
 2770  layer->SetBackendId(setBackend);
2771 
2772  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
2773  if (!isReshapeSupported)
2774  {
2775  return false;
2776  }
2777 
2778  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2779 }
2780 
2781 bool Converter::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2782 {
2783  VLOG(DRIVER) << "Converter::ConvertLogistic()";
 2784  ActivationDescriptor desc;
 2785  desc.m_Function = ActivationFunction::Sigmoid;
 2786 
2787  return ConvertToActivation(operation, __func__, desc, model, data);
2788 }
2789 
2790 bool Converter::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data)
2791 {
2792  VLOG(DRIVER) << "Converter::ConvertLogSoftmax()";
2793 
2794  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2795  if (!input.IsValid())
2796  {
2797  return Fail("%s: Failed to read input 0", __func__);
2798  }
2799 
2800  const Operand* output = GetOutputOperand(operation, 0, model);
2801  if (!output)
2802  {
2803  return Fail("%s: Failed to read output", __func__);
2804  }
2805 
2806  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2807 
2808  // Determine data type of input tensor
2809  OperandType inputType;
2810  if (!GetOperandType(operation, 0, model, inputType))
2811  {
2812  return Fail("%s: Operation has invalid inputs", __func__);
2813  }
2814 
2815  LogSoftmaxDescriptor descriptor;
2816 
2817  // Read beta
2818  if (inputType == OperandType::TENSOR_FLOAT16)
2819  {
2820  Half fp16Beta;
2821  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Beta, model, data))
2822  {
2823  return Fail("%s: Failed to read input 1 (FLOAT16)", __func__);
2824  }
2825 
2826  descriptor.m_Beta = static_cast<float>(fp16Beta);
2827  }
2828  else if (inputType == OperandType::TENSOR_FLOAT32)
2829  {
2830  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Beta, model, data))
2831  {
2832  return Fail("%s: Failed to read input 1 (FLOAT32)", __func__);
2833  }
2834  }
2835  else
2836  {
2837  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2838  }
2839 
2840  // Read axis
2841  if (!GetInputInt32(operation, 2, descriptor.m_Axis, model, data))
2842  {
2843  return Fail("%s: Failed to read input 2", __func__);
2844  }
2845 
2846  bool isSupported = false;
2847  armnn::BackendId setBackend;
2848  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2849  {
2850  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2851  IsLogSoftmaxSupported,
2852  data.m_Backends,
2853  isSupported,
2854  setBackend,
2855  input.GetTensorInfo(),
2856  outputInfo,
2857  descriptor);
2858  };
2859 
2860  if(IsDynamicTensor(outputInfo))
2861  {
2862  isSupported = AreDynamicTensorsSupported();
2863  }
2864  else
2865  {
2866  validateFunc(outputInfo, isSupported);
2867  }
2868 
2869  if (!isSupported)
2870  {
2871  return false;
2872  }
2873 
2874  IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
 2875  if (!layer)
 2876  {
 2877  return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
 2878  }
 2879  layer->SetBackendId(setBackend);
2880 
2881  input.Connect(layer->GetInputSlot(0));
2882 
2883  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2884 }
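// NOTE (illustrative, not in the original source): beta scales the logits and
// m_Axis selects the reduction dimension, so the layer computes
// y_i = beta * x_i - log(sum_j exp(beta * x_j)) along the chosen axis.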
2885 
2886 bool Converter::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
2887 {
2888  VLOG(DRIVER) << "Converter::ConvertLstm()";
2889 
2890  // Inputs:
2891  // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2892  // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2893  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2894  if (!input.IsValid())
2895  {
2896  return Fail("%s: Could not read input 0: input", __func__);
2897  }
2898  // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2899  LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
2900  if (!outputStateIn.IsValid())
2901  {
2902  return Fail("%s: Could not read input 18: outputStateIn", __func__);
2903  }
2904  // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2905  LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
2906  if (!cellStateIn.IsValid())
2907  {
2908  return Fail("%s: Could not read input 19: cellStateIn", __func__);
2909  }
2910 
2911  // Get the mandatory input tensors:
2912  // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2913  // [num_units, input_size].
2914  const ConstTensorPin inputToForgetWeightsPin =
2915  (DequantizeAndMakeConstTensorPin(operation, model, data, 2));
2916  // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2917  // [num_units, input_size].
2918  const ConstTensorPin inputToCellWeightsPin =
2919  (DequantizeAndMakeConstTensorPin(operation, model, data, 3));
2920  // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2921  // [num_units, input_size].
2922  const ConstTensorPin inputToOutputWeightsPin =
2923  (DequantizeAndMakeConstTensorPin(operation, model, data, 4));
2924  // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2925  // [num_units, output_size].
2926  const ConstTensorPin recurrentToForgetWeightsPin =
2927  (DequantizeAndMakeConstTensorPin(operation, model, data, 6));
2928  // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2929  // [num_units, output_size].
2930  const ConstTensorPin recurrentToCellWeightsPin =
2931  (DequantizeAndMakeConstTensorPin(operation, model, data, 7));
2932  // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2933  // [num_units, output_size].
2934  const ConstTensorPin recurrentToOutputWeightsPin =
2935  (DequantizeAndMakeConstTensorPin(operation, model, data, 8));
2936  // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2937  const ConstTensorPin forgetGateBiasPin =
2938  ConvertOperationInputToConstTensorPin(operation, 13, model, data);
2939  // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2940  const ConstTensorPin cellBiasPin =
2941  ConvertOperationInputToConstTensorPin(operation, 14, model, data);
2942  // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2943  const ConstTensorPin outputGateBiasPin =
2944  ConvertOperationInputToConstTensorPin(operation, 15, model, data);
2945 
2946  if (!inputToForgetWeightsPin.IsValid() ||
2947  !inputToCellWeightsPin.IsValid() ||
2948  !inputToOutputWeightsPin.IsValid() ||
2949  !recurrentToForgetWeightsPin.IsValid() ||
2950  !recurrentToCellWeightsPin.IsValid() ||
2951  !recurrentToOutputWeightsPin.IsValid() ||
2952  !forgetGateBiasPin.IsValid() ||
2953  !cellBiasPin.IsValid() ||
2954  !outputGateBiasPin.IsValid())
2955  {
2956  return Fail("%s: Operation has invalid tensor inputs", __func__);
2957  }
2958 
2959  // Get the optional input tensors:
2960  // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2961  // [num_units, input_size], where “num_units” corresponds to the number of cell units.
2962  const ConstTensorPin inputToInputWeightsPin =
2963  (DequantizeAndMakeConstTensorPin(operation, model, data, 1, true));
2964  // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2965  // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2966  // “num_units”), or the second dimension of the “projection_weights”, if defined.
2967  const ConstTensorPin recurrentToInputWeightsPin =
2968  (DequantizeAndMakeConstTensorPin(operation, model, data, 5, true));
2969  // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2970  const ConstTensorPin cellToInputWeightsPin =
2971  (DequantizeAndMakeConstTensorPin(operation, model, data, 9, true));
2972  // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2973  const ConstTensorPin cellToForgetWeightsPin =
2974  (DequantizeAndMakeConstTensorPin(operation, model, data, 10, true));
2975  // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2976  const ConstTensorPin cellToOutputWeightsPin =
2977  (DequantizeAndMakeConstTensorPin(operation, model, data, 11, true));
2978  // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2979  const ConstTensorPin inputGateBiasPin =
 2980  ConvertOperationInputToConstTensorPin(operation,
 2981  12,
2982  model,
2983  data,
2984  g_DontPermute,
2985  nullptr,
2986  true);
2987 
2988  // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2989  // [output_size, num_units].
2990  const ConstTensorPin projectionWeightsPin =
2991  (DequantizeAndMakeConstTensorPin(operation, model, data, 16, true));
2992  // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2993  const ConstTensorPin projectionBiasPin =
 2994  ConvertOperationInputToConstTensorPin(operation,
 2995  17,
2996  model,
2997  data,
2998  g_DontPermute,
2999  nullptr,
3000  true);
3001 
3002  if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
3003  (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
3004  (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
3005  (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
3006  (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
3007  (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
3008  (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
3009  (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
3010  {
3011  return Fail("%s: Operation has invalid tensor inputs", __func__);
3012  }
3013 
3014  // Get the mandatory input scalars (actually 1-D tensors of size 1):
3015  // 20: The activation function: A value indicating the activation function:
3016  // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
3017  // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
3018  // If set to 0.0 then clipping is disabled.
3019  // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
3020  // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
3021  ActivationFn activation = ActivationFn::kActivationNone;
3022  float cellClip;
3023  float projClip;
3024  if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
3025  !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
3026  !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
3027  {
3028  return Fail("%s: Operation has invalid scalar inputs", __func__);
3029  }
3030 
3031  // Get the normalization tensors
3032  // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
3033  // Used to rescale normalized inputs to activation at input gate.
3034  const ConstTensorPin inputLayerNormWeightsPin
3035  (DequantizeAndMakeConstTensorPin(operation, model, data, 23, true));
3036 
3037  // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
3038  // Used to rescale normalized inputs to activation at forget gate.
3039  const ConstTensorPin forgetLayerNormWeightsPin =
 3040  ConvertOperationInputToConstTensorPin(operation,
 3041  24,
3042  model,
3043  data,
3044  g_DontPermute,
3045  nullptr,
3046  true);
3047 
3048  // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
3049  // Used to rescale normalized inputs to activation at cell gate.
3050  const ConstTensorPin cellLayerNormWeightsPin =
 3051  ConvertOperationInputToConstTensorPin(operation,
 3052  25,
3053  model,
3054  data,
3055  g_DontPermute,
3056  nullptr,
3057  true);
3058 
3059  // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
3060  // Used to rescale normalized inputs to activation at output gate.
3061  const ConstTensorPin outputLayerNormWeightsPin =
 3062  ConvertOperationInputToConstTensorPin(operation,
 3063  26,
3064  model,
3065  data,
3066  g_DontPermute,
3067  nullptr,
3068  true);
3069 
3070  // Outputs:
3071  // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
3072  // with CIFG, or [batch_size, num_units * 3] without CIFG.
3073  const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
3074  if (!scratchBuffer)
3075  {
3076  return Fail("%s: Could not read output 0: scratchBuffer", __func__);
3077  }
3078  // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
3079  const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
3080  if (!outputStateOut)
3081  {
3082  return Fail("%s: Could not read output 1: outputStateOut", __func__);
3083  }
3084  // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
3085  const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
3086  if (!cellStateOut)
3087  {
3088  return Fail("%s: Could not read output 2: cellStateOut", __func__);
3089  }
3090  // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
3091  // effectively the same as the current “output state (out)” value.
3092  const Operand* output = GetOutputOperand(operation, 3, model);
3093  if (!output)
3094  {
3095  return Fail("%s: Could not read output 3: output", __func__);
3096  }
3097 
3098  // set the params structure for the AddLstmLayer call
3099  LstmInputParams params;
3100  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
3101  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
3102  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
3103  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
3104  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
3105  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
3106  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
3107  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
3108  params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
3109  params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
3110  params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
3111  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
3112  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
3113  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
3114  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
3115  params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
3116  params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
3117  params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
3118  params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
3119  params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
3120  params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
3121 
3122  // set the layer descriptor
3123  LstmDescriptor desc;
3124  desc.m_ActivationFunc = activation;
3125  desc.m_ClippingThresCell = cellClip;
3126  desc.m_ClippingThresProj = projClip;
3127  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
3128  params.m_RecurrentToInputWeights == nullptr ||
3129  params.m_InputGateBias == nullptr);
3130  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
3131  params.m_CellToOutputWeights != nullptr);
3132  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
3133  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
3134  params.m_ForgetLayerNormWeights != nullptr ||
3135  params.m_CellLayerNormWeights != nullptr ||
3136  params.m_OutputLayerNormWeights != nullptr);
3137 
3138  // validate the optional input groups
3139  if (desc.m_CifgEnabled &&
3140  (params.m_InputToInputWeights != nullptr ||
3141  params.m_RecurrentToInputWeights != nullptr ||
3142  params.m_InputGateBias != nullptr))
3143  {
3144  return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
3145  " and input gate bias must be provided", __func__);
3146  }
3147 
3148  if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
3149  {
3150  return Fail("%s: projection bias should not be provided without projection weights", __func__);
3151  }
3152 
3153  if (desc.m_PeepholeEnabled &&
3154  (params.m_CellToForgetWeights == nullptr ||
3155  params.m_CellToOutputWeights == nullptr ||
3156  (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
3157  {
3158  return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
3159  " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
3160  }
3161 
3162  if (desc.m_LayerNormEnabled &&
3163  (params.m_ForgetLayerNormWeights == nullptr ||
3164  params.m_CellLayerNormWeights == nullptr ||
3165  params.m_OutputLayerNormWeights == nullptr ||
3166  (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
3167  {
3168  return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
3169  " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
3170  }
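// NOTE (illustrative, not in the original source): CIFG (coupled input-forget
// gate) is inferred above from the absence of the input-gate tensors; when
// enabled the input gate is derived from the forget gate (i = 1 - f), which is
// why the scratch buffer documented at output 0 shrinks from num_units * 4 to
// num_units * 3 columns.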
3171 
3172  // Check if the layer is supported
3173  // Inputs
3174  const TensorInfo& inputInfo = input.GetTensorInfo();
3175  const TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
3176  const TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
3177 
3178  // Outputs
3179  const TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
3180  const TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
3181  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
3182  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3183 
3184  // Basic parameters
3185  LstmInputParamsInfo paramsInfo;
3186  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
3187  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
3188  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
3189  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
3190  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
3191  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
3192  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
3193  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
3194  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
3195 
3196  // Optional parameters
3197  if (!desc.m_CifgEnabled)
3198  {
3199  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
3200  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
3201  if (params.m_CellToInputWeights != nullptr)
3202  {
3203  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
3204  }
3205  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
3206  }
3207 
3208  if (desc.m_ProjectionEnabled)
3209  {
3210  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
3211  if (params.m_ProjectionBias != nullptr)
3212  {
3213  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
3214  }
3215  }
3216 
3217  if (desc.m_PeepholeEnabled)
3218  {
3219  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
3220  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
3221  }
3222 
3223  if (desc.m_LayerNormEnabled)
3224  {
3225  if (!desc.m_CifgEnabled)
3226  {
3227  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
3228  }
3229  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
3230  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
3231  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
3232  }
3233 
3234  bool isSupported = false;
3235  armnn::BackendId setBackend;
3236  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3237  {
3238  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3239  IsLstmSupported,
3240  data.m_Backends,
3241  isSupported,
3242  setBackend,
3243  inputInfo,
3244  outputStateInInfo,
3245  cellStateInInfo,
3246  scratchBufferInfo,
3247  outputStateOutInfo,
3248  cellStateOutInfo,
3249  outputInfo,
3250  desc,
3251  paramsInfo);
3252  };
3253 
3254  bool isDynamic = false;
3255  if (!IsDynamicTensor(outputStateOutInfo) &&
3256  !IsDynamicTensor(scratchBufferInfo) &&
3257  !IsDynamicTensor(cellStateOutInfo) &&
3258  !IsDynamicTensor(outputInfo))
3259  {
3260  validateFunc(outputInfo, isSupported);
3261  }
3262  else
3263  {
3264  isDynamic = true;
3265  isSupported = AreDynamicTensorsSupported();
3266  }
3267 
3268  if (!isSupported)
3269  {
3270  return false;
3271  }
3272 
3273  // Add the layer
3274  IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
3275  layer->SetBackendId(setBackend);
3276 
3277  input.Connect(layer->GetInputSlot(0));
3278  outputStateIn.Connect(layer->GetInputSlot(1));
3279  cellStateIn.Connect(layer->GetInputSlot(2));
3280 
3281  if (!isDynamic)
3282  {
3283  return (
3284  SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3285  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3286  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3287  SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
3288  }
3289  else
3290  {
3291  return (
3292  SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3293  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3294  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3295  SetupAndTrackLayerOutputSlot(
3296  operation, 3, *layer, 3, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
3297  }
3298 
3299 }
3300 
3301 bool Converter::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
3302 {
3303  VLOG(DRIVER) << "Converter::ConvertMaxPool2d()";
3304  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Max, model, data);
3305 }
3306 
3307 bool Converter::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
3308 {
3309  VLOG(DRIVER) << "Converter::ConvertMean()";
3310 
3311  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3312  if (!input.IsValid())
3313  {
3314  return Fail("%s: Operation has invalid inputs", __func__);
3315  }
3316 
3317  const Operand* output = GetOutputOperand(operation, 0, model);
3318  if (!output)
3319  {
3320  return Fail("%s: Could not read output 0", __func__);
3321  }
3322 
3323  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3324 
3325  const Operand* axisOperand = GetInputOperand(operation, 1, model);
3326  if (!axisOperand)
3327  {
3328  return Fail("%s: Could not read input 1", __func__);
3329  }
3330 
3331  std::vector<int32_t> axis;
3332  if (!GetTensorInt32Values(*axisOperand, axis, model, data))
3333  {
3334  return Fail("%s: Input 1 has invalid values", __func__);
3335  }
3336 
3337  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3338 
3339  // Convert the axis to unsigned int and remove duplicates.
3340  unsigned int rank = inputInfo.GetNumDimensions();
3341  std::set<unsigned int> uniqueAxis;
3342  std::transform(axis.begin(), axis.end(),
3343  std::inserter(uniqueAxis, uniqueAxis.begin()),
3344  [rank](int i) -> unsigned int { return (i + rank) % rank; });
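 // Example: with rank 4, an axis of -1 maps to (-1 + 4) % 4 = 3, and repeated axis values
 // collapse to a single entry because std::set keeps unique keys only.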
3345 
3346  // Get the "keep dims" flag.
3347  int32_t keepDims = 0;
3348  if (!GetInputInt32(operation, 2, keepDims, model, data))
3349  {
3350  return Fail("%s: Could not read input 2", __func__);
3351  }
3352 
3353  armnn::MeanDescriptor descriptor;
3354  descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3355  descriptor.m_KeepDims = keepDims > 0;
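 // Example: reducing a [2, 3, 4] input over axis {1} produces [2, 4] when keepDims is 0,
 // or [2, 1, 4] when keepDims > 0.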
3356 
3357  bool isSupported = false;
3358  armnn::BackendId setBackend;
3359  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3360  {
3361  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3362  IsMeanSupported,
3363  data.m_Backends,
3364  isSupported,
3365  setBackend,
3366  inputInfo,
3367  outputInfo,
3368  descriptor);
3369  };
3370 
3371  if (!IsDynamicTensor(outputInfo))
3372  {
3373  validateFunc(outputInfo, isSupported);
3374  }
3375  else
3376  {
3377  isSupported = AreDynamicTensorsSupported();
3378  }
3379 
3380  if (!isSupported)
3381  {
3382  return false;
3383  }
3384 
3385  armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3386  assert(layer != nullptr);
3387  layer->SetBackendId(setBackend);
3388  input.Connect(layer->GetInputSlot(0));
3389 
3390  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3391 }
3392 
3393 bool Converter::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
3394 {
3395  VLOG(DRIVER) << "Converter::ConvertPad()";
3396 
3397  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3398  if (!input.IsValid())
3399  {
3400  return Fail("%s: Operation has invalid inputs", __func__);
3401  }
3402 
3403  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3404  unsigned int rank = inputInfo.GetNumDimensions();
3405 
3406  armnn::PadDescriptor descriptor;
3407  if (!ConvertPaddings(operation, model, data, rank, descriptor))
3408  {
3409  return Fail("%s: Could not convert paddings", __func__);
3410  }
3411 
3412  // For an ANEURALNETWORKS_TENSOR_QUANT8_ASYMM or ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3413  // the scale and zeroPoint must be the same as input0.
3414  // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
3415  // pad value must be "logical zero", so we set it equal to the QuantizationOffset; effectively it ends up as
3416  // (QuantizationOffset - QuantizationOffset) * scale = 0.
3417  if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
3418  {
3419  descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3420  }
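 // Example: for a QAsymmU8 input with zeroPoint 128 and scale 0.5f, padding with the value 128
 // dequantizes to (128 - 128) * 0.5 = 0.0f, i.e. a logical zero.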
3421 
3422  const Operand* output = GetOutputOperand(operation, 0, model);
3423  if (!output)
3424  {
3425  return Fail("%s: Could not read output", __func__);
3426  }
3427 
3428  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3429 
3430  bool isSupported = false;
3431  armnn::BackendId setBackend;
3432  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3433  {
3434  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3435  IsPadSupported,
3436  data.m_Backends,
3437  isSupported,
3438  setBackend,
3439  inputInfo,
3440  outputInfo,
3441  descriptor);
3442  };
3443 
3444  if (!IsDynamicTensor(outputInfo))
3445  {
3446  validateFunc(outputInfo, isSupported);
3447  }
3448  else
3449  {
3450  isSupported = AreDynamicTensorsSupported();
3451  }
3452 
3453  if (!isSupported)
3454  {
3455  return false;
3456  }
3457 
3458  armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3459  assert(layer != nullptr);
3460  layer->SetBackendId(setBackend);
3461  input.Connect(layer->GetInputSlot(0));
3462 
3463  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3464 }
3465 
3466 bool Converter::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
3467 {
3468  VLOG(DRIVER) << "Converter::ConvertPadV2()";
3469 
3470  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3471  if (!input.IsValid())
3472  {
3473  return Fail("%s: Could not read input 0", __func__);
3474  }
3475 
3476  const Operand* output = GetOutputOperand(operation, 0, model);
3477  if (!output)
3478  {
3479  return Fail("%s: Could not read output", __func__);
3480  }
3481 
3482  const TensorInfo& inputInfo = input.GetTensorInfo();
3483  unsigned int rank = inputInfo.GetNumDimensions();
3484 
3485  PadDescriptor descriptor;
3486  if (!ConvertPaddings(operation, model, data, rank, descriptor))
3487  {
3488  return Fail("%s: Could not convert paddings", __func__);
3489  }
3490 
3491  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3492 
3493  // Determine type of padding value
3494  OperandType operandType0;
3495  OperandType operandType2;
3496 
3497  if (!GetOperandType(operation, 0, model, operandType0) ||
3498  !GetOperandType(operation, 2, model, operandType2))
3499  {
3500  return Fail("%s: Operation has invalid inputs", __func__);
3501  }
3502 
3503  // Read value to use for padding
3504  if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
3505  {
3506  Half f16PadValue;
3507  if (!GetInputScalar(operation, 2, operandType2, f16PadValue, model, data))
3508  {
3509  return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
3510  }
3511 
3512  descriptor.m_PadValue = f16PadValue;
3513  }
3514  else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
3515  {
3516  if (!GetInputFloat32(operation, 2, descriptor.m_PadValue, model, data))
3517  {
3518  return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
3519  }
3520  }
3521  else if (isQuantizedOperand(operandType0) && operandType2 == OperandType::INT32)
3522  {
3523  int32_t intPadValue = 0;
3524  if (!GetInputInt32(operation, 2, intPadValue, model, data))
3525  {
3526  return Fail("%s: Could not read input 2 (INT32)", __func__);
3527  }
3528  descriptor.m_PadValue = intPadValue;
3529  }
3530  else
3531  {
3532  return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
3533  }
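 // Summary of accepted (input, pad value) operand type pairs:
 // (TENSOR_FLOAT16, FLOAT16), (TENSOR_FLOAT32, FLOAT32) and (quantized tensor, INT32).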
3534 
3535  bool isSupported = false;
3536  armnn::BackendId setBackend;
3537  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3538  {
3539  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3540  IsPadSupported,
3541  data.m_Backends,
3542  isSupported,
3543  setBackend,
3544  inputInfo,
3545  outputInfo,
3546  descriptor);
3547  };
3548 
3549  if (IsDynamicTensor(outputInfo))
3550  {
3551  isSupported = AreDynamicTensorsSupported();
3552  }
3553  else
3554  {
3555  validateFunc(outputInfo, isSupported);
3556  }
3557 
3558  if (!isSupported)
3559  {
3560  return false;
3561  }
3562 
3563  IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3564  assert(layer != nullptr);
3565  layer->SetBackendId(setBackend);
3566  input.Connect(layer->GetInputSlot(0));
3567 
3568  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3569 }
3570 
3571 bool Converter::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
3572 {
3573  VLOG(DRIVER) << "Converter::ConvertPrelu()";
3574 
3575  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3576  LayerInputHandle alpha = ConvertToLayerInputHandle(operation, 1, model, data);
3577 
3578  if (!input.IsValid() || !alpha.IsValid())
3579  {
3580  return Fail("%s: Operation has invalid inputs", __func__);
3581  }
3582 
3583  const Operand* output = GetOutputOperand(operation, 0, model);
3584 
3585  if (!output)
3586  {
3587  return Fail("%s: Could not read output", __func__);
3588  }
3589 
3590  const TensorInfo& inputInfo = input.GetTensorInfo();
3591  const TensorInfo& alphaInfo = alpha.GetTensorInfo();
3592  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3593 
3594  bool isSupported = false;
3595  armnn::BackendId setBackend;
3596  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3597  {
3598  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3599  IsPreluSupported,
3600  data.m_Backends,
3601  isSupported,
3602  setBackend,
3603  inputInfo,
3604  alphaInfo,
3605  outputInfo);
3606  };
3607 
3608  if (IsDynamicTensor(outputInfo))
3609  {
3610  isSupported = AreDynamicTensorsSupported();
3611  }
3612  else
3613  {
3614  validateFunc(outputInfo, isSupported);
3615  }
3616 
3617  if (!isSupported)
3618  {
3619  return false;
3620  }
3621 
3622  IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
3623 
3624  if (!layer)
3625  {
3626  return Fail("%s: AddPreluLayer failed", __func__);
3627  }
3628  layer->SetBackendId(setBackend);
3629 
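 // BroadcastTensor matches the ranks of input and alpha (reshaping the lower-rank tensor with
 // leading 1-dimensions) and connects both tensors to the PReLU layer's input slots, which is
 // why no explicit input.Connect call follows.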
3630  bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
3631  if (!isReshapeSupported)
3632  {
3633  return false;
3634  }
3635 
3636  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3637 }
3638 
3639 bool Converter::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
3640 {
3641  VLOG(DRIVER) << "Converter::ConvertQuantize()";
3642 
3643  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3644  if (!input.IsValid())
3645  {
3646  return Fail("%s: Operation has invalid input", __func__);
3647  }
3648 
3649  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
3650  if (!outputOperand)
3651  {
3652  return Fail("%s: Operation has invalid outputs", __func__);
3653  }
3654 
3655  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3656 
3657  bool isSupported = false;
3658  armnn::BackendId setBackend;
3659  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3660  {
3661  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3662  IsQuantizeSupported,
3663  data.m_Backends,
3664  isSupported,
3665  setBackend,
3666  input.GetTensorInfo(),
3667  outputInfo);
3668  };
3669 
3670  if (IsDynamicTensor(outputInfo))
3671  {
3672  isSupported = AreDynamicTensorsSupported();
3673  }
3674  else
3675  {
3676  validateFunc(outputInfo, isSupported);
3677  }
3678 
3679  if (!isSupported)
3680  {
3681  return false;
3682  }
3683 
3684  IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
3685  assert(layer != nullptr);
3686  layer->SetBackendId(setBackend);
3687  input.Connect(layer->GetInputSlot(0));
3688 
3689  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3690 }
3691 
3692 bool Converter::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
3693 {
3694  VLOG(DRIVER) << "Converter::ConvertQuantizedLstm()";
3695 
3697 
3698  //Inputs:
3699  // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
3700  // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
3701  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3702  if (!input.IsValid())
3703  {
3704  return Fail("%s: Could not read input 0: input", __func__);
3705  }
3706 
3707  // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, of shape [batch_size, output_size].
3708  LayerInputHandle outputStatePrevTimeStep = ConvertToLayerInputHandle(operation, 18, model, data);
3709  if (!outputStatePrevTimeStep.IsValid())
3710  {
3711  return Fail("%s: Could not read input 18: outputStatePrevTimeStep", __func__);
3712  }
3713 
3714  // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
3715  LayerInputHandle cellStatePrevTimeStep = ConvertToLayerInputHandle(operation, 19, model, data);
3716  if (!cellStatePrevTimeStep.IsValid())
3717  {
3718  return Fail("%s: Could not read input 19: cellStatePrevTimeStep", __func__);
3719  }
3720 
3721  // Get the mandatory input tensors:
3722 
3723  // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3724  // [num_units, input_size].
3725  const ConstTensorPin inputToForgetWeightsPin =
3726  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
3727 
3728  // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3729  // [num_units, input_size].
3730  const ConstTensorPin inputToCellWeightsPin =
3731  ConvertOperationInputToConstTensorPin(operation, 3, model, data);
3732 
3733  // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3734  // [num_units, input_size].
3735  const ConstTensorPin inputToOutputWeightsPin =
3736  ConvertOperationInputToConstTensorPin(operation, 4, model, data);
3737 
3738  // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3739  // [num_units, output_size].
3740  const ConstTensorPin recurrentToForgetWeightsPin =
3741  ConvertOperationInputToConstTensorPin(operation, 6, model, data);
3742 
3743  // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3744  // [num_units, output_size].
3745  const ConstTensorPin recurrentToCellWeightsPin =
3746  ConvertOperationInputToConstTensorPin(operation, 7, model, data);
3747 
3748  // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3749  // [num_units, output_size].
3750  const ConstTensorPin recurrentToOutputWeightsPin =
3751  ConvertOperationInputToConstTensorPin(operation, 8, model, data);
3752 
3753  // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3754  const ConstTensorPin forgetGateBiasPin =
3755  ConvertOperationInputToConstTensorPin(operation, 13, model, data);
3756 
3757  // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3758  const ConstTensorPin cellBiasPin =
3759  ConvertOperationInputToConstTensorPin(operation, 14, model, data);
3760 
3761  // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3762  const ConstTensorPin outputGateBiasPin =
3763  ConvertOperationInputToConstTensorPin(operation, 15, model, data);
3764 
3765  if (!inputToForgetWeightsPin.IsValid() ||
3766  !inputToCellWeightsPin.IsValid() ||
3767  !inputToOutputWeightsPin.IsValid() ||
3768  !recurrentToForgetWeightsPin.IsValid() ||
3769  !recurrentToCellWeightsPin.IsValid() ||
3770  !recurrentToOutputWeightsPin.IsValid() ||
3771  !forgetGateBiasPin.IsValid() ||
3772  !cellBiasPin.IsValid() ||
3773  !outputGateBiasPin.IsValid())
3774  {
3775  return Fail("%s: Operation has invalid tensor inputs", __func__);
3776  }
3777 
3778  // Get the optional input tensors:
3779 
3780  // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3781  // [num_units, input_size], where “num_units” corresponds to the number of cell units.
3782  const ConstTensorPin inputToInputWeightsPin =
3783  ConvertOperationInputToConstTensorPin(operation,
3784  1,
3785  model,
3786  data,
3787  g_DontPermute,
3788  nullptr,
3789  true);
3790 
3791  // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3792  // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
3793  // “num_units”), or the second dimension of the “projection_weights”, if defined.
3794  const ConstTensorPin recurrentToInputWeightsPin =
3795  ConvertOperationInputToConstTensorPin(operation,
3796  5,
3797  model,
3798  data,
3799  g_DontPermute,
3800  nullptr,
3801  true);
3802 
3803  // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
3804  // [num_units].
3805  const ConstTensorPin cellToInputWeightsPin =
3806  ConvertOperationInputToConstTensorPin(operation,
3807  9,
3808  model,
3809  data,
3810  g_DontPermute,
3811  nullptr,
3812  true);
3813 
3814  // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
3815  // [num_units].
3816  const ConstTensorPin cellToForgetWeightsPin =
3817  ConvertOperationInputToConstTensorPin(operation,
3818  10,
3819  model,
3820  data,
3821  g_DontPermute,
3822  nullptr,
3823  true);
3824 
3825  // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
3826  // [num_units].
3827  const ConstTensorPin cellToOutputWeightsPin =
3828  ConvertOperationInputToConstTensorPin(operation,
3829  11,
3830  model,
3831  data,
3832  g_DontPermute,
3833  nullptr,
3834  true);
3835 
3836  // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3837  const ConstTensorPin inputGateBiasPin =
3838  ConvertOperationInputToConstTensorPin(operation,
3839  12,
3840  model,
3841  data,
3842  g_DontPermute,
3843  nullptr,
3844  true);
3845 
3846  // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3847  // [output_size, num_units].
3848  const ConstTensorPin projectionWeightsPin =
3849  ConvertOperationInputToConstTensorPin(operation,
3850  16,
3851  model,
3852  data,
3853  g_DontPermute,
3854  nullptr,
3855  true);
3856 
3857  // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [output_size].
3858  const ConstTensorPin projectionBiasPin =
3859  ConvertOperationInputToConstTensorPin(operation,
3860  17,
3861  model,
3862  data,
3863  g_DontPermute,
3864  nullptr,
3865  true);
3866 
3867  if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional())
3868  || (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional())
3869  || (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional())
3870  || (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional())
3871  || (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional())
3872  || (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional())
3873  || (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional())
3874  || (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
3875  {
3876  return Fail("%s: Operation has invalid tensor inputs", __func__);
3877  }
3878 
3879 
3880  // Get the optional normalization tensors
3881 
3882  // 20: The input layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
3883  // Used to rescale normalized inputs to activation at input gate.
3884  const ConstTensorPin inputLayerNormWeightsPin =
3885  ConvertOperationInputToConstTensorPin(operation,
3886  20,
3887  model,
3888  data,
3889  g_DontPermute,
3890  nullptr,
3891  true);
3892 
3893  // 21: The forget layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM
3894  // Used to rescale normalized inputs to activation at forget gate.
3895  const ConstTensorPin forgetLayerNormWeightsPin =
3896  ConvertOperationInputToConstTensorPin(operation,
3897  21,
3898  model,
3899  data,
3900  g_DontPermute,
3901  nullptr,
3902  true);
3903 
3904  // 22: The cell layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
3905  // Used to rescale normalized inputs to activation at cell gate.
3906  const ConstTensorPin cellLayerNormWeightsPin =
3907  ConvertOperationInputToConstTensorPin(operation,
3908  22,
3909  model,
3910  data,
3911  g_DontPermute,
3912  nullptr,
3913  true);
3914 
3915  // 23: The output layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
3916  // Used to rescale normalized inputs to activation at output gate.
3917  const ConstTensorPin outputLayerNormWeightsPin =
3918  ConvertOperationInputToConstTensorPin(operation,
3919  23,
3920  model,
3921  data,
3922  g_DontPermute,
3923  nullptr,
3924  true);
3925 
3926  if ((!inputLayerNormWeightsPin.IsValid() && !inputLayerNormWeightsPin.IsOptional())
3927  || (!forgetLayerNormWeightsPin.IsValid() && !forgetLayerNormWeightsPin.IsOptional())
3928  || (!cellLayerNormWeightsPin.IsValid() && !cellLayerNormWeightsPin.IsOptional())
3929  || (!outputLayerNormWeightsPin.IsValid() && !outputLayerNormWeightsPin.IsOptional()))
3930  {
3931  return Fail("%s: Operation has invalid tensor inputs", __func__);
3932  }
3933 
3934  // Get the optional input scalars:
3935  // 24: The cell clip: If provided the cell state is clipped by this value prior to the cell output activation.
3936  // 25: The projection clip: If provided and projection is enabled, this is used for clipping the projected values.
3937 
3938  // Get the mandatory input scalars:
3939  // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
3940  // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
3941  // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
3942  // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
3943  // 30: The zero point of the hidden state, i.e. input to projection.
3944  // 31: The scale of the hidden state, i.e. input to projection.
3945  float cellClip, projClip, matMulInputGate, matMulForgetGate, matMulCellGate, matMulOutputGate, projInputScale;
3946  int projInputZeroPoint;
3947 
3948  if (!GetInputScalar(operation, 24, OperandType::FLOAT32, cellClip, model, data, true) ||
3949  !GetInputScalar(operation, 25, OperandType::FLOAT32, projClip, model, data, true) ||
3950  !GetInputScalar(operation, 26, OperandType::FLOAT32, matMulInputGate, model, data) ||
3951  !GetInputScalar(operation, 27, OperandType::FLOAT32, matMulForgetGate, model, data) ||
3952  !GetInputScalar(operation, 28, OperandType::FLOAT32, matMulCellGate, model, data) ||
3953  !GetInputScalar(operation, 29, OperandType::FLOAT32, matMulOutputGate, model, data) ||
3954  !GetInputScalar(operation, 30, OperandType::INT32, projInputZeroPoint, model, data) ||
3955  !GetInputScalar(operation, 31, OperandType::FLOAT32, projInputScale, model, data))
3956  {
3957  return Fail("%s: Operation has invalid scalar inputs", __func__);
3958  }
3959 
3960  // Outputs:
3961  // 0: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size,
3962  // output_size].
3963  const Operand* outputStateOut = GetOutputOperand(operation, 0, model);
3964  if (!outputStateOut)
3965  {
3966  return Fail("%s: Could not read output 0: outputStateOut", __func__);
3967  }
3968 
3969  // 1: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
3970  const Operand* cellStateOut = GetOutputOperand(operation, 1, model);
3971  if (!cellStateOut)
3972  {
3973  return Fail("%s: Could not read output 1: cellStateOut", __func__);
3974  }
3975 
3976  // 2: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size, output_size].
3977  // This is effectively the same as the current “output state (out)” value.
3978  const Operand* output = GetOutputOperand(operation, 2, model);
3979  if (!output)
3980  {
3981  return Fail("%s: Could not read output 2: output", __func__);
3982  }
3983 
3984  // set the params structure for the AddQLstmLayer call
3985  LstmInputParams params;
3986  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
3987  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
3988  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
3989  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
3990  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
3991  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
3992  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
3993  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
3994  params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
3995  params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
3996  params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
3997  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
3998  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
3999  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4000  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4001  params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
4002  params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
4003  params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
4004  params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
4005  params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
4006  params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
4007 
4008  // set the layer descriptor
4009  QLstmDescriptor desc;
4010  desc.m_CellClip = cellClip;
4011  desc.m_ProjectionClip = projClip;
4012  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
4013  params.m_RecurrentToInputWeights == nullptr ||
4014  params.m_InputGateBias == nullptr);
4015  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
4016  params.m_CellToOutputWeights != nullptr);
4017  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
4018  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
4019  params.m_ForgetLayerNormWeights != nullptr ||
4020  params.m_CellLayerNormWeights != nullptr ||
4021  params.m_OutputLayerNormWeights != nullptr);
4022  desc.m_InputIntermediateScale = matMulInputGate;
4023  desc.m_ForgetIntermediateScale = matMulForgetGate;
4024  desc.m_CellIntermediateScale = matMulCellGate;
4025  desc.m_OutputIntermediateScale = matMulOutputGate;
4026  desc.m_HiddenStateScale = projInputScale;
4027  desc.m_HiddenStateZeroPoint = projInputZeroPoint;
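 // The four intermediate scales re-quantize the gate matmul outputs before layer normalization;
 // the hidden state scale and zero point describe the quantization of the projection input.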
4028 
4029  // validate the optional input groups
4030  if (desc.m_CifgEnabled &&
4031  (params.m_InputToInputWeights != nullptr ||
4032  params.m_RecurrentToInputWeights != nullptr ||
4033  params.m_InputGateBias != nullptr))
4034  {
4035  return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
4036  " and input gate bias must be provided", __func__);
4037  }
4038 
4039  if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
4040  {
4041  return Fail("%s: projection bias should not be provided without projection weights", __func__);
4042  }
4043 
4044  if (desc.m_PeepholeEnabled &&
4045  (params.m_CellToForgetWeights == nullptr ||
4046  params.m_CellToOutputWeights == nullptr ||
4047  (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
4048  {
4049  return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
4050  " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
4051  }
4052 
4053  if (desc.m_LayerNormEnabled &&
4054  (params.m_ForgetLayerNormWeights == nullptr ||
4055  params.m_CellLayerNormWeights == nullptr ||
4056  params.m_OutputLayerNormWeights == nullptr ||
4057  (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
4058  {
4059  return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
4060  " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
4061  }
4062 
4063  // Basic parameters
4064  LstmInputParamsInfo paramsInfo;
4065  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4066  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4067  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4068  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4069  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4070  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4071  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4072  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4073  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4074 
4075  // Inputs
4076  const TensorInfo& inputInfo = input.GetTensorInfo();
4077  const TensorInfo& outputStatePrevTimeStepInfo = outputStatePrevTimeStep.GetTensorInfo();
4078  const TensorInfo& cellStatePrevTimeStepInfo = cellStatePrevTimeStep.GetTensorInfo();
4079 
4080  // Outputs
4081  TensorInfo outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
4082  TensorInfo outputInfo = GetTensorInfoForOperand(*output);
4083  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4084 
4085  // Optional parameters
4086  if (!desc.m_CifgEnabled)
4087  {
4088  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4089  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4090  if (desc.m_PeepholeEnabled)
4091  {
4092  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
4093  }
4094  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4095  }
4096 
4097 
4098  if (desc.m_ProjectionEnabled)
4099  {
4100  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
4101  if (params.m_ProjectionBias != nullptr)
4102  {
4103  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
4104  }
4105  }
4106  else
4107  {
4108  // If Projection is disabled, override non-const outputs to change the quant info with hidden params, then
4109  // create a new const TensorInfo based on this
4110  outputStateOutInfo.SetQuantizationScale(projInputScale);
4111  outputStateOutInfo.SetQuantizationOffset(projInputZeroPoint);
4112  outputInfo.SetQuantizationScale(projInputScale);
4113  outputInfo.SetQuantizationOffset(projInputZeroPoint);
4114  }
4115 
4116  const TensorInfo constOutputStateOutInfo(outputStateOutInfo);
4117  const TensorInfo constOutputInfo(outputInfo);
4118 
4119  if (desc.m_PeepholeEnabled)
4120  {
4121  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
4122  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
4123  }
4124 
4125  if (desc.m_LayerNormEnabled)
4126  {
4127  if (!desc.m_CifgEnabled)
4128  {
4129  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
4130  }
4131  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
4132  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
4133  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
4134  }
4135 
4136  // Check if the layer is supported
4137  bool isSupported = false;
4138  armnn::BackendId setBackend;
4139  auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
4140  {
4141  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4142  IsQLstmSupported,
4143  data.m_Backends,
4144  isSupported,
4145  setBackend,
4146  inputInfo,
4147  outputStatePrevTimeStepInfo,
4148  cellStatePrevTimeStepInfo,
4149  constOutputStateOutInfo,
4150  cellStateOutInfo,
4151  constOutputInfo,
4152  desc,
4153  paramsInfo);
4154  };
4155 
4156  bool isDynamic = false;
4157  if (!IsDynamicTensor(constOutputStateOutInfo) &&
4158  !IsDynamicTensor(cellStateOutInfo) &&
4159  !IsDynamicTensor(constOutputInfo))
4160  {
4161  validateFunc(cellStateOutInfo, isSupported);
4162  }
4163  else
4164  {
4165  isDynamic = true;
4166  isSupported = AreDynamicTensorsSupported();
4167  }
4168 
4169  if (!isSupported)
4170  {
4171  return false;
4172  }
4173 
4174  // Add the layer
4175  IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
4176  layer->SetBackendId(setBackend);
4177 
4178  input.Connect(layer->GetInputSlot(0));
4179  outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
4180  cellStatePrevTimeStep.Connect(layer->GetInputSlot(2));
4181 
4182  if (!isDynamic)
4183  {
4184  return (SetupAndTrackLayerOutputSlot(
4185  operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4186  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
4187  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4188  }
4189  else
4190  {
4191  return (SetupAndTrackLayerOutputSlot(
4192  operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4193  SetupAndTrackLayerOutputSlot(
4194  operation, 1, *layer, 1, model, data, nullptr, validateFunc,
4195  ActivationFn::kActivationNone, true) &&
4196  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4197  }
4198 }
4199 
4200 bool Converter::ConvertQuantized16BitLstm(const Operation& operation, const Model& model, ConversionData& data)
4201 {
4202  VLOG(DRIVER) << "Converter::ConvertQuantized16BitLstm()";
4204 
4205  // Inputs:
4206  // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
4207  // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
4208  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4209  if (!input.IsValid())
4210  {
4211  return Fail("%s: Could not read input 0: input", __func__);
4212  }
4213 
4214  //13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
4215  // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
4216  // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
4217  LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle(operation, 13, model, data);
4218  if (!previousCellStateIn.IsValid())
4219  {
4220  return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
4221  }
4222 
4223  // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4224  // [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
4225  // is quantized with a fixed quantization range of -1, 127/128.
4226  LayerInputHandle previousOutputIn = ConvertToLayerInputHandle(operation, 14, model, data);
4227  if (!previousOutputIn.IsValid())
4228  {
4229  return Fail("%s: Could not read input 14: previousOutputIn", __func__);
4230  }
4231 
4232  // Get the input tensors:
4233  // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4234  // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
4235  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4236  const ConstTensorPin inputToInputWeightsPin =
4237  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
4238 
4239  // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4240  // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
4241  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4242  const ConstTensorPin inputToForgetWeightsPin =
4243  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
4244 
4245  // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4246  // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
4247  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4248  const ConstTensorPin inputToCellWeightsPin =
4249  ConvertOperationInputToConstTensorPin(operation, 3, model, data);
4250 
4251  // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4252  // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
4253  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4254  const ConstTensorPin inputToOutputWeightsPin =
4255  ConvertOperationInputToConstTensorPin(operation, 4, model, data);
4256 
4257  // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4258  // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
4259  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4260  const ConstTensorPin recurrentToInputWeightsPin =
4261  ConvertOperationInputToConstTensorPin(operation, 5, model, data);
4262 
4263  // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4264  // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
4265  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4266  const ConstTensorPin recurrentToForgetWeightsPin =
4267  ConvertOperationInputToConstTensorPin(operation, 6, model, data);
4268 
4269  // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4270  // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
4271  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4272  const ConstTensorPin recurrentToCellWeightsPin =
4273  ConvertOperationInputToConstTensorPin(operation, 7, model, data);
4274 
4275  // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4276  // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
4277  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4278  const ConstTensorPin recurrentToOutputWeightsPin =
4279  ConvertOperationInputToConstTensorPin(operation, 8, model, data);
4280 
4281  // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
4282  // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4283  // of input and weights scales and zeroPoint equal to 0.
4284  const ConstTensorPin inputGateBiasPin =
4285  ConvertOperationInputToConstTensorPin(operation, 9, model, data);
4286 
4287  // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
4288  // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4289  // of input and weights scales and zeroPoint equal to 0.
4290  const ConstTensorPin forgetGateBiasPin =
4291  ConvertOperationInputToConstTensorPin(operation, 10, model, data);
4292 
4293  // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
4294  // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
4295  // and weights scales and zeroPoint equal to 0.
4296  const ConstTensorPin cellBiasPin =
4297  ConvertOperationInputToConstTensorPin(operation, 11, model, data);
4298 
4299  // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
4300  // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4301  // of input and weights scales and zeroPoint equal to 0.
4302  const ConstTensorPin outputGateBiasPin =
4303  ConvertOperationInputToConstTensorPin(operation, 12, model, data);
4304 
4305  if (!inputToInputWeightsPin.IsValid() ||
4306  !inputToForgetWeightsPin.IsValid() ||
4307  !inputToCellWeightsPin.IsValid() ||
4308  !inputToOutputWeightsPin.IsValid() ||
4309  !recurrentToInputWeightsPin.IsValid() ||
4310  !recurrentToForgetWeightsPin.IsValid() ||
4311  !recurrentToCellWeightsPin.IsValid() ||
4312  !recurrentToOutputWeightsPin.IsValid() ||
4313  !inputGateBiasPin.IsValid() ||
4314  !forgetGateBiasPin.IsValid() ||
4315  !cellBiasPin.IsValid() ||
4316  !outputGateBiasPin.IsValid())
4317  {
4318  return Fail("%s: Operation has invalid tensor inputs", __func__);
4319  }
4320 
4321  // Outputs:
4322  // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
4323  // which contains a cell state from the current time step. Tensor is quantized using a quantization range
4324  // of -2^4, 2^4 * 32767/32768.
4325  const Operand* cellStateOut = GetOutputOperand(operation, 0, model);
4326  if (!cellStateOut)
4327  {
4328  return Fail("%s: Could not read output 0: cellStateOut", __func__);
4329  }
4330 
4331  // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
4332  // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
4333  const Operand* output = GetOutputOperand(operation, 1, model);
4334  if (!output)
4335  {
4336  return Fail("%s: Could not read output 1: output", __func__);
4337  }
4338 
4339  // Inputs
4340  const TensorInfo& inputInfo = input.GetTensorInfo();
4341  const TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
4342  const TensorInfo& previousOutputInInfo = previousOutputIn.GetTensorInfo();
4343 
4344  // Outputs
4345  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4346  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4347 
4348  // Dynamic tensors currently not supported
4349  if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
4350  {
4351  return Fail("%s: Dynamic output tensors are not supported", __func__);
4352  }
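 // Note: because dynamic outputs were rejected above, the dynamic branch further below cannot
 // be reached; it mirrors the structure of the other converters.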
4353 
4354  QuantizedLstmInputParams params;
4355 
4356  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
4357  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
4358  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
4359  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
4360  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
4361  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
4362  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
4363  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
4364  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
4365  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
4366  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4367  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4368 
4369  QuantizedLstmInputParamsInfo paramsInfo;
4370  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4371  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4372  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4373  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4374  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4375  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4376  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4377  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4378  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4379  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4380  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4381  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4382 
4383  bool isSupported = false;
4384  armnn::BackendId setBackend;
4385  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4386  {
4387  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4388  IsQuantizedLstmSupported,
4389  data.m_Backends,
4390  isSupported,
4391  setBackend,
4392  inputInfo,
4393  previousCellStateInInfo,
4394  previousOutputInInfo,
4395  cellStateOutInfo,
4396  outputInfo,
4397  paramsInfo);
4398  };
4399 
4400  bool isDynamic = false;
4401  if (!IsDynamicTensor(cellStateOutInfo) &&
4402  !IsDynamicTensor(outputInfo))
4403  {
4404  validateFunc(outputInfo, isSupported);
4405  }
4406  else
4407  {
4408  isDynamic = true;
4409  isSupported = AreDynamicTensorsSupported();
4410  }
4411 
4412  if (!isSupported)
4413  {
4414  return false;
4415  }
4416 
4417  IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
4418  layer->SetBackendId(setBackend);
4419  input.Connect(layer->GetInputSlot(0));
4420  previousCellStateIn.Connect(layer->GetInputSlot(1));
4421  previousOutputIn.Connect(layer->GetInputSlot(2));
4422 
4423  if (!isDynamic)
4424  {
4425  return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
4426  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data));
4427  }
4428  else
4429  {
4430  return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
4431  SetupAndTrackLayerOutputSlot(
4432  operation, 1, *layer, 1, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
4433  }
4434 
4435 }
4436 
4437 bool Converter::ConvertRank(const Operation& operation, const Model& model, ConversionData& data)
4438 {
4439  VLOG(DRIVER) << "Converter::ConvertRank()";
4440 
4441  const Operand* inputOperand = GetInputOperand(operation, 0, model);
4442  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4443 
4444  if (inputOperand == nullptr || outputOperand == nullptr)
4445  {
4446  return Fail("%s: Operation has invalid inputs", __func__);
4447  }
4448 
4449  const Shape inputOperandShape = GetOperandShape(*inputOperand);
4450  const Shape outputOperandShape = GetOperandShape(*outputOperand);
4451 
4452  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4453  if (!input.IsValid())
4454  {
4455  return Fail("%s: Could not read input 0", __func__);
4456  }
4457 
4458  armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
4459  if (IsDynamicTensor(outInfo))
4460  {
4461  return Fail("%s: Dynamic output tensors are not supported", __func__);
4462  }
4463 
4464  bool isSupported = false;
4465  armnn::BackendId setBackend;
4466  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4467  IsRankSupported,
4468  data.m_Backends,
4469  isSupported,
4470  setBackend,
4471  input.GetTensorInfo(),
4472  outInfo);
4473  if (!isSupported)
4474  {
4475  return false;
4476  }
4477 
4478  armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
4479  assert(layer != nullptr);
4480  layer->SetBackendId(setBackend);
4481  input.Connect(layer->GetInputSlot(0));
4482 
4483  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, &outInfo);
4484 }
4485 
4486 bool Converter::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
4487 {
4488  VLOG(DRIVER) << "Converter::ConvertReLu()";
4489  armnn::ActivationDescriptor desc;
4490  desc.m_Function = armnn::ActivationFunction::ReLu;
4491 
4492 
4493  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4494  if (!input.IsValid())
4495  {
4496  return Fail("%s: Input 0 is invalid", __func__);
4497  }
4498 
4499  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4500  if (!outputOperand)
4501  {
4502  return false;
4503  }
4504 
4505  const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
4506 
4507  bool isSupported = false;
4508  armnn::BackendId setBackend;
4509  auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
4510  {
4511  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4512  IsActivationSupported,
4513  data.m_Backends,
4514  isSupported,
4515  setBackend,
4516  input.GetTensorInfo(),
4517  outInfo,
4518  desc);
4519  };
4520 
4521  if (IsDynamicTensor(outInfo))
4522  {
4523  isSupported = AreDynamicTensorsSupported();
4524  }
4525  else
4526  {
4527  validateFunc(outInfo, isSupported);
4528  }
4529 
4530  if (!isSupported)
4531  {
4532  return false;
4533  }
4534 
4535  armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
4536  ARMNN_ASSERT(layer != nullptr);
4537  layer->SetBackendId(setBackend);
4538  input.Connect(layer->GetInputSlot(0));
4539 
4540  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4541 }
4542 
4543 bool Converter::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
4544 {
4545  VLOG(DRIVER) << "Converter::ConvertReLu1()";
4546  armnn::ActivationDescriptor desc;
4547  desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4548  desc.m_A = 1.0f;
4549  desc.m_B = -1.0f;
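 // BoundedReLu clamps the activation output to the range [m_B, m_A], here [-1.0f, 1.0f].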
4550 
4551  return ConvertToActivation(operation, __func__, desc, model, data);
4552 }
4553 
4554 bool Converter::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
4555 {
4556  VLOG(DRIVER) << "Converter::ConvertReLu6()";
4557  armnn::ActivationDescriptor desc;
4558  desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4559  desc.m_A = 6.0f;
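 // m_B keeps its default of 0.0f, so BoundedReLu clamps to [0.0f, 6.0f] (ReLU6).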
4560 
4561  return ConvertToActivation(operation, __func__, desc, model, data);
4562 }
4563 
4564 bool Converter::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
4565 {
4566  VLOG(DRIVER) << "Converter::ConvertReshape()";
4567 
4568  const Operand* inputOperand = GetInputOperand(operation, 0, model);
4569  const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
4570  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4571 
4572  if (inputOperand == nullptr
4573  || requestedShapeOperand == nullptr
4574  || outputOperand == nullptr)
4575  {
4576  return Fail("%s: Operation has invalid inputs", __func__);
4577  }
4578 
4579  if (requestedShapeOperand->dimensions.size() != 1)
4580  {
4581  return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
4582  __func__, requestedShapeOperand->dimensions.size());
4583  }
4584 
4585  std::vector<int32_t> targetDimensions;
4586  if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
4587  {
4588  return Fail("%s: Could not read values of input 1", __func__);
4589  }
4590 
4591  const Shape inputOperandShape = GetOperandShape(*inputOperand);
4592 
4593  Shape requestedShape;
4594  // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
4595  // function that resolves these values into a fully specified tensor shape.
4596  if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
4597  {
4598  return Fail("%s: Failed to resolve the requested shape", __func__);
4599  }
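 // Example: reshaping a [2, 3, 4] input (24 elements) with targetDimensions {-1, 4} resolves
 // the -1 to 24 / 4 = 6, giving a requested shape of [6, 4].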
4600 
4601  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4602  if (!input.IsValid())
4603  {
4604  return Fail("%s: Could not read input 0", __func__);
4605  }
4606 
4607  armnn::ReshapeDescriptor reshapeDescriptor;
4608  reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
4609  requestedShape.dimensions.data());
4610 
4611  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
4612 
4613  bool isSupported = false;
4614  armnn::BackendId setBackend;
4615  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4616  {
4617  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4618  IsReshapeSupported,
4619  data.m_Backends,
4620  isSupported,
4621  setBackend,
4622  input.GetTensorInfo(),
4623  outputInfo,
4624  reshapeDescriptor);
4625  };
4626 
4627  if (!IsDynamicTensor(outputInfo))
4628  {
4629  validateFunc(outputInfo, isSupported);
4630  }
4631  else
4632  {
4633  isSupported = AreDynamicTensorsSupported();
4634  }
4635 
4636  if (!isSupported)
4637  {
4638  return false;
4639  }
4640 
4641  armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
4642  assert(layer != nullptr);
4643  layer->SetBackendId(setBackend);
4644  input.Connect(layer->GetInputSlot(0));
4645 
4646  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4647 }
4648 
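// Editor's note: an illustrative sketch, not part of the driver source, of the
// wildcard resolution that reshapePrepare() performs: a single -1 entry in the
// requested shape is replaced by whatever extent makes the element counts match.
// The function name is hypothetical; assumes <cstdint> and <vector>. Negative
// entries other than -1 are not handled in this sketch.
#include <cstdint>
#include <vector>

[[maybe_unused]] static bool ExampleResolveReshapeWildcard(uint32_t numInputElements,
                                                           std::vector<int32_t>& targetDims)
{
    int64_t knownProduct = 1;
    int wildcardIndex = -1;
    for (size_t i = 0; i < targetDims.size(); ++i)
    {
        if (targetDims[i] == -1)
        {
            if (wildcardIndex != -1) { return false; } // at most one -1 allowed
            wildcardIndex = static_cast<int>(i);
        }
        else
        {
            knownProduct *= targetDims[i];
        }
    }
    if (wildcardIndex != -1)
    {
        if (knownProduct == 0 || numInputElements % knownProduct != 0) { return false; }
        targetDims[wildcardIndex] = static_cast<int32_t>(numInputElements / knownProduct);
    }
    // e.g. 24 input elements with target {2, -1, 3} resolves to {2, 4, 3}
    return true;
}
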
4649 bool Converter::ConvertResize(const Operation& operation,
4650  const Model& model,
4651  ConversionData& data,
4652  ResizeMethod resizeMethod)
4653 {
4654  VLOG(DRIVER) << "Converter::ConvertResize()";
4655  VLOG(DRIVER) << "resizeMethod = " << GetResizeMethodAsCString(resizeMethod);
4656 
4657  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4658  if (!input.IsValid())
4659  {
4660  return Fail("%s: Could not read input 0", __func__);
4661  }
4662 
4663  const Operand* output = GetOutputOperand(operation, 0, model);
4664  if (!output)
4665  {
4666  return Fail("%s: Could not read output 0", __func__);
4667  }
4668 
4669  const TensorInfo& inputInfo = input.GetTensorInfo();
4670  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4671 
4672  ResizeDescriptor descriptor;
4673  descriptor.m_Method = resizeMethod;
4674  descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
4675 
4676  OperandType operandType1;
4677  OperandType operandType2;
4678 
4679  if (!GetOperandType(operation, 1, model, operandType1) ||
4680  !GetOperandType(operation, 2, model, operandType2))
4681  {
4682  return Fail("%s: Operation has invalid inputs", __func__);
4683  }
4684 
4685  if (operandType1 != operandType2)
4686  {
4687  return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
4688  }
4689 
4690  if (operandType1 == OperandType::INT32)
4691  {
4692  // Case 1: resizing by shape
4693  int32_t targetWidth = 0;
4694  int32_t targetHeight = 0;
4695 
4696  if (!GetInputInt32(operation, 1, targetWidth, model, data) ||
4697  !GetInputInt32(operation, 2, targetHeight, model, data))
4698  {
4699  return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
4700  }
4701 
4702  if (targetWidth < 0 || targetHeight < 0)
4703  {
4704  return Fail("%s: Operation has invalid inputs for resizing by shape. "
4705  "Target width/height cannot be < 0", __func__);
4706  }
4707 
4708  descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
4709  descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
4710  }
4711  else if (operandType1 == OperandType::FLOAT32)
4712  {
4713  // Case 2: resizing by scale
4714  float widthScale = 1.0f;
4715  float heightScale = 1.0f;
4716 
4717  if (!GetInputFloat32(operation, 1, widthScale, model, data) ||
4718  !GetInputFloat32(operation, 2, heightScale, model, data))
4719  {
4720  return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4721  }
4722 
4723  const TensorShape& inputShape = inputInfo.GetShape();
4724  armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4725 
4726  float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
4727  float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
4728 
4729  descriptor.m_TargetWidth = std::floor(width * widthScale);
4730  descriptor.m_TargetHeight = std::floor(height * heightScale);
4731  }
4732  else if (operandType1 == OperandType::FLOAT16)
4733  {
4734  Half widthScale;
4735  Half heightScale;
4736 
4737  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, widthScale, model, data) ||
4738  !GetInputScalar(operation, 2, OperandType::FLOAT16, heightScale, model, data))
4739  {
4740  return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4741  }
4742 
4743  const TensorShape& inputShape = inputInfo.GetShape();
4744  armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4745 
4746  Half width = static_cast<Half>(inputShape[dataLayoutIndexed.GetWidthIndex()]);
4747  Half height = static_cast<Half>(inputShape[dataLayoutIndexed.GetHeightIndex()]);
4748 
4749  descriptor.m_TargetWidth = std::floor(width * widthScale);
4750  descriptor.m_TargetHeight = std::floor(height * heightScale);
4751  }
4752  else
4753  {
4754  return Fail("%s: Operand has invalid data type for resizing by scale", __func__);
4755  }
4756 
4757  descriptor.m_AlignCorners = GetOptionalBool(operation, 4, model, data);
4758  descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data);
4759 
4760  bool isSupported = false;
4761  armnn::BackendId setBackend;
4762  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4763  {
4764  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4765  IsResizeSupported,
4766  data.m_Backends,
4767  isSupported,
4768  setBackend,
4769  inputInfo,
4770  outputInfo,
4771  descriptor);
4772  };
4773 
4774  if(IsDynamicTensor(outputInfo))
4775  {
4776  isSupported = AreDynamicTensorsSupported();
4777  }
4778  else
4779  {
4780  validateFunc(outputInfo, isSupported);
4781  }
4782 
4783  if (!isSupported)
4784  {
4785  return false;
4786  }
4787 
4788  IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
4789  assert(layer != nullptr);
4790  layer->SetBackendId(setBackend);
4791  input.Connect(layer->GetInputSlot(0));
4792 
4793  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4794 }
4795 
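// Editor's note: an illustrative sketch, not part of the driver source, of the
// FLOAT32 "resize by scale" path above: each target extent is the floor of the
// input extent multiplied by its scale. The function name is hypothetical;
// assumes <cmath> and <cstdint>.
#include <cmath>
#include <cstdint>

[[maybe_unused]] static uint32_t ExampleScaledExtent(uint32_t inputExtent, float scale)
{
    // e.g. inputExtent = 10, scale = 1.5f -> 15; scale = 0.33f -> 3
    return static_cast<uint32_t>(std::floor(static_cast<float>(inputExtent) * scale));
}
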
4796 bool Converter::ConvertReverseV2(const Operation& operation, const Model& model, ConversionData& data)
4797 {
4798  VLOG(DRIVER) << "Converter::ConvertReverseV2()";
4799 
4800  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
4801  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
4802  if (!input0.IsValid() || !input1.IsValid())
4803  {
4804  return Fail("%s: Operation has invalid inputs", __func__);
4805  }
4806  const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
4807  const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
4808 
4809  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4810  if (!outputOperand)
4811  {
4812  return Fail("%s: Could not read output 0", __func__);
4813  }
4814  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
4815 
4816  bool isSupported = false;
4817  armnn::BackendId setBackend;
4818  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4819  {
4820  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4821  IsReverseV2Supported,
4822  data.m_Backends,
4823  isSupported,
4824  setBackend,
4825  inputInfo0,
4826  inputInfo1,
4827  outputInfo);
4828  };
4829 
4830  if(!IsDynamicTensor(outputInfo))
4831  {
4832  validateFunc(outputInfo, isSupported);
4833  }
4834  else
4835  {
4836  isSupported = AreDynamicTensorsSupported();
4837  }
4838 
4839  if (!isSupported)
4840  {
4841  return false;
4842  }
4843 
4844  armnn::IConnectableLayer* const layer = data.m_Network->AddReverseV2Layer();
4845  assert(layer != nullptr);
4846  layer->SetBackendId(setBackend);
4847  input0.Connect(layer->GetInputSlot(0));
4848  input1.Connect(layer->GetInputSlot(1));
4849 
4850  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4851 }
4852 
4853 bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
4854 {
4855  VLOG(DRIVER) << "Converter::ConvertSpaceToBatchNd()";
4856 
4857  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4858  if(!input.IsValid())
4859  {
4860  return Fail("%s: Operation has invalid inputs", __func__);
4861  }
4862 
4863  const armnn::TensorInfo &inputInfo = input.GetTensorInfo();
4864  unsigned int rank = inputInfo.GetNumDimensions();
4865  unsigned int spatialDim = rank - 2;
4866 
4867  if(rank != 4)
4868  {
4869  return Fail("%s: Only inputs with rank 4 are supported", __func__);
4870  }
4871 
4872  const Operand *output = GetOutputOperand(operation, 0, model);
4873  if(!output)
4874  {
4875  return Fail("%s: Could not read output 0", __func__);
4876  }
4877 
4878  const armnn::TensorInfo &outputInfo = GetTensorInfoForOperand(*output);
4879 
4880  const Operand *blockShapeOperand = GetInputOperand(operation, 1, model);
4881  const Operand *paddingsOperand = GetInputOperand(operation, 2, model);
4882 
4883  armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4884  if(blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4885  {
4886  return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4887  }
4888 
4889  std::vector<int32_t> blockShape;
4890  if(!GetTensorInt32Values(*blockShapeOperand, blockShape, model, data))
4891  {
4892  return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4893  }
4894  if(std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i)
4895  { return i < 1; }))
4896  {
4897  return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4898  }
4899 
4900  armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4901  if(paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4902  {
4903  return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4904  }
4905 
4906  std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4907  std::vector<int32_t> paddings;
4908  if(!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
4909  {
4910  return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4911  }
4912  for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4913  {
4914  int paddingBeforeInput = paddings[i];
4915  int paddingAfterInput = paddings[i + 1];
4916  if(paddingBeforeInput < 0 || paddingAfterInput < 0)
4917  {
4918  return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4919  }
4920 
4921  paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
4922  }
4923 
4924  armnn::SpaceToBatchNdDescriptor descriptor;
4925  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4926  descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4927  descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4928 
4929  if(Is12OrLaterOperand(*output))
4930  {
4931  descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
4932  }
4933 
4934  bool isSupported = false;
4935  armnn::BackendId setBackend;
4936  auto validateFunc = [&](const armnn::TensorInfo &outputInfo, bool &isSupported)
4937  {
4938  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4939  IsSpaceToBatchNdSupported,
4940  data.m_Backends,
4941  isSupported,
4942  setBackend,
4943  inputInfo,
4944  outputInfo,
4945  descriptor);
4946  };
4947 
4948  if(IsDynamicTensor(outputInfo))
4949  {
4950  isSupported = AreDynamicTensorsSupported();
4951  } else
4952  {
4953  validateFunc(outputInfo, isSupported);
4954  }
4955 
4956  if(!isSupported)
4957  {
4958  return false;
4959  }
4960 
4961  armnn::IConnectableLayer *const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
4962  assert(layer != nullptr);
4963  layer->SetBackendId(setBackend);
4964  input.Connect(layer->GetInputSlot(0));
4965 
4966  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4967 }
4968 
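// Editor's note: an illustrative sketch, not part of the driver source, of how
// the block shape and paddings gathered above determine the SPACE_TO_BATCH_ND
// output shape for an NHWC input: each padded spatial extent is divided by its
// block size, and the batch is multiplied by the product of the block sizes.
// The function name is hypothetical; assumes <array>, <cstdint> and <utility>.
#include <array>
#include <cstdint>
#include <utility>

[[maybe_unused]] static std::array<uint32_t, 4>
ExampleSpaceToBatchNdShape(const std::array<uint32_t, 4>& nhwcIn,                        // {N, H, W, C}
                           const std::array<uint32_t, 2>& blockShape,                    // {bH, bW}
                           const std::array<std::pair<uint32_t, uint32_t>, 2>& pads)     // {{top,bottom},{left,right}}
{
    const uint32_t paddedH = nhwcIn[1] + pads[0].first + pads[0].second;
    const uint32_t paddedW = nhwcIn[2] + pads[1].first + pads[1].second;
    // e.g. {1, 4, 4, 1} with block {2, 2} and no padding -> {4, 2, 2, 1}
    return { nhwcIn[0] * blockShape[0] * blockShape[1],
             paddedH / blockShape[0],
             paddedW / blockShape[1],
             nhwcIn[3] };
}
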
4969 bool Converter::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
4970 {
4971  VLOG(DRIVER) << "Converter::ConvertSpaceToDepth()";
4972 
4973  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4974  if (!input.IsValid())
4975  {
4976  return Fail("%s: Operation has invalid inputs", __func__);
4977  }
4978 
4979  const TensorInfo& inputInfo = input.GetTensorInfo();
4980  unsigned int rank = inputInfo.GetNumDimensions();
4981  if (rank != 4)
4982  {
4983  return Fail("%s: Only inputs with rank 4 are supported", __func__);
4984  }
4985 
4986  const Operand* output = GetOutputOperand(operation, 0, model);
4987  if (!output)
4988  {
4989  return Fail("%s: Could not read output 0", __func__);
4990  }
4991 
4992  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4993 
4994  SpaceToDepthDescriptor desc;
4995 
4996  GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
4997 
4998  if (desc.m_BlockSize <= 1)
4999  {
5000  return Fail("%s: Block size must be greater than 1", __func__);
5001  }
5002 
5003  desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
5004 
5005  bool isSupported = false;
5006  armnn::BackendId setBackend;
5007  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5008  {
5009  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5010  IsSpaceToDepthSupported,
5011  data.m_Backends,
5012  isSupported,
5013  setBackend,
5014  inputInfo,
5015  outputInfo,
5016  desc);
5017  };
5018 
5019  if(IsDynamicTensor(outputInfo))
5020  {
5021  isSupported = AreDynamicTensorsSupported();
5022  }
5023  else
5024  {
5025  validateFunc(outputInfo, isSupported);
5026  }
5027 
5028  if (!isSupported)
5029  {
5030  return false;
5031  }
5032 
5033  IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
5034  assert(layer != nullptr);
5035  layer->SetBackendId(setBackend);
5036  input.Connect(layer->GetInputSlot(0));
5037 
5038  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5039 }
5040 
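// Editor's note: an illustrative sketch, not part of the driver source, of the
// SPACE_TO_DEPTH shape transform configured above: for an NHWC input with block
// size b, H and W shrink by a factor of b while C grows by b*b.
// The function name is hypothetical; assumes <array> and <cstdint>.
#include <array>
#include <cstdint>

[[maybe_unused]] static std::array<uint32_t, 4>
ExampleSpaceToDepthShape(const std::array<uint32_t, 4>& nhwcIn, uint32_t blockSize)
{
    // e.g. {1, 4, 4, 3} with blockSize 2 -> {1, 2, 2, 12}
    return { nhwcIn[0], nhwcIn[1] / blockSize, nhwcIn[2] / blockSize,
             nhwcIn[3] * blockSize * blockSize };
}
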
5041 bool Converter::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
5042 {
5043  VLOG(DRIVER) << "Converter::ConvertSoftmax()";
5044 
5045  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5046  if (!input.IsValid())
5047  {
5048  return Fail("%s: Operation has invalid inputs", __func__);
5049  }
5050 
5051  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
5052  if (!outputOperand)
5053  {
5054  return Fail("%s: Operation has no outputs", __func__);
5055  }
5056 
5057  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
5058 
5059  SoftmaxDescriptor desc;
5060  OperandType outputType = outputOperand->type;
5061 
5062  // Read beta value
5063  if (outputType == OperandType::TENSOR_FLOAT16)
5064  {
5065  Half value;
5066 
5067  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
5068  {
5069  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5070  }
5071 
5072  desc.m_Beta = static_cast<float>(value);
5073  }
5074  else
5075  {
5076  if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
5077  {
5078  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5079  }
5080  }
5081 
5082  if (operation.inputs.size() > 2 && !GetInputScalar(operation,
5083  2,
5084  OperandType::INT32,
5085  desc.m_Axis,
5086  model,
5087  data))
5088  {
5089  return Fail("%s: Operation has invalid inputs", __func__);
5090  }
5091 
5092  bool isSupported = false;
5093  armnn::BackendId setBackend;
5094  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5095  {
5096  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5097  IsSoftmaxSupported,
5098  data.m_Backends,
5099  isSupported,
5100  setBackend,
5101  input.GetTensorInfo(),
5102  outputInfo,
5103  desc);
5104  };
5105 
5106  if(IsDynamicTensor(outputInfo))
5107  {
5108  isSupported = AreDynamicTensorsSupported();
5109  }
5110  else
5111  {
5112  validateFunc(outputInfo, isSupported);
5113  }
5114 
5115  if (!isSupported)
5116  {
5117  return false;
5118  }
5119 
5120  IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
5121  assert(layer != nullptr);
5122  layer->SetBackendId(setBackend);
5123  input.Connect(layer->GetInputSlot(0));
5124 
5125  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5126 }
5127 
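// Editor's note: an illustrative sketch, not part of the driver source, of the
// role of the beta value read above: softmax(x_i) = exp(beta * x_i) / sum_j
// exp(beta * x_j), so beta = 1 is the standard softmax and larger beta sharpens
// the distribution. The function name is hypothetical; assumes <cmath> and
// <vector>. (No max-subtraction here; a production kernel would stabilise the
// exponentials.)
#include <cmath>
#include <vector>

[[maybe_unused]] static std::vector<float> ExampleSoftmax(const std::vector<float>& x, float beta)
{
    std::vector<float> out(x.size());
    float sum = 0.0f;
    for (size_t i = 0; i < x.size(); ++i)
    {
        out[i] = std::exp(beta * x[i]);
        sum += out[i];
    }
    for (float& v : out)
    {
        v /= sum;
    }
    return out;
}
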
5128 bool Converter::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
5129 {
5130  VLOG(DRIVER) << "Converter::ConvertTanH()";
5131 
5132  ActivationDescriptor desc;
5133  desc.m_Function = ActivationFunction::TanH;
5134  desc.m_A = 1.0f; // Android NN does not support TanH parameters
5135  desc.m_B = 1.0f; // set to 1.0f for unity scaling
5136 
5137  return ConvertToActivation(operation, __func__, desc, model, data);
5138 }
5139 
5140 bool Converter::ConvertTile(const Operation& operation, const Model& model, ConversionData& data)
5141 {
5142  VLOG(DRIVER) << "Converter::ConvertTile()";
5143 
5144  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5145  if (!input.IsValid())
5146  {
5147  return Fail("%s: Operation has invalid inputs", __func__);
5148  }
5149  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5150 
5151  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
5152  if (!outputOperand)
5153  {
5154  return Fail("%s: Operation has no outputs", __func__);
5155  }
5156  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
5157 
5158  const Operand* multiplesOperand = GetInputOperand(operation, 1, model);
5159  if (!multiplesOperand)
5160  {
5161  return Fail("%s: Could not read input 1", __func__);
5162  }
5163  std::vector<int32_t> multiples;
5164  if (!GetTensorInt32Values(*multiplesOperand, multiples, model, data))
5165  {
5166  return Fail("%s: Input 1 has invalid values", __func__);
5167  }
5168 
5169  TileDescriptor descriptor;
5170  descriptor.m_Multiples.assign(multiples.begin(), multiples.end());
5171 
5172  bool isSupported = false;
5173  armnn::BackendId setBackend;
5174  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5175  {
5176  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5177  IsTileSupported,
5178  data.m_Backends,
5179  isSupported,
5180  setBackend,
5181  inputInfo,
5182  outputInfo,
5183  descriptor);
5184  };
5185 
5186  if(IsDynamicTensor(outputInfo))
5187  {
5188  isSupported = AreDynamicTensorsSupported();
5189  }
5190  else
5191  {
5192  validateFunc(outputInfo, isSupported);
5193  }
5194 
5195  if (!isSupported)
5196  {
5197  return false;
5198  }
5199 
5200  IConnectableLayer* const layer = data.m_Network->AddTileLayer(descriptor);
5201  assert(layer != nullptr);
5202  layer->SetBackendId(setBackend);
5203  input.Connect(layer->GetInputSlot(0));
5204 
5205  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5206 }
5207 
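// Editor's note: an illustrative sketch, not part of the driver source, of how
// the multiples read above determine the TILE output shape: each dimension is
// multiplied by its corresponding entry. The function name is hypothetical;
// assumes <cstdint> and <vector>.
#include <cstdint>
#include <vector>

[[maybe_unused]] static std::vector<uint32_t>
ExampleTiledShape(const std::vector<uint32_t>& inShape, const std::vector<int32_t>& multiples)
{
    std::vector<uint32_t> outShape(inShape.size());
    for (size_t i = 0; i < inShape.size(); ++i)
    {
        outShape[i] = inShape[i] * static_cast<uint32_t>(multiples[i]);
    }
    // e.g. {2, 3} with multiples {2, 1} -> {4, 3}
    return outShape;
}
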
5208 bool Converter::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
5209 {
5210  VLOG(DRIVER) << "Converter::ConvertTransposeConv2d()";
5211 
5212  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5213 
5214  if (!input.IsValid())
5215  {
5216  return Fail("%s: Operation has invalid inputs", __func__);
5217  }
5218 
5219  const Operand* output = GetOutputOperand(operation, 0, model);
5220 
5221  if (!output)
5222  {
5223  return Fail("%s: Could not read output 0", __func__);
5224  }
5225 
5226  const TensorInfo& inputInfo = input.GetTensorInfo();
5227  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5228 
5229  // ArmNN does not currently support non-fixed weights or bias
5230  // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
5231  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
5232 
5233  if (weightsOperand == nullptr)
5234  {
5235  return Fail("%s: Operand is invalid", __func__);
5236  }
5237  TransposeConvolution2dDescriptor desc;
5238  desc.m_DataLayout = DataLayout::NHWC;
5239 
5240  // Determine whether padding is implicit or explicit
5241  bool implicitPadding = operation.inputs.size() == 9;
5242 
5243  if (implicitPadding)
5244  {
5245  desc.m_DataLayout = OptionalDataLayout(operation, 8, model, data);
5246  }
5247  else
5248  {
5249  desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
5250  }
5251 
5252  armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
5253  unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
5254  unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
5255 
5256  const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
5257 
5258  // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
5259  // We have to permute it to OIHW if the data layout is NCHW.
5260  const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
5261  ConvertOperationInputToConstTensorPin(operation, 1,
5262  model, data, OHWIToOIHW) :
5263  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
5264 
5265  // Bias is a 1D tensor
5266  const ConstTensorPin biasPin =
5267  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
5268 
5269  if (!weightsPin.IsValid())
5270  {
5271  return Fail("%s: Operation has invalid weights", __func__);
5272  }
5273 
5274  if (!biasPin.IsValid())
5275  {
5276  return Fail("%s: Operation has invalid biases", __func__);
5277  }
5278 
5279  ConstTensor weights = weightsPin.GetConstTensor();
5280  ConstTensor bias = biasPin.GetConstTensor();
5281  SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
5282 
5283  ActivationFn activation;
5284 
5285  if (implicitPadding)
5286  {
5287  int32_t strideX{0};
5288  int32_t strideY{0};
5289  int32_t padLeft{0};
5290  int32_t padRight{0};
5291  int32_t padTop{0};
5292  int32_t padBottom{0};
5293 
5294  ::android::nn::PaddingScheme paddingScheme;
5295  if (!GetInputPaddingScheme(operation, 4, paddingScheme, model, data) ||
5296  !GetInputScalar(operation, 5, OperandType::INT32, strideX, model, data) ||
5297  !GetInputScalar(operation, 6, OperandType::INT32, strideY, model, data) ||
5298  !GetInputActivationFunction(operation, 7, activation, model, data))
5299  {
5300  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
5301  }
5302 
5303  const uint32_t kernelX = weights.GetShape()[widthIndex];
5304  const uint32_t kernelY = weights.GetShape()[heightIndex];
5305 
5306  // If output shape has been specified as a parameter then extract it and make it available.
5307  const Operand* outputShapeOperand = GetInputOperand(operation, 3, model, false);
5308  std::vector<int32_t> outputShape;
5309  if ((outputShapeOperand) && (GetTensorInt32Values(*outputShapeOperand, outputShape, model, data)))
5310  {
5311  // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
5312  for (int dimension : outputShape)
5313  {
5314  desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
5315  }
5316  desc.m_OutputShapeEnabled = true;
5317  }
5318 
5319  uint32_t outputX;
5320  uint32_t outputY;
5321 
5322  if (IsDynamicTensor(outputInfo))
5323  {
5324  if (outputShape.size() == 0)
5325  {
5326  return Fail("%s: Padding sizes cannot be inferred", __func__);
5327  }
5328 
5329  outputX = outputShape[widthIndex];
5330  outputY = outputShape[heightIndex];
5331  }
5332  else
5333  {
5334  outputX = outputInfo.GetShape()[widthIndex];
5335  outputY = outputInfo.GetShape()[heightIndex];
5336  }
5337 
5338  CalcPaddingTransposeConv(outputX, kernelX, strideX, padLeft, padRight, paddingScheme);
5339  CalcPaddingTransposeConv(outputY, kernelY, strideY, padTop, padBottom, paddingScheme);
5340 
5341  // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
5342  // but Arm NN only supports values >= 0
5343  if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
5344  {
5345  return Fail("%s: Negative padding values are not supported", __func__);
5346  }
5347 
5348  desc.m_StrideX = armnn::numeric_cast<uint32_t>(strideX);
5349  desc.m_StrideY = armnn::numeric_cast<uint32_t>(strideY);
5350  desc.m_PadLeft = armnn::numeric_cast<uint32_t>(padLeft);
5351  desc.m_PadRight = armnn::numeric_cast<uint32_t>(padRight);
5352  desc.m_PadTop = armnn::numeric_cast<uint32_t>(padTop);
5353  desc.m_PadBottom = armnn::numeric_cast<uint32_t>(padBottom);
5354  }
5355  else if (operation.inputs.size() == 11)
5356  {
5357  // explicit padding
5358  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
5359  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
5360  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
5361  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
5362  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
5363  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
5364  !GetInputActivationFunction(operation, 9, activation, model, data))
5365  {
5366  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
5367  }
5368  }
5369  else
5370  {
5371  return Fail("%s: Unsupported number of operation inputs", __func__);
5372  }
5373 
5374  desc.m_BiasEnabled = true;
5375  Optional<TensorInfo> biases(bias.GetInfo());
5376 
5377  bool isSupported = false;
5378  armnn::BackendId setBackend;
5379  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5380  {
5381  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5382  IsTransposeConvolution2dSupported,
5383  data.m_Backends,
5384  isSupported,
5385  setBackend,
5386  inputInfo,
5387  outputInfo,
5388  desc,
5389  weights.GetInfo(),
5390  biases);
5391  };
5392 
5393  if(IsDynamicTensor(outputInfo))
5394  {
5395  isSupported = AreDynamicTensorsSupported();
5396  }
5397  else
5398  {
5399  validateFunc(outputInfo, isSupported);
5400  }
5401  if (!isSupported)
5402  {
5403  return false;
5404  }
5405 
5406  IConnectableLayer* startLayer =
5407  data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
5408  if (!startLayer)
5409  {
5410  return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
5411  }
5412  startLayer->SetBackendId(setBackend);
5413 
5414  input.Connect(startLayer->GetInputSlot(0));
5415 
5416  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
5417  data, nullptr, validateFunc, activation);
5418 }
5419 
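// Editor's note: an illustrative sketch, not part of the driver source.
// CalcPaddingTransposeConv (from ConversionUtils) computes the explicit padding
// for the implicit-padding case above; this sketch only records the standard
// transpose-convolution size relation that any such padding must satisfy:
// outputSize = (inputSize - 1) * stride + kernel - totalPadding. It derives the
// total padding, not the left/right split. The function name is hypothetical;
// assumes <cstdint>.
#include <cstdint>

[[maybe_unused]] static int32_t
ExampleTransposeConvTotalPadding(int32_t inputSize, int32_t outputSize,
                                 int32_t kernel, int32_t stride)
{
    // e.g. inputSize 4, stride 2, kernel 3, desired output 8 -> total padding 1
    return (inputSize - 1) * stride + kernel - outputSize;
}
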
5420 bool Converter::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
5421 {
5422  VLOG(DRIVER) << "Converter::ConvertSqrt()";
5423  ActivationDescriptor desc;
5424  desc.m_Function = ActivationFunction::Sqrt;
5425 
5426  return ::ConvertToActivation(operation, __func__, desc, model, data);
5427 }
5428 
5429 bool Converter::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
5430 {
5431  VLOG(DRIVER) << "Converter::ConvertSqueeze()";
5432 
5433  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5434  if (!input.IsValid())
5435  {
5436  return Fail("%s: Operation has invalid inputs", __func__);
5437  }
5438 
5439  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5440  unsigned int rank = inputInfo.GetNumDimensions();
5441  if (rank > 4)
5442  {
5443  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5444  }
5445 
5446  const Operand* output = GetOutputOperand(operation, 0, model);
5447  if (!output)
5448  {
5449  return Fail("%s: Could not read output 0", __func__);
5450  }
5451 
5452  if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
5453  {
5454  return Fail("%s: Dynamic output tensors are not supported", __func__);
5455  }
5456 
5457  // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
5458  // if the operand index is out of bounds.
5459  const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
5460 
5461  const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
5462 
5463  std::vector<int32_t> axis;
5464  if (!axisOperand)
5465  {
5466  axis.assign(dimensionSequence,
5467  dimensionSequence + rank);
5468  }
5469  else if (!GetTensorInt32Values(*axisOperand, axis, model, data))
5470  {
5471  return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
5472  }
5473 
5474  std::vector<uint32_t> outputDims;
5475  for (unsigned int i = 0; i < rank; i++)
5476  {
5477  bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
5478  auto currentDimension = inputInfo.GetShape()[i];
5479  if (skipSqueeze || currentDimension != 1)
5480  {
5481  outputDims.push_back(currentDimension);
5482  }
5483  }
5484 
5485  armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
5486 
5487  armnn::TensorInfo outputInfo = inputInfo;
5488  outputInfo.SetShape(outShape);
5489 
5490  armnn::ReshapeDescriptor reshapeDesc;
5491  reshapeDesc.m_TargetShape = outputInfo.GetShape();
5492 
5493  bool isSupported = false;
5494  armnn::BackendId setBackend;
5495  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5496  IsReshapeSupported,
5497  data.m_Backends,
5498  isSupported,
5499  setBackend,
5500  inputInfo,
5501  outputInfo,
5502  reshapeDesc);
5503 
5504  if (!isSupported)
5505  {
5506  return false;
5507  }
5508 
5509  armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
5510  assert(layer != nullptr);
5511  layer->SetBackendId(setBackend);
5512  input.Connect(layer->GetInputSlot(0));
5513 
5514  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
5515 }
5516 
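// Editor's note: an illustrative sketch, not part of the driver source,
// mirroring the output-dimension loop above: a dimension is dropped only when
// it has extent 1 and is listed in (or defaulted into) the axis set. The
// function name is hypothetical; assumes <algorithm>, <cstdint> and <vector>.
#include <algorithm>
#include <cstdint>
#include <vector>

[[maybe_unused]] static std::vector<uint32_t>
ExampleSqueezedDims(const std::vector<uint32_t>& inShape, const std::vector<int32_t>& axis)
{
    std::vector<uint32_t> outDims;
    for (size_t i = 0; i < inShape.size(); ++i)
    {
        const bool inAxis =
            std::find(axis.begin(), axis.end(), static_cast<int32_t>(i)) != axis.end();
        if (!inAxis || inShape[i] != 1)
        {
            outDims.push_back(inShape[i]);
        }
    }
    // e.g. {1, 2, 1, 3} with axis {0} -> {2, 1, 3}; with axis {0, 1, 2, 3} -> {2, 3}
    return outDims;
}
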
5517 bool Converter::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
5518 {
5519  VLOG(DRIVER) << "Converter::ConvertStridedSlice()";
5520 
5521  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5522  if (!input.IsValid())
5523  {
5524  return Fail("%s: Operation has invalid inputs", __func__);
5525  }
5526 
5527  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5528  unsigned int rank = inputInfo.GetNumDimensions();
5529  if (rank > 4)
5530  {
5531  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5532  }
5533 
5534  const Operand* output = GetOutputOperand(operation, 0, model);
5535  if (!output)
5536  {
5537  return Fail("%s: Could not read output 0", __func__);
5538  }
5539 
5540  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5541 
5542  const Operand* beginOperand = GetInputOperand(operation, 1, model);
5543  const Operand* endOperand = GetInputOperand(operation, 2, model);
5544  const Operand* stridesOperand = GetInputOperand(operation, 3, model);
5545 
5546  std::vector<int32_t> beginValues;
5547  std::vector<int32_t> endValues;
5548  std::vector<int32_t> stridesValues;
5549 
5550  // The lengths of the beginOperand, endOperand and stridesOperand must equal the rank of the input
5551  auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
5552  {
5553  if (!GetTensorInt32Values(operand, operandValues, model, data))
5554  {
5555  return false;
5556  }
5557 
5558  if (operandValues.size() != rank)
5559  {
5560  return false;
5561  }
5562 
5563  return true;
5564  };
5565 
5566  if (!ValidateInputOperands(*beginOperand, beginValues)
5567  || !ValidateInputOperands(*endOperand, endValues)
5568  || !ValidateInputOperands(*stridesOperand, stridesValues))
5569  {
5570  return Fail("%s: Operation has invalid input operand", __func__);
5571  }
5572 
5573  // Stride cannot have value '0'
5574  if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
5575  {
5576  return Fail("%s: Stride must be non-zero value.", __func__);
5577  }
5578 
5579  armnn::StridedSliceDescriptor descriptor;
5580  descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
5581  descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
5582  descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
5583  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
5584 
5585  // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
5586  if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data) ||
5587  !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data) ||
5588  !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
5589  {
5590  return Fail("%s: Operation has invalid inputs", __func__);
5591  }
5592 
5593  bool isSupported = false;
5594  armnn::BackendId setBackend;
5595  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5596  {
5597  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5598  IsStridedSliceSupported,
5599  data.m_Backends,
5600  isSupported,
5601  setBackend,
5602  inputInfo,
5603  outputInfo,
5604  descriptor);
5605  };
5606 
5607  if(IsDynamicTensor(outputInfo))
5608  {
5609  isSupported = AreDynamicTensorsSupported();
5610  }
5611  else
5612  {
5613  validateFunc(outputInfo, isSupported);
5614  }
5615 
5616  if (!isSupported)
5617  {
5618  return false;
5619  }
5620 
5621  // Check if the slice can fit in an inferred output
5622  armnn::TensorShape inputShape = inputInfo.GetShape();
5623  for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
5624  {
5625  int stride = descriptor.m_Stride[i];
5626 
5627  if (descriptor.m_ShrinkAxisMask & (1 << i))
5628  {
5629  // If the difference between the start point and the end point of the slice on an axis being shrunk
5630  // is greater than 1, fail: the output will not be large enough to hold the slice
5631  if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
5632  || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
5633  {
5634  return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
5635  }
5636 
5637  if(stride < 0)
5638  {
5639  return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
5640  }
5641  }
5642  }
5643 
5644  armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
5645  assert(layer != nullptr);
5646  layer->SetBackendId(setBackend);
5647  input.Connect(layer->GetInputSlot(0));
5648 
5649  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5650 }
5651 
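// Editor's note: an illustrative sketch, not part of the driver source, of how
// the begin/end/stride triple validated above determines the extent of a sliced
// axis. The function name is hypothetical; assumes <algorithm> and <cstdint>.
#include <algorithm>
#include <cstdint>

[[maybe_unused]] static int32_t ExampleSlicedExtent(int32_t begin, int32_t end, int32_t stride)
{
    // Positive stride only: visits begin, begin + stride, ... while < end.
    // e.g. begin 0, end 5, stride 2 -> indices {0, 2, 4} -> extent 3
    if (stride <= 0) { return 0; } // negative strides omitted from this sketch
    return std::max(0, (end - begin + stride - 1) / stride);
}
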
5652 bool Converter::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
5653 {
5654  VLOG(DRIVER) << "Converter::ConvertTranspose()";
5655 
5656  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5657  if (!input.IsValid())
5658  {
5659  return Fail("%s: Operation has invalid inputs", __func__);
5660  }
5661 
5662  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5663  unsigned int rank = inputInfo.GetNumDimensions();
5664  if (rank > 4)
5665  {
5666  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5667  }
5668 
5669  // NOTE: The permutation vector is an optional parameter to TRANSPOSE, therefore we do not want to
5670  // generate a failure if the operand index is out of bounds.
5671  const Operand* permOperand = GetInputOperand(operation, 1, model, false);
5672 
5673  std::vector<int32_t> perm(rank);
5674  if (!permOperand || (permOperand->lifetime == OperandLifeTime::NO_VALUE))
5675  {
5676  for (unsigned int i = rank; i > 0; i--)
5677  {
5678  perm[rank - i] = armnn::numeric_cast<int> (i - 1);
5679  }
5680  }
5681  else if (!GetTensorInt32Values(*permOperand, perm, model, data))
5682  {
5683  return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
5684  }
5685 
5686  std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
5687 
5688  armnn::TransposeDescriptor transposeDesc;
5689  transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
5690 
5691  const Operand* output = GetOutputOperand(operation, 0, model);
5692  if (!output)
5693  {
5694  return Fail("%s: Could not read output 0", __func__);
5695  }
5696 
5697  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5698 
5699  bool isSupported = false;
5700  armnn::BackendId setBackend;
5701  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5702  {
5703  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5704  IsTransposeSupported,
5705  data.m_Backends,
5706  isSupported,
5707  setBackend,
5708  inputInfo,
5709  outputInfo,
5710  transposeDesc);
5711  };
5712 
5713  if(IsDynamicTensor(outputInfo))
5714  {
5715  isSupported = AreDynamicTensorsSupported();
5716  }
5717  else
5718  {
5719  validateFunc(outputInfo, isSupported);
5720  }
5721 
5722  if (!isSupported)
5723  {
5724  return false;
5725  }
5726 
5727  armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
5728  assert(layer != nullptr);
5729  layer->SetBackendId(setBackend);
5730  input.Connect(layer->GetInputSlot(0));
5731 
5732  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5733 }
5734 
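// Editor's note: an illustrative sketch, not part of the driver source, of the
// default permutation built above when no perm operand is given: the dimensions
// are simply reversed. The function name is hypothetical; assumes <cstdint> and
// <vector>.
#include <cstdint>
#include <vector>

[[maybe_unused]] static std::vector<int32_t> ExampleDefaultTransposePerm(uint32_t rank)
{
    std::vector<int32_t> perm(rank);
    for (uint32_t i = rank; i > 0; --i)
    {
        perm[rank - i] = static_cast<int32_t>(i - 1);
    }
    // e.g. rank 4 -> {3, 2, 1, 0}
    return perm;
}
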
5735 } // namespace armnn_driver
ARMNN_ASSERT
#define ARMNN_ASSERT(COND)
Definition: Assert.hpp:14
armnn::Convolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:570
armnn::InstanceNormalizationDescriptor::m_Beta
float m_Beta
Beta, the offset scalar value applied for the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:867
armnn::GetBinaryOperationAsCString
constexpr char const * GetBinaryOperationAsCString(BinaryOperation operation)
Definition: TypesUtils.hpp:75
armnn::IConnectableLayer::SetBackendId
virtual void SetBackendId(const BackendId &id)=0
Set the backend of the IConnectableLayer.
armnn_driver::GetOptionalInputActivation
bool GetOptionalInputActivation(const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:853
armnn::LstmInputParams::m_RecurrentToForgetWeights
const ConstTensor * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:45
armnn::LstmInputParamsInfo::m_InputToInputWeights
const TensorInfo * m_InputToInputWeights
Definition: LstmParams.hpp:89
armnn::BinaryOperation::Mul
@ Mul
armnn::FullyConnectedDescriptor::m_ConstantWeights
bool m_ConstantWeights
Enable/disable constant weights and biases.
Definition: Descriptors.hpp:530
armnn::ViewsDescriptor
A ViewsDescriptor for the SplitterLayer.
Definition: Descriptors.hpp:244
armnn::LstmInputParams::m_OutputLayerNormWeights
const ConstTensor * m_OutputLayerNormWeights
Definition: LstmParams.hpp:60
armnn::QLstmDescriptor::m_ForgetIntermediateScale
float m_ForgetIntermediateScale
Forget intermediate quantization scale.
Definition: Descriptors.hpp:1407
armnn::ActivationDescriptor
An ActivationDescriptor for the ActivationLayer.
Definition: Descriptors.hpp:36
armnn::TransposeConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:1448
armnn::BatchMatMulDescriptor::m_TransposeX
bool m_TransposeX
Transpose the slices of each input tensor Transpose and Adjoint can not both be set to true for the s...
Definition: Descriptors.hpp:1591
armnn_driver::GetTensorInt32Values
bool GetTensorInt32Values(const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data)
Definition: ConversionUtils.cpp:822
armnn_driver::Converter::ConvertOperation
static bool ConvertOperation(const Operation &operation, const Model &model, ConversionData &data)
Definition: Converter.cpp:21
armnn::FullyConnectedDescriptor
A FullyConnectedDescriptor for the FullyConnectedLayer.
Definition: Descriptors.hpp:507
armnn::BinaryOperation::Add
@ Add
armnn::GetLogicalBinaryOperationAsCString
constexpr char const * GetLogicalBinaryOperationAsCString(LogicalBinaryOperation operation)
Definition: TypesUtils.hpp:108
armnn::BaseTensor::GetMemoryArea
MemoryType GetMemoryArea() const
Definition: Tensor.hpp:305
armnn::QLstmDescriptor
A QLstmDescriptor for the QLstmLayer.
Definition: Descriptors.hpp:1359
armnn::TransposeConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:1456
armnn::Optional
Definition: Optional.hpp:270
armnn::IConnectableLayer::GetNumInputSlots
virtual unsigned int GetNumInputSlots() const =0
Returns the number of connectable input slots.
armnn::QuantizedLstmInputParamsInfo::m_InputToCellWeights
const TensorInfo * m_InputToCellWeights
Definition: QuantizedLstmParams.hpp:141
armnn::GetResizeMethodAsCString
constexpr const char * GetResizeMethodAsCString(ResizeMethod method)
Definition: TypesUtils.hpp:275
armnn::QLstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1401
armnn::ResizeDescriptor::m_HalfPixelCenters
bool m_HalfPixelCenters
Half Pixel Centers.
Definition: Descriptors.hpp:997
armnn::ResizeMethod
ResizeMethod
Definition: Types.hpp:165
armnn::DataLayout
DataLayout
Definition: Types.hpp:62
armnn::LstmInputParamsInfo::m_InputLayerNormWeights
const TensorInfo * m_InputLayerNormWeights
Definition: LstmParams.hpp:106
armnn::SpaceToBatchNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1050
armnn::FullyConnectedDescriptor::m_TransposeWeightMatrix
bool m_TransposeWeightMatrix
Enable/disable transpose weight matrix.
Definition: Descriptors.hpp:528
armnn::QuantizedLstmInputParamsInfo::m_ForgetGateBias
const TensorInfo * m_ForgetGateBias
Definition: QuantizedLstmParams.hpp:150
armnn::ResizeDescriptor::m_TargetHeight
uint32_t m_TargetHeight
Target height value.
Definition: Descriptors.hpp:988
armnn::DepthwiseConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:708
armnn::TensorInfo::GetQuantizationScales
std::vector< float > GetQuantizationScales() const
Definition: Tensor.cpp:451
armnn::StridedSliceDescriptor::m_Begin
std::vector< int > m_Begin
Begin values for the input that will be sliced.
Definition: Descriptors.hpp:1321
armnn::LstmInputParams::m_ProjectionBias
const ConstTensor * m_ProjectionBias
Definition: LstmParams.hpp:56
armnn::LstmInputParams::m_RecurrentToCellWeights
const ConstTensor * m_RecurrentToCellWeights
Definition: LstmParams.hpp:46
armnn::DataLayout::NHWC
@ NHWC
armnn_driver::Operand
::android::nn::Operand Operand
Definition: ConversionUtils.hpp:44
armnn::QuantizedLstmInputParams::m_InputToOutputWeights
const ConstTensor * m_InputToOutputWeights
Definition: QuantizedLstmParams.hpp:36
armnn::LstmInputParams::m_CellBias
const ConstTensor * m_CellBias
Definition: LstmParams.hpp:53
armnn::ResizeDescriptor
A ResizeDescriptor for the ResizeLayer.
Definition: Descriptors.hpp:964
armnn::ArgMinMaxDescriptor
An ArgMinMaxDescriptor for ArgMinMaxLayer.
Definition: Descriptors.hpp:67
armnn_driver::IsDynamicTensor
bool IsDynamicTensor(const armnn::TensorInfo &tensorInfo)
Checks if a tensor info represents a dynamic tensor.
Definition: CanonicalUtils.cpp:488
armnn::ActivationDescriptor::m_A
float m_A
Alpha upper bound value used by the activation functions. (BoundedReLu, Linear, TanH,...
Definition: Descriptors.hpp:61
armnn_driver::ConvertPooling2d
bool ConvertPooling2d(const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:380
armnn_driver::LayerInputHandle::Connect
void Connect(armnn::IInputSlot &inputSlot)
Definition: ConversionUtils.cpp:32
armnn_driver::IsWeightsValid
bool IsWeightsValid(const Operation &operation, uint32_t inputIndex, const Model &model)
Utility functions.
Definition: ConversionUtils.cpp:134
armnn::InstanceNormalizationDescriptor
An InstanceNormalizationDescriptor for InstanceNormalizationLayer.
Definition: Descriptors.hpp:847
armnn_driver::ConvertOperationInputToConstTensorPin
ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings=g_DontPermute, const armnn::TensorShape *overrideTensorShape=nullptr, bool optional=false)
Definition: ConversionUtils.hpp:718
armnn::SoftmaxDescriptor::m_Beta
float m_Beta
Exponentiation value.
Definition: Descriptors.hpp:190
armnn::GatherDescriptor
A GatherDescriptor for the GatherLayer.
Definition: Descriptors.hpp:944
armnn::TensorInfo::GetQuantizationScale
float GetQuantizationScale() const
Definition: Tensor.cpp:461
armnn::DepthwiseConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:710
armnn::L2NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:824
armnn::TensorInfo
Definition: Tensor.hpp:152
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToOutputWeights
const TensorInfo * m_RecurrentToOutputWeights
Definition: QuantizedLstmParams.hpp:147
armnn::L2NormalizationDescriptor
A L2NormalizationDescriptor for the L2NormalizationLayer.
Definition: Descriptors.hpp:809
armnn::NormalizationAlgorithmMethod::LocalBrightness
@ LocalBrightness
Krichevsky 2012: Local Brightness Normalization.
armnn::NormalizationDescriptor::m_Beta
float m_Beta
Beta value for the normalization equation.
Definition: Descriptors.hpp:801
armnn_driver::ConvertToLayerInputHandle
LayerInputHandle ConvertToLayerInputHandle(const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle)
Definition: ConversionUtils.cpp:204
armnn_driver::isQuantizedOperand
bool isQuantizedOperand(const OperandType &operandType)
Definition: CanonicalUtils.cpp:507
armnn::NormalizationDescriptor
A NormalizationDescriptor for the NormalizationLayer.
Definition: Descriptors.hpp:769
armnn::TensorInfo::GetNumDimensions
unsigned int GetNumDimensions() const
Definition: Tensor.hpp:195
armnn::QuantizedLstmInputParamsInfo::m_CellBias
const TensorInfo * m_CellBias
Definition: QuantizedLstmParams.hpp:151
armnn::BatchToSpaceNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape values.
Definition: Descriptors.hpp:898
armnn::LstmInputParamsInfo::m_OutputGateBias
const TensorInfo * m_OutputGateBias
Definition: LstmParams.hpp:103
armnnUtils::DataLayoutIndexed
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.
Definition: DataLayoutIndexed.hpp:17
armnn_driver::ConversionData
Definition: ConversionUtils.hpp:51
armnn_driver::Converter::Model
::android::nn::Model Model
Definition: Converter.hpp:24
armnn_driver::GetOptionalBool
bool GetOptionalBool(const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:900
armnn::ResizeDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:993
armnn::ActivationFunction::TanH
@ TanH
armnn::LstmInputParamsInfo::m_CellToInputWeights
const TensorInfo * m_CellToInputWeights
Definition: LstmParams.hpp:97
armnn::QuantizedLstmInputParams::m_RecurrentToInputWeights
const ConstTensor * m_RecurrentToInputWeights
Definition: QuantizedLstmParams.hpp:38
armnn::DepthwiseConvolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:692
armnn::LstmInputParamsInfo::m_CellLayerNormWeights
const TensorInfo * m_CellLayerNormWeights
Definition: LstmParams.hpp:108
armnn::Convolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:576
armnn::QuantizedLstmInputParams::m_RecurrentToForgetWeights
const ConstTensor * m_RecurrentToForgetWeights
Definition: QuantizedLstmParams.hpp:39
armnn::ConcatDescriptor
OriginsDescriptor ConcatDescriptor
Definition: DescriptorsFwd.hpp:57
FORWARD_LAYER_SUPPORT_FUNC
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend,...)
Definition: ConversionUtils.hpp:153
armnn_driver::Half
half_float::half Half
Definition: Converter.cpp:14
armnn_driver::GetOutputOperand
const Operand * GetOutputOperand(const Operation &operation, uint32_t outputIndex, const Model &model)
Definition: ConversionUtils.hpp:662
armnn::ArgMinMaxDescriptor::m_Function
ArgMinMaxFunction m_Function
Specify if the function is to find Min or Max.
Definition: Descriptors.hpp:81
armnn_driver::Operation
::android::nn::Operation Operation
Definition: ConversionUtils.hpp:47
armnn::SpaceToBatchNdDescriptor::m_BlockShape
std::vector< unsigned int > m_BlockShape
Block shape value.
Definition: Descriptors.hpp:1045
armnn::StridedSliceDescriptor::m_BeginMask
int32_t m_BeginMask
Begin mask value.
Definition: Descriptors.hpp:1329
armnn::LstmInputParamsInfo::m_RecurrentToCellWeights
const TensorInfo * m_RecurrentToCellWeights
Definition: LstmParams.hpp:95
armnn::LstmInputParams::m_CellToOutputWeights
const ConstTensor * m_CellToOutputWeights
Definition: LstmParams.hpp:50
armnn::LstmInputParams::m_InputToCellWeights
const ConstTensor * m_InputToCellWeights
Definition: LstmParams.hpp:42
armnn_driver::ConvertToActivation
bool ConvertToActivation(const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:592
armnn::DataType::QAsymmU8
@ QAsymmU8
armnn_driver::LayerInputHandle::SanitizeQuantizationScale
void SanitizeQuantizationScale(LayerInputHandle &weight, LayerInputHandle &input)
Definition: ConversionUtils.cpp:55
armnn::QLstmDescriptor::m_InputIntermediateScale
float m_InputIntermediateScale
Input intermediate quantization scale.
Definition: Descriptors.hpp:1405
armnn::ArgMinMaxFunction
ArgMinMaxFunction
Definition: Types.hpp:102
armnn::ActivationFunction::BoundedReLu
@ BoundedReLu
min(a, max(b, input)) ReLu1 & ReLu6.
armnn::QuantizedLstmInputParams::m_ForgetGateBias
const ConstTensor * m_ForgetGateBias
Definition: QuantizedLstmParams.hpp:44
armnn::QuantizedLstmInputParams::m_RecurrentToCellWeights
const ConstTensor * m_RecurrentToCellWeights
Definition: QuantizedLstmParams.hpp:40
armnn::Half
half_float::half Half
Definition: Half.hpp:22
armnn::IOutputSlot::GetTensorInfo
virtual const TensorInfo & GetTensorInfo() const =0
armnn_driver::OptionalDataLayout
armnn::DataLayout OptionalDataLayout(const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data)
Definition: ConversionUtils.cpp:853
armnn::NormalizationDescriptor::m_NormSize
uint32_t m_NormSize
Depth radius value.
Definition: Descriptors.hpp:797
armnnUtils::TransposeTensorShape
armnn::TensorShape TransposeTensorShape(const armnn::TensorShape &srcShape, const armnn::PermutationVector &mappings)
Definition: Transpose.cpp:98
armnn_driver::g_DontPermute
const armnn::PermutationVector g_DontPermute
Definition: CanonicalUtils.cpp:38
armnn::LogicalBinaryOperation
LogicalBinaryOperation
Definition: Types.hpp:118
armnn::Convolution2dDescriptor::m_PadLeft
uint32_t m_PadLeft
Padding left value in the width dimension.
Definition: Descriptors.hpp:566
armnn::BatchToSpaceNdDescriptor::m_Crops
std::vector< std::pair< unsigned int, unsigned int > > m_Crops
The values to crop from the input dimension.
Definition: Descriptors.hpp:900
armnn::LstmInputParamsInfo::m_OutputLayerNormWeights
const TensorInfo * m_OutputLayerNormWeights
Definition: LstmParams.hpp:109
armnn::TensorInfo::HasPerAxisQuantization
bool HasPerAxisQuantization() const
Definition: Tensor.cpp:446
armnn::DepthwiseConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:702
armnn::Convolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation along y axis.
Definition: Descriptors.hpp:580
armnn::BoostLogSeverityMapping::error
@ error
armnn::IConnectableLayer::GetNumOutputSlots
virtual unsigned int GetNumOutputSlots() const =0
Returns the number of connectable output slots.
armnn::QLstmDescriptor::m_CellIntermediateScale
float m_CellIntermediateScale
Cell intermediate quantization scale.
Definition: Descriptors.hpp:1409
armnn::NormalizationDescriptor::m_NormMethodType
NormalizationAlgorithmMethod m_NormMethodType
Normalization method algorithm to use (LocalBrightness, LocalContrast).
Definition: Descriptors.hpp:795
armnn::QuantizedLstmInputParamsInfo::m_OutputGateBias
const TensorInfo * m_OutputGateBias
Definition: QuantizedLstmParams.hpp:152
armnn::TransposeConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:1454
armnn::LstmInputParams::m_ForgetGateBias
const ConstTensor * m_ForgetGateBias
Definition: LstmParams.hpp:52
armnn_driver::OperandType
::android::nn::OperandType OperandType
Definition: ConversionUtils.hpp:46
armnn::NormalizationAlgorithmChannel::Across
@ Across
TensorUtils.hpp
armnn_driver::GetInputPaddingScheme
bool GetInputPaddingScheme(const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data)
Definition: ConversionUtils.cpp:761
armnn::LstmInputParams::m_CellToInputWeights
const ConstTensor * m_CellToInputWeights
Definition: LstmParams.hpp:48
armnn::QuantizedLstmInputParamsInfo
Definition: QuantizedLstmParams.hpp:119
armnn::MeanDescriptor::m_KeepDims
bool m_KeepDims
Enable/disable keep dimensions. If true, then the reduced dimensions that are of length 1 are kept.
Definition: Descriptors.hpp:1171
armnn::FillDescriptor::m_Value
float m_Value
Definition: Descriptors.hpp:940
armnn::LstmInputParamsInfo::m_RecurrentToOutputWeights
const TensorInfo * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:96
armnn::ElementwiseBinaryDescriptor
A ElementwiseBinaryDescriptor for the ElementwiseBinaryLayer.
Definition: Descriptors.hpp:109
armnn::ResizeDescriptor::m_Method
ResizeMethod m_Method
The Interpolation method to use (Bilinear, NearestNeighbor).
Definition: Descriptors.hpp:991
armnn::SpaceToBatchNdDescriptor::m_PadList
std::vector< std::pair< unsigned int, unsigned int > > m_PadList
Specifies the padding values for the input dimension: heightPad{top, bottom} widthPad{left,...
Definition: Descriptors.hpp:1048
armnn::LstmInputParams::m_InputToOutputWeights
const ConstTensor * m_InputToOutputWeights
Definition: LstmParams.hpp:43
armnn::LstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1127
armnn::TensorShape
Definition: Tensor.hpp:20
armnn::QuantizedLstmInputParamsInfo::m_InputToOutputWeights
const TensorInfo * m_InputToOutputWeights
Definition: QuantizedLstmParams.hpp:142
armnn::LstmInputParamsInfo::m_ForgetLayerNormWeights
const TensorInfo * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:107
armnn::TensorInfo::SetQuantizationScale
void SetQuantizationScale(float scale)
Definition: Tensor.cpp:473
armnn::NormalizationDescriptor::m_NormChannelType
NormalizationAlgorithmChannel m_NormChannelType
Normalization channel algorithm to use (Across, Within).
Definition: Descriptors.hpp:793
armnn::BinaryOperation::Maximum
@ Maximum
armnn::LstmInputParams::m_CellToForgetWeights
const ConstTensor * m_CellToForgetWeights
Definition: LstmParams.hpp:49
armnn::LstmInputParams::m_RecurrentToInputWeights
const ConstTensor * m_RecurrentToInputWeights
Definition: LstmParams.hpp:44
armnn::LstmDescriptor::m_ClippingThresProj
float m_ClippingThresProj
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1123
armnn_driver::ConvertPaddings
bool ConvertPaddings(const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor)
Definition: ConversionUtils.cpp:338
armnn::LstmInputParamsInfo::m_RecurrentToForgetWeights
const TensorInfo * m_RecurrentToForgetWeights
Definition: LstmParams.hpp:94
armnn::StridedSliceDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1342
armnn::LstmInputParamsInfo::m_CellToForgetWeights
const TensorInfo * m_CellToForgetWeights
Definition: LstmParams.hpp:98
armnn::LstmInputParams::m_InputToInputWeights
const ConstTensor * m_InputToInputWeights
Definition: LstmParams.hpp:40
armnn::TensorShape::GetNumDimensions
unsigned int GetNumDimensions() const
Function that returns the tensor rank.
Definition: Tensor.cpp:174
armnn::DepthwiseConvolution2dDescriptor::m_DilationY
uint32_t m_DilationY
Dilation factor value for height dimension.
Definition: Descriptors.hpp:706
armnn::QuantizedLstmInputParamsInfo::m_InputGateBias
const TensorInfo * m_InputGateBias
Definition: QuantizedLstmParams.hpp:149
armnn::QuantizedLstmInputParams::m_RecurrentToOutputWeights
const ConstTensor * m_RecurrentToOutputWeights
Definition: QuantizedLstmParams.hpp:41
armnn::LstmInputParams::m_RecurrentToOutputWeights
const ConstTensor * m_RecurrentToOutputWeights
Definition: LstmParams.hpp:47
armnn::FullyConnectedDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:526
armnn_driver
Helper classes.
Definition: ArmnnDevice.cpp:37
armnn::PadDescriptor
A PadDescriptor for the PadLayer.
Definition: Descriptors.hpp:1175
armnn::IOutputSlot::SetTensorInfo
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
armnn::TransposeDescriptor
A TransposeDescriptor for the TransposeLayer.
Definition: Descriptors.hpp:1469
armnn::BatchMatMulDescriptor::m_TransposeY
bool m_TransposeY
Definition: Descriptors.hpp:1592
armnn::TensorInfo::GetQuantizationDim
Optional< unsigned int > GetQuantizationDim() const
Definition: Tensor.cpp:494
armnn_driver::Model
::android::nn::Model Model
Helper classes.
Definition: ConversionUtils.hpp:43
armnn::InstanceNormalizationDescriptor::m_Gamma
float m_Gamma
Gamma, the scale scalar value applied to the normalized tensor. Defaults to 1.0.
Definition: Descriptors.hpp:865
armnn::LstmInputParams::m_InputGateBias
const ConstTensor * m_InputGateBias
Definition: LstmParams.hpp:51
armnn::Convolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:582
armnn_driver::GetOperandType
bool GetOperandType(const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type)
Definition: ConversionUtils.hpp:683
armnn::ReshapeDescriptor
A ReshapeDescriptor for the ReshapeLayer.
Definition: Descriptors.hpp:1002
armnn_driver::GetInputInt32
bool GetInputInt32(const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:782
armnn_driver::GetOptionalConvolutionDilationParams
bool GetOptionalConvolutionDilationParams(const Operation &operation, uint32_t dilationXIndex, ConvolutionDescriptor &descriptor, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:874
armnn::PadDescriptor::m_PadValue
float m_PadValue
Optional value to use for padding, defaults to 0.
Definition: Descriptors.hpp:1200
armnn_driver::GetInputScalar
bool GetInputScalar(const Operation &operation, uint32_t inputIndex, OperandType type, OutputType &outValue, const Model &model, const ConversionData &data, bool optional=false)
Definition: ConversionUtils.hpp:742
armnn::DepthwiseConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:694
armnn::ActivationDescriptor::m_Function
ActivationFunction m_Function
The activation function to use (Sigmoid, TanH, Linear, ReLu, BoundedReLu, SoftReLu,...
Definition: Descriptors.hpp:59
armnn_driver::LayerInputHandle
Definition: ConversionUtils.hpp:66
armnn::NormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:805
armnn::BatchMatMulDescriptor
A BatchMatMulDescriptor for the BatchMatMul operator.
Definition: Descriptors.hpp:1563
armnn::QuantizedLstmInputParamsInfo::m_InputToInputWeights
const TensorInfo * m_InputToInputWeights
Definition: QuantizedLstmParams.hpp:139
armnn_driver::SetupAndTrackLayerOutputSlot
bool SetupAndTrackLayerOutputSlot(const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes)
Definition: ConversionUtils.cpp:963
armnn::ReduceOperation::Sum
@ Sum
armnnUtils::ExpandDims
armnn::TensorShape ExpandDims(const armnn::TensorShape &tensorShape, int axis)
Definition: TensorUtils.cpp:140
armnn::GetDataTypeSize
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:172
armnn_driver::ConstTensorPin::GetConstTensor
const armnn::ConstTensor & GetConstTensor() const
Definition: ConversionUtils.cpp:115
armnn::BaseTensor::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:297
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:584
armnn::QLstmDescriptor::m_ProjectionClip
float m_ProjectionClip
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1395
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params to gather indices from.
Definition: Descriptors.hpp:960
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1022
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:572
armnn::PermutationVector
Definition: Types.hpp:308
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:1018
armnn::QuantizedLstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: QuantizedLstmParams.hpp:34
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1458
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1332
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:295
armnn::QuantizedLstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: QuantizedLstmParams.hpp:46
armnn::LstmInputParamsInfo::m_InputToCellWeights
const TensorInfo * m_InputToCellWeights
Definition: LstmParams.hpp:91
armnn::LstmInputParamsInfo::m_CellBias
const TensorInfo * m_CellBias
Definition: LstmParams.hpp:102
armnn::UnaryOperation
UnaryOperation
Definition: Types.hpp:124
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:574
armnn_driver::LayerInputHandle::GetOutputSlot
armnn::IOutputSlot * GetOutputSlot() const
Definition: ConversionUtils.cpp:70
armnn::QLstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1411
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:568
armnn_driver::ConstTensorPin::IsOptional
bool IsOptional() const
Definition: ConversionUtils.cpp:110
armnn::OriginsDescriptor::SetConcatAxis
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
Definition: Descriptors.cpp:158
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:198
armnn_driver::GetInputOperand
const Operand * GetInputOperand(const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true)
Definition: ConversionUtils.hpp:643
armnn::LayerType::Shape
@ Shape
armnn::GetArgMinMaxFunctionAsCString
constexpr char const * GetArgMinMaxFunctionAsCString(ArgMinMaxFunction function)
Definition: TypesUtils.hpp:51
armnn::LstmInputParams::m_InputLayerNormWeights
const ConstTensor * m_InputLayerNormWeights
Definition: LstmParams.hpp:57
armnn::LstmInputParamsInfo::m_RecurrentToInputWeights
const TensorInfo * m_RecurrentToInputWeights
Definition: LstmParams.hpp:93
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ReduceOperation::Prod
@ Prod
armnn::LstmInputParamsInfo::m_ForgetGateBias
const TensorInfo * m_ForgetGateBias
Definition: LstmParams.hpp:101
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:698
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1334
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1071
armnn::CreateDescriptorForConcatenation
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing concatenation of a number of input tensors.
Definition: Descriptors.hpp:300
armnn::TensorInfo::SetQuantizationOffset
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:489
armnn::LstmInputParams::m_ForgetLayerNormWeights
const ConstTensor * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:58
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:986
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1325
armnn::InstanceNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:871
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1081
armnn::ComparisonOperation
ComparisonOperation
Definition: Types.hpp:108
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1282
armnn::TransposeDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts.
Definition: Descriptors.hpp:1493
armnn::TransposeConvolution2dDescriptor::m_OutputShape
std::vector< unsigned int > m_OutputShape
Definition: Descriptors.hpp:1465
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:995
armnn::TileDescriptor::m_Multiples
std::vector< uint32_t > m_Multiples
The vector to multiply the input shape by.
Definition: Descriptors.hpp:1635
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1169
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable cifg (coupled input & forget gate).
Definition: Descriptors.hpp:1125
armnn::IOutputSlot::Connect
virtual int Connect(IInputSlot &destination)=0
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1497
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:799
armnn::BinaryOperation
BinaryOperation
Definition: Types.hpp:137
armnn::GetUnaryOperationAsCString
constexpr char const * GetUnaryOperationAsCString(UnaryOperation operation)
Definition: TypesUtils.hpp:91
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:191
armnn::QLstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1413
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:578
armnn::SoftmaxDescriptor::m_Axis
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
Definition: Descriptors.hpp:192
armnn_driver::ConversionData::m_Network
armnn::INetworkPtr m_Network
Definition: ConversionUtils.hpp:60
armnn_driver::IsOperandConstant
bool IsOperandConstant(const Operand &operand)
Definition: ConversionUtils.hpp:698
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1131
armnn::QuantizedLstmInputParams::m_InputToInputWeights
const ConstTensor * m_InputToInputWeights
Definition: QuantizedLstmParams.hpp:33
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1452
armnn_driver::ConstTensorPin::GetConstTensorPtr
const armnn::ConstTensor * GetConstTensorPtr() const
Definition: ConversionUtils.cpp:120
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1450
armnn::QuantizedLstmInputParams::m_CellBias
const ConstTensor * m_CellBias
Definition: QuantizedLstmParams.hpp:45
android::nn
Definition: support_library_service.cpp:10
armnn::LstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: LstmParams.hpp:54
armnn_driver::GetTensorInfoForOperand
armnn::TensorInfo GetTensorInfoForOperand(const Operand &operand)
Definition: CanonicalUtils.cpp:97
armnn::QLstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1397
armnn_driver::GetInputActivationFunctionFromTensor
bool GetInputActivationFunctionFromTensor(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:837
armnn::BackendId
Definition: BackendId.hpp:75
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToCellWeights
const TensorInfo * m_RecurrentToCellWeights
Definition: QuantizedLstmParams.hpp:146
armnn::BinaryOperation::Minimum
@ Minimum
armnn_driver::ConvertReduce
bool ConvertReduce(const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
Definition: ConversionUtils.cpp:508
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::LstmInputParams::m_ProjectionWeights
const ConstTensor * m_ProjectionWeights
Definition: LstmParams.hpp:55
armnn::ActivationFunction::ReLu
@ ReLu
armnn::LstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: LstmParams.hpp:41
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn::TensorInfo::SetShape
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:193
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1419
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
armnn::OriginsDescriptor::SetViewOriginCoord
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
Definition: Descriptors.cpp:167
armnn::LstmInputParamsInfo::m_CellToOutputWeights
const TensorInfo * m_CellToOutputWeights
Definition: LstmParams.hpp:99
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::QLstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1415
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:803
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1129
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1323
armnn::ReduceOperation::Min
@ Min
armnn_driver::Converter::Operation
::android::nn::Operation Operation
Definition: Converter.hpp:28
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:327
armnn_driver::LayerInputHandle::GetTensorInfo
const armnn::TensorInfo & GetTensorInfo() const
Definition: ConversionUtils.cpp:50
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToInputWeights
const TensorInfo * m_RecurrentToInputWeights
Definition: QuantizedLstmParams.hpp:144
armnn::QuantizedLstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: QuantizedLstmParams.hpp:140
armnn_driver::AreDynamicTensorsSupported
bool AreDynamicTensorsSupported()
Checks for ArmNN support of dynamic tensors.
Definition: CanonicalUtils.cpp:502
armnn::TransposeConvolution2dDescriptor::m_OutputShapeEnabled
bool m_OutputShapeEnabled
Set to true when an explicit output shape (m_OutputShape) has been specified.
Definition: Descriptors.hpp:1464
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1460
armnn::QLstmDescriptor::m_CellClip
float m_CellClip
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1393
armnn::LstmInputParamsInfo::m_InputGateBias
const TensorInfo * m_InputGateBias
Definition: LstmParams.hpp:100
armnn_driver::ConstTensorPin
Definition: ConversionUtils.hpp:90
armnn::QLstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1403
armnn_driver::ConstTensorPin::IsValid
bool IsValid() const
Definition: ConversionUtils.cpp:105
armnn_driver::DequantizeAndMakeConstTensorPin
ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
Definition: ConversionUtils.cpp:731
armnn::BinaryOperation::Div
@ Div
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1462
armnn::QuantizedLstmInputParams::m_InputGateBias
const ConstTensor * m_InputGateBias
Definition: QuantizedLstmParams.hpp:43
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1119
armnn::LstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: LstmParams.hpp:90
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:902
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:704
armnn_driver::GetInputActivationFunction
bool GetInputActivationFunction(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:823
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1121
armnn::LstmInputParamsInfo::m_InputToOutputWeights
const TensorInfo * m_InputToOutputWeights
Definition: LstmParams.hpp:92
armnn::LstmInputParamsInfo::m_ProjectionBias
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
armnn::LstmInputParams
Definition: LstmParams.hpp:13
armnn::LstmInputParamsInfo::m_ProjectionWeights
const TensorInfo * m_ProjectionWeights
Definition: LstmParams.hpp:104
armnn::LstmInputParams::m_CellLayerNormWeights
const ConstTensor * m_CellLayerNormWeights
Definition: LstmParams.hpp:59
armnn::GetComparisonOperationAsCString
constexpr char const * GetComparisonOperationAsCString(ComparisonOperation operation)
Definition: TypesUtils.hpp:61
armnn::TensorShape::GetNumElements
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying all specified dimension sizes.
Definition: Tensor.cpp:181
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1151
armnn::QLstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1399
armnn::QuantizedLstmInputParams
Definition: QuantizedLstmParams.hpp:13
armnn_driver::ConversionData::m_Backends
const std::vector< armnn::BackendId > m_Backends
Definition: ConversionUtils.hpp:59
armnn::OptionalReferenceSwitch::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1619
Converter.hpp
armnn_driver::IsConnectedToDequantize
bool IsConnectedToDequantize(armnn::IOutputSlot *ioutputSlot)
Definition: ConversionUtils.cpp:1040
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::QuantizedLstmInputParams::m_InputToCellWeights
const ConstTensor * m_InputToCellWeights
Definition: QuantizedLstmParams.hpp:35
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToForgetWeights
const TensorInfo * m_RecurrentToForgetWeights
Definition: QuantizedLstmParams.hpp:145
armnn::InstanceNormalizationDescriptor::m_Eps
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
Definition: Descriptors.hpp:869
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1054
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
armnn::ReduceOperation::Max
@ Max
Connect
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:14
armnn_driver::LayerInputHandle::IsValid
bool IsValid() const
Definition: ConversionUtils.cpp:27
armnn_driver::GetInputFloat32
bool GetInputFloat32(const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:791
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::SpaceToDepthDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1074
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:700
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:696