ArmNN 24.02
Converter.cpp
1 //
2 // Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include "Converter.hpp"
7 #include <half/half.hpp>
8 #include <armnn/Exceptions.hpp>
10 
11 namespace armnn_driver
12 {
13 
14 using namespace android::nn;
15 using Half = half_float::half;
16 
17 namespace
18 {
19 
20 } // anonymous namespace
21 
22 bool Converter::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
23 {
24  switch (operation.type)
25  {
26  case OperationType::ABS:
27  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
28  case OperationType::ADD:
29  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Add);
30  case OperationType::ARGMAX:
31  return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
32  case OperationType::ARGMIN:
33  return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
34  case OperationType::AVERAGE_POOL_2D:
35  return ConvertAveragePool2d(operation, model, data);
36  case OperationType::BATCH_MATMUL:
37  return ConvertBatchMatMul(operation, model, data);
38  case OperationType::BATCH_TO_SPACE_ND:
39  return ConvertBatchToSpaceNd(operation, model, data);
40  case OperationType::CAST:
41  return ConvertCast(operation, model, data);
42  case OperationType::CONCATENATION:
43  return ConvertConcatenation(operation, model, data);
44  case OperationType::CONV_2D:
45  return ConvertConv2d(operation, model, data);
46  case OperationType::DEPTH_TO_SPACE:
47  return ConvertDepthToSpace(operation, model, data);
48  case OperationType::DEPTHWISE_CONV_2D:
49  return ConvertDepthwiseConv2d(operation, model, data);
50  case OperationType::DEQUANTIZE:
51  return ConvertDequantize(operation, model, data);
52  case OperationType::DIV:
53  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Div);
54  case OperationType::ELU:
55  return ConvertElu(operation, model, data);
56  case OperationType::EQUAL:
57  return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
58  case OperationType::EXP:
59  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Exp);
60  case OperationType::EXPAND_DIMS:
61  return ConvertExpandDims(operation, model, data);
62  case OperationType::FILL:
63  return ConvertFill(operation, model, data);
64  case OperationType::FLOOR:
65  return ConvertFloor(operation, model, data);
66  case OperationType::FULLY_CONNECTED:
67  return ConvertFullyConnected(operation, model, data);
68  case OperationType::GATHER:
69  return ConvertGather(operation, model, data);
70  case OperationType::GREATER:
71  return ConvertComparison(operation, model, data, ComparisonOperation::Greater);
72  case OperationType::GREATER_EQUAL:
73  return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
74  case OperationType::GROUPED_CONV_2D:
75  return ConvertGroupedConv2d(operation, model, data);
76  case OperationType::HARD_SWISH:
77  return ConvertHardSwish(operation, model, data);
78  case OperationType::INSTANCE_NORMALIZATION:
79  return ConvertInstanceNormalization(operation, model, data);
80  case OperationType::L2_NORMALIZATION:
81  return ConvertL2Normalization(operation, model, data);
82  case OperationType::L2_POOL_2D:
83  return ConvertL2Pool2d(operation, model, data);
84  case OperationType::LESS:
85  return ConvertComparison(operation, model, data, ComparisonOperation::Less);
86  case OperationType::LESS_EQUAL:
87  return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
88  case OperationType::LOCAL_RESPONSE_NORMALIZATION:
89  return ConvertLocalResponseNormalization(operation, model, data);
90  case OperationType::LOG:
91  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Log);
92  case OperationType::LOGICAL_AND:
93  return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
94  case OperationType::LOGICAL_NOT:
95  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
96  case OperationType::LOGICAL_OR:
97  return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
98  case OperationType::LOGISTIC:
99  return ConvertLogistic(operation, model, data);
100  case OperationType::LOG_SOFTMAX:
101  return ConvertLogSoftmax(operation, model, data);
102  case OperationType::LSTM:
103  return ConvertLstm(operation, model, data);
104  case OperationType::MAX_POOL_2D:
105  return ConvertMaxPool2d(operation, model, data);
106  case OperationType::MAXIMUM:
107  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Maximum);
108  case OperationType::MEAN:
109  return ConvertMean(operation, model, data);
110  case OperationType::MINIMUM:
111  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Minimum);
112  case OperationType::MUL:
113  return ConvertElementwiseBinary(operation, model, data, armnn::BinaryOperation::Mul);
114  case OperationType::NEG:
115  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
116  case OperationType::NOT_EQUAL:
117  return ConvertComparison(operation, model, data, ComparisonOperation::NotEqual);
118  case OperationType::PAD:
119  return ConvertPad(operation, model, data);
120  case OperationType::PAD_V2:
121  return ConvertPadV2(operation, model, data);
122  case OperationType::PRELU:
123  return ConvertPrelu(operation, model, data);
124  case OperationType::POW:
125  return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Power);
126  case OperationType::QUANTIZE:
127  return ConvertQuantize(operation, model, data);
128  case OperationType::QUANTIZED_LSTM:
129  return ConvertQuantizedLstm(operation, model, data);
130  case OperationType::QUANTIZED_16BIT_LSTM:
131  return ConvertQuantized16BitLstm(operation, model, data);
132  case OperationType::RANK:
133  return ConvertRank(operation, model, data);
134  case OperationType::REDUCE_MAX:
135  return ConvertReduce(operation, model, data, armnn::ReduceOperation::Max);
136  case OperationType::REDUCE_MIN:
137  return ConvertReduce(operation, model, data, armnn::ReduceOperation::Min);
138  case OperationType::REDUCE_PROD:
139  return ConvertReduce(operation, model, data, armnn::ReduceOperation::Prod);
140  case OperationType::REDUCE_SUM:
141  return ConvertReduce(operation, model, data, armnn::ReduceOperation::Sum);
142  case OperationType::RELU:
143  return ConvertReLu(operation, model, data);
144  case OperationType::RELU1:
145  return ConvertReLu1(operation, model, data);
146  case OperationType::RELU6:
147  return ConvertReLu6(operation, model, data);
148  case OperationType::RESHAPE:
149  return ConvertReshape(operation, model, data);
150  case OperationType::RESIZE_BILINEAR:
151  return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
152  case OperationType::RESIZE_NEAREST_NEIGHBOR:
153  return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
154  case OperationType::REVERSE:
155  return ConvertReverseV2(operation, model, data);
156  case OperationType::RSQRT:
157  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
158  case OperationType::SIN:
159  return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Sin);
160  case OperationType::SOFTMAX:
161  return ConvertSoftmax(operation, model, data);
162  case OperationType::SPACE_TO_BATCH_ND:
163  return ConvertSpaceToBatchNd(operation, model, data);
164  case OperationType::SPACE_TO_DEPTH:
165  return ConvertSpaceToDepth(operation, model, data);
166  case OperationType::SQRT:
167  return ConvertSqrt(operation, model, data);
168  case OperationType::SQUEEZE:
169  return ConvertSqueeze(operation, model, data);
170  case OperationType::STRIDED_SLICE:
171  return ConvertStridedSlice(operation, model, data);
172  case OperationType::SUB:
173  return ConvertElementwiseBinary(operation, model, data, BinaryOperation::Sub);
174  case OperationType::TILE:
175  return ConvertTile(operation, model, data);
176  case OperationType::TRANSPOSE:
177  return ConvertTranspose(operation, model, data);
178  case OperationType::TRANSPOSE_CONV_2D:
179  return ConvertTransposeConv2d(operation, model, data);
180  case OperationType::TANH:
181  return ConvertTanH(operation, model, data);
182  default:
183  VLOG(DRIVER) << "Operation type: " << operation.type << " is not supported in ArmnnDriver";
184  return false;
185  }
186 }
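
For orientation, a minimal standalone sketch (names and types are illustrative stand-ins, not the driver's API) of how a caller typically drives this dispatch: walk the model's operations and abort on the first one the converter rejects.

    #include <cstdio>
    #include <vector>

    struct Operation { int type; };
    struct Model { std::vector<Operation> operations; };
    struct ConversionData {};

    // Stand-in for Converter::ConvertOperation; always succeeds here.
    static bool ConvertOperationStub(const Operation&, const Model&, ConversionData&)
    {
        return true;
    }

    static bool ConvertModel(const Model& model, ConversionData& data)
    {
        for (const Operation& op : model.operations)
        {
            if (!ConvertOperationStub(op, model, data))
            {
                std::printf("operation type %d is not supported\n", op.type);
                return false; // the first unsupported operation aborts the conversion
            }
        }
        return true;
    }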
187 
188 bool Converter::ConvertArgMinMax(const Operation& operation,
189  const Model& model,
190  ConversionData& data,
191  armnn::ArgMinMaxFunction argMinMaxFunction)
192 {
193  VLOG(DRIVER) << "Converter::ConvertArgMinMax()";
194  VLOG(DRIVER) << "argMinMaxFunction = " << GetArgMinMaxFunctionAsCString(argMinMaxFunction);
195 
196  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
197 
198  if (!input0.IsValid())
199  {
200  return Fail("%s: Operation has invalid inputs", __func__);
201  }
202 
203  int32_t axis;
204  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
205  {
206  return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
207  }
208 
209  const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
210  int rank = static_cast<int>(inputInfo.GetNumDimensions());
211 
212  if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
213  {
214  // A square bracket denotes an inclusive bound while a parenthesis denotes an exclusive bound
215  // e.g. a rank-4 tensor can have an axis in the range [-4, 3)
216  // -1 == 3, -2 == 2, -3 == 1, -4 == 0
217  return Fail("%s: Axis must be in range [-n, n)", __func__);
218  }
219 
220  const Operand* output = GetOutputOperand(operation, 0, model);
221  if (!output)
222  {
223  return Fail("%s: Could not read output 0", __func__);
224  }
225 
226  const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
227 
228  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
229 
230  armnn::ArgMinMaxDescriptor descriptor;
231  descriptor.m_Function = argMinMaxFunction;
232  descriptor.m_Axis = axis;
233 
234  bool isSupported = false;
235  armnn::BackendId setBackend;
236  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
237  {
238  FORWARD_LAYER_SUPPORT_FUNC(__func__,
239  IsArgMinMaxSupported,
240  data.m_Backends,
241  isSupported,
242  setBackend,
243  inputInfo0,
244  outputInfo,
245  descriptor);
246  };
247 
248  if(IsDynamicTensor(outputInfo))
249  {
250  isSupported = AreDynamicTensorsSupported();
251  }
252  else
253  {
254  validateFunc(outputInfo, isSupported);
255  }
256 
257  if (!isSupported)
258  {
259  return false;
260  }
261 
262  armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
263  assert(layer != nullptr);
264  layer->SetBackendId(setBackend);
265 
266  input0.Connect(layer->GetInputSlot(0));
267 
268  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
269 }
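
A minimal standalone sketch (not from this file; function names are illustrative) of the axis convention checked above: an axis is valid in [-rank, rank), and a negative axis counts back from the last dimension.

    // Valid axis range for a tensor of the given rank: [-rank, rank).
    static bool AxisInRange(int axis, int rank)
    {
        return axis >= -rank && axis < rank; // rank 4 accepts -4..3
    }

    // Normalise a negative axis, e.g. -1 -> rank - 1, -rank -> 0.
    static int NormaliseAxis(int axis, int rank)
    {
        return axis < 0 ? axis + rank : axis;
    }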
270 
271 bool Converter::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
272 {
273  VLOG(DRIVER) << "Converter::ConvertAveragePool2d()";
274  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Average, model, data);
275 }
276 
277 bool Converter::ConvertBatchMatMul(const Operation& operation, const Model& model, ConversionData& data)
278 {
279  VLOG(DRIVER) << "Converter::ConvertBatchMatMul()";
280  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
281  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
282 
283  if (!input0.IsValid() || !input1.IsValid())
284  {
285  return Fail("%s: Operation has invalid inputs", __func__);
286  }
287 
288  const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
289  const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
290 
291  unsigned int rankInput0 = inputInfo0.GetNumDimensions();
292  if (rankInput0 > 4 || rankInput0 < 2)
293  {
294  Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
295  }
296 
297  unsigned int rankInput1 = inputInfo1.GetNumDimensions();
298  if (rankInput1 > 4 || rankInput1 < 2)
299  {
300  Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
301  }
302 
303  // Determine data type of input tensor 0
304  OperandType input0Type;
305  if (!GetOperandType(operation, 0, model, input0Type))
306  {
307  return Fail("%s: Operation has invalid inputs", __func__);
308  }
309 
310  // Determine data type of input tensor 1
311  OperandType input1Type;
312  if (!GetOperandType(operation, 1, model, input1Type))
313  {
314  return Fail("%s: Operation has invalid inputs", __func__);
315  }
316 
317  if (input0Type != input1Type)
318  {
319  return Fail("%s: Operation has invalid inputs (Inputs must have same OperandCode)", __func__);
320  }
321 
322  const Operand* output = GetOutputOperand(operation, 0, model);
323  if (!output)
324  {
325  return Fail("%s: Could not read output 0", __func__);
326  }
327 
328  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
329 
330  armnn::BatchMatMulDescriptor batchMatMulDesc;
331 
332  // Inputs 2 and 3 are called "adjoint" in Android NN, but they actually perform a transpose.
333  // That is why they are mapped to the transpose parameters of the descriptor.
334  batchMatMulDesc.m_TransposeX = GetOptionalBool(operation, 2, model, data);
335  batchMatMulDesc.m_TransposeY = GetOptionalBool(operation, 3, model, data);
336 
337  bool isSupported = false;
338  armnn::BackendId setBackend;
339  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
340  {
341  FORWARD_LAYER_SUPPORT_FUNC(__func__,
342  IsBatchMatMulSupported,
343  data.m_Backends,
344  isSupported,
345  setBackend,
346  inputInfo0,
347  inputInfo1,
348  outputInfo,
349  batchMatMulDesc);
350  };
351 
352  if(!IsDynamicTensor(outputInfo))
353  {
354  validateFunc(outputInfo, isSupported);
355  }
356  else
357  {
358  isSupported = AreDynamicTensorsSupported();
359  }
360 
361 
362  if (!isSupported)
363  {
364  return false;
365  }
366 
367  armnn::IConnectableLayer* const layer = data.m_Network->AddBatchMatMulLayer(batchMatMulDesc);
368  assert(layer != nullptr);
369  layer->SetBackendId(setBackend);
370  input0.Connect(layer->GetInputSlot(0));
371  input1.Connect(layer->GetInputSlot(1));
372 
373  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
374 }
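
To illustrate the adjoint-to-transpose mapping noted above, a standalone sketch (illustrative names, not ArmNN API) of how a transpose flag changes the shape an operand contributes to the matmul:

    #include <utility>

    // {rows, cols} of a 2-D matmul operand after optional transposition;
    // NNAPI's adj_x/adj_y behave as m_TransposeX/m_TransposeY here.
    static std::pair<int, int> EffectiveShape(int rows, int cols, bool transpose)
    {
        return transpose ? std::make_pair(cols, rows) : std::make_pair(rows, cols);
    }
    // e.g. a [2, 3] operand with the flag set behaves as [3, 2] in the product.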
375 
376 bool Converter::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
377 {
378  VLOG(DRIVER) << "Converter::ConvertBatchToSpaceNd()";
379  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
380  if (!input.IsValid())
381  {
382  return Fail("%s: Operation has invalid inputs", __func__);
383  }
384 
385  const Operand* output = GetOutputOperand(operation, 0, model);
386  if (!output)
387  {
388  return Fail("%s: Could not read output 0", __func__);
389  }
390 
391  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
392 
393  const Operand* blockOperand = GetInputOperand(operation, 1, model);
394  if (!blockOperand)
395  {
396  return Fail("%s: Could not read input 1", __func__);
397  }
398 
399  // Convert the block operand to int32
400  std::vector<int32_t> block;
401  if (!GetTensorInt32Values(*blockOperand, block, model, data))
402  {
403  return Fail("%s: Input 1 has invalid values", __func__);
404  }
405 
406  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
407 
408  unsigned int rank = inputInfo.GetNumDimensions();
409  if (rank != 4)
410  {
411  Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
412  }
413 
414  if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
415  {
416  return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
417  " greater than or equal to 1", __func__);
418  }
419 
420  armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
421  batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
422  batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
423 
424  if (Is12OrLaterOperand(*output))
425  {
426  batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
427  }
428  // Set crops to {{0, 0}, {0, 0}}, as crops are not supported by the Android NN API
429  batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
430 
431  bool isSupported = false;
432  armnn::BackendId setBackend;
433  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
434  {
435  FORWARD_LAYER_SUPPORT_FUNC(__func__,
436  IsBatchToSpaceNdSupported,
437  data.m_Backends,
438  isSupported,
439  setBackend,
440  inputInfo,
441  outputInfo,
442  batchToSpaceNdDesc);
443  };
444 
445  if(!IsDynamicTensor(outputInfo))
446  {
447  validateFunc(outputInfo, isSupported);
448  }
449  else
450  {
451  isSupported = AreDynamicTensorsSupported();
452  }
453 
454 
455  if (!isSupported)
456  {
457  return false;
458  }
459 
460  armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
461  assert(layer != nullptr);
462  layer->SetBackendId(setBackend);
463  input.Connect(layer->GetInputSlot(0));
464 
465  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
466 }
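
A standalone sketch (not from this file) of the shape arithmetic implied above for NHWC with block sizes {bH, bW} and the zero crops set in the descriptor:

    // BATCH_TO_SPACE_ND moves data from the batch dimension into the spatial
    // dimensions: [N, H, W, C] -> [N / (bH * bW), H * bH, W * bW, C].
    static void BatchToSpaceShape(const int in[4], int bH, int bW, int out[4])
    {
        out[0] = in[0] / (bH * bW); // batch shrinks by the block volume
        out[1] = in[1] * bH;        // height grows by the block height
        out[2] = in[2] * bW;        // width grows by the block width
        out[3] = in[3];             // channels are unchanged
    }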
467 
468 bool Converter::ConvertCast(const Operation& operation, const Model& model, ConversionData& data)
469 {
470  VLOG(DRIVER) << "Converter::ConvertCast()";
471 
472  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
473 
474  if (!input.IsValid())
475  {
476  return Fail("%s: Operation has invalid inputs", __func__);
477  }
478 
479  const Operand* output = GetOutputOperand(operation, 0, model);
480  if (!output)
481  {
482  return Fail("%s: Could not read output 0", __func__);
483  }
484 
485  const TensorInfo& inputInfo = input.GetTensorInfo();
486  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
487 
488  bool isSupported = false;
489  armnn::BackendId setBackend;
490  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
491  {
492  FORWARD_LAYER_SUPPORT_FUNC(__func__,
493  IsCastSupported,
494  data.m_Backends,
495  isSupported,
496  setBackend,
497  inputInfo,
498  outputInfo);
499  };
500 
501  if(!IsDynamicTensor(outputInfo))
502  {
503  validateFunc(outputInfo, isSupported);
504  }
505  else
506  {
507  isSupported = AreDynamicTensorsSupported();
508  }
509 
510  if (!isSupported)
511  {
512  return false;
513  }
514 
515  IConnectableLayer* layer = data.m_Network->AddCastLayer();
516  assert(layer != nullptr);
517  layer->SetBackendId(setBackend);
518  input.Connect(layer->GetInputSlot(0));
519 
520  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
521 }
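
ConvertCast shows, in its simplest form, the validation idiom repeated throughout this file; a condensed sketch of that control flow (names are illustrative, not ArmNN API):

    #include <functional>

    // Static output shapes are validated eagerly via the backend's
    // Is<Layer>Supported query; dynamic ("type 1") outputs defer validation
    // until their shapes have been inferred.
    static bool ValidateOutput(bool outputIsDynamic,
                               bool dynamicTensorsSupported,
                               const std::function<bool()>& validateNow)
    {
        return outputIsDynamic ? dynamicTensorsSupported : validateNow();
    }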
522 
523 bool Converter::ConvertComparison(const Operation& operation,
524  const Model& model,
525  ConversionData& data,
526  ComparisonOperation comparisonOperation)
527 {
528  VLOG(DRIVER) << "Converter::ConvertComparison()";
529  VLOG(DRIVER) << "comparisonOperation = " << GetComparisonOperationAsCString(comparisonOperation);
530 
531  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
532  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
533 
534  if (!(input0.IsValid() && input1.IsValid()))
535  {
536  return Fail("%s: Operation has invalid inputs", __func__);
537  }
538 
539  const Operand* output = GetOutputOperand(operation, 0, model);
540  if (!output)
541  {
542  return Fail("%s: Could not read output 0", __func__);
543  }
544 
545  const TensorInfo& inputInfo0 = input0.GetTensorInfo();
546  const TensorInfo& inputInfo1 = input1.GetTensorInfo();
547  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
548 
549  ComparisonDescriptor descriptor(comparisonOperation);
550 
551  bool isSupported = false;
552  armnn::BackendId setBackend;
553  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
554  {
555  FORWARD_LAYER_SUPPORT_FUNC(__func__,
556  IsComparisonSupported,
557  data.m_Backends,
558  isSupported,
559  setBackend,
560  inputInfo0,
561  inputInfo1,
562  outputInfo,
563  descriptor);
564  };
565 
566  if(!IsDynamicTensor(outputInfo))
567  {
568  validateFunc(outputInfo, isSupported);
569  }
570  else
571  {
572  isSupported = AreDynamicTensorsSupported();
573  }
574 
575  if (!isSupported)
576  {
577  return false;
578  }
579 
580  IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
581  assert(layer != nullptr);
582  layer->SetBackendId(setBackend);
583 
584  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
585  if (!isReshapeSupported)
586  {
587  return false;
588  }
589 
590  if(IsDynamicTensor(outputInfo))
591  {
592  input0.Connect(layer->GetInputSlot(0));
593  input1.Connect(layer->GetInputSlot(1));
594  }
595 
596  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
597 }
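
The BroadcastTensor call above reshapes the lower-rank operand so both inputs align; for reference, a standalone sketch of the per-dimension compatibility rule used by NNAPI-style broadcasting:

    // Two dimensions are broadcast-compatible when they are equal or when
    // either of them is 1 (the length-1 side is stretched).
    static bool BroadcastCompatible(int dimA, int dimB)
    {
        return dimA == dimB || dimA == 1 || dimB == 1;
    }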
598 
599 
600 bool Converter::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
601 {
602  VLOG(DRIVER) << "Converter::ConvertConcatenation()";
603 
604  // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
605  if (operation.inputs.size() <= 1)
606  {
607  return Fail("%s: Operation has insufficient arguments", __func__);
608  }
609 
610  // Get inputs and outputs
611  const std::size_t numInputTensors = operation.inputs.size() - 1;
612 
613  int32_t concatDim;
614  if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
615  {
616  return Fail("%s: Operation has invalid inputs", __func__);
617  }
618 
619  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
620  if (!outputOperand)
621  {
622  return Fail("%s: Operation has no outputs", __func__);
623  }
624 
625  armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
626  armnn::TensorShape outputShape = outputInfo.GetShape();
627  const bool isDynamicTensor = IsDynamicTensor(outputInfo);
628  //
629  // handle negative concat dims along the lines of tensorflow as described here:
630  // https://www.tensorflow.org/api_docs/python/tf/concat
631  // "negative axis refers to axis + rank(values)-th dimension"
632  //
633  if (concatDim < 0)
634  {
635  concatDim += outputShape.GetNumDimensions();
636  }
637 
638  if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
639  {
640  return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
641  }
642 
643  std::vector<LayerInputHandle> inputHandles;
644  std::vector<armnn::TensorShape> inputShapes;
645 
646  inputHandles.reserve(numInputTensors);
647  inputShapes.reserve(numInputTensors);
648 
649  bool inputsHaveBeenReshaped = false;
650  unsigned int tensorDimensionsAdded = 0;
651  for (uint32_t i = 0; i < numInputTensors; ++i)
652  {
653  const Operand* operand = GetInputOperand(operation, i, model);
654  if (!operand)
655  {
656  return Fail("%s: Operation has invalid inputs", __func__);
657  }
658 
659  LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
660  if (!operandInputHandle.IsValid())
661  {
662  return Fail("%s: Operation has invalid inputs", __func__);
663  }
664 
665  armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
666  if (operandShape.GetNumDimensions() == 0)
667  {
668  return Fail("%s: Operands with rank 0 are not supported", __func__);
669  }
670 
671  if (RequiresReshape(operandShape))
672  {
673  inputsHaveBeenReshaped = true;
674 
675  armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
676 
677  // Expand the tensor to three dimensions
678  if (operandShape.GetNumDimensions() == 2)
679  {
680  reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
681  tensorDimensionsAdded = 1;
682  }
683  else
684  {
685  reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
686  tensorDimensionsAdded = 2;
687  }
688 
689  armnn::ReshapeDescriptor reshapeDescriptor;
690  reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
691 
692  bool isSupported = false;
693  armnn::BackendId setBackendReshape;
694  FORWARD_LAYER_SUPPORT_FUNC(__func__,
695  IsReshapeSupported,
696  data.m_Backends,
697  isSupported,
698  setBackendReshape,
699  operandInputHandle.GetTensorInfo(),
700  reshapeInfo,
701  reshapeDescriptor);
702 
703  if (!isSupported)
704  {
705  return false;
706  }
707  armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
708  newReshape.SetBackendId(setBackendReshape);
709 
710  // Point to the reshape operation rather than the input operation
711  operandShape = reshapeInfo.GetShape();
712  operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
713  }
714 
715  inputShapes.emplace_back(operandShape);
716  inputHandles.emplace_back(operandInputHandle);
717 
718  if (!inputHandles.back().IsValid())
719  {
720  return Fail("%s: Operation has invalid inputs", __func__);
721  }
722  }
723 
724  if (inputShapes.size() != inputHandles.size())
725  {
726  return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
727  inputShapes.size(), inputHandles.size());
728  }
729 
730  if (inputsHaveBeenReshaped)
731  {
732  // Adjust the concatenation dimension by the amount of dimensions added (if any)
733  concatDim += tensorDimensionsAdded;
734 
735  // Add extra dimensions to the output shape to reflect the addition of the reshape layers
736  if (tensorDimensionsAdded == 1)
737  {
738  if (IsDynamicTensor(outputInfo))
739  {
740  outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
741  }
742  else
743  {
744  outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
745  }
746  }
747  else if (tensorDimensionsAdded == 2)
748  {
749  if (IsDynamicTensor(outputInfo))
750  {
751  outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
752  }
753  else
754  {
755  outputShape = armnn::TensorShape({1, 1, outputShape[0]});
756  }
757  }
758  }
759 
760  // Check if a permutation is required and get the pair of permutations needed for the concatenation.
761  // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
762  std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
763  std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
764  bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
765  concatDim,
766  permutationPair);
767 
768  // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
769  if (!isDynamicTensor)
770  {
771  if (needPermute)
772  {
773  outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
774  }
775 
776  outputInfo.SetShape(outputShape);
777  }
778  // This is a no-op for identity swizzles; otherwise it replaces both
779  // the handles and shapes with the swizzled layer output handles and shapes
780  if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
781  {
782  return false;
783  }
784 
785  // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
786  armnn::OriginsDescriptor concatDescriptor;
787 
788  try
789  {
790  // The concat descriptor is always created across the only supported concat dimension
791  // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
792  concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
793  inputShapes.end(),
794  concatDim);
795  } catch (std::exception& error)
796  {
797  return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
798  }
799 
800  // Validate the output shape is correct given the input shapes based on the
801  // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
802  if (!isDynamicTensor)
803  {
804  if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
805  {
806  return Fail("%s: Error validating the output shape for concat", __func__);
807  }
808  }
809 
810  std::vector<const armnn::TensorInfo*> inputTensorInfos;
811  std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
812  [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
813 
814  bool isSupported = false;
815  armnn::BackendId setBackendConcat;
816  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
817  FORWARD_LAYER_SUPPORT_FUNC(__func__,
818  IsConcatSupported,
819  data.m_Backends,
820  isSupported,
821  setBackendConcat,
822  inputTensorInfos,
823  outputInfo,
824  concatDescriptor);
825  };
826 
827  if (!isDynamicTensor)
828  {
829  validateFunc(outputInfo, isSupported);
830  }
831  else
832  {
833  isSupported = AreDynamicTensorsSupported();
834  }
835 
836  if (!isSupported)
837  {
838  return false;
839  }
840 
841  armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
842  assert(layer != nullptr);
843  layer->SetBackendId(setBackendConcat);
844  layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
845  // Connect inputs to the layer
846  const int numInputSlots = layer->GetNumInputSlots();
847  assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
848  for (int i = 0; i < numInputSlots; ++i)
849  {
850  // connect the input directly to the merge (concat) layer
851  inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
852  }
853 
854  // Transpose the output shape
855  auto transposeOutputShape = [&](){
856  armnn::TransposeDescriptor transposeDesc;
857  transposeDesc.m_DimMappings = permutationPair.second;
858  armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
859  armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
860  permutationPair.second);
861  isSupported = false;
862  armnn::BackendId setBackendTranspose;
863  FORWARD_LAYER_SUPPORT_FUNC(__func__,
864  IsTransposeSupported,
865  data.m_Backends,
866  isSupported,
867  setBackendTranspose,
868  inputTransposeInfo,
869  outputTransposeInfo,
870  transposeDesc);
871  if (!isSupported)
872  {
873  return false;
874  }
875  // Add permutation layer and connect the output to it, the permutation becomes the output layer
876  armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
877  permutationPair.second);
878  deswizzleLayer.SetBackendId(setBackendTranspose);
879  layer = &deswizzleLayer;
880 
881  return true;
882  };
883 
884  if (needPermute && !isDynamicTensor)
885  {
886  transposeOutputShape();
887  }
888 
889  if (inputsHaveBeenReshaped)
890  {
891  if (isDynamicTensor)
892  {
893  // Infer the output shapes of concat if outputs are type 1 dynamic
894  if (!layer->GetOutputSlot(0).IsTensorInfoSet())
895  {
896  throw armnn::Exception(
897  "tensor info is not set on output slot, cannot process dynamic tensor after input reshape");
898  }
899  if (!ValidateConcatOutputShape(inputShapes,
900  layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
901  concatDim))
902  {
903  return Fail("%s: Error validating the output shape for concat", __func__);
904  }
905  transposeOutputShape();
906  }
907 
908  armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
909  // Undo the reshape knowing the amount of dimensions added
910  if (tensorDimensionsAdded == 1)
911  {
912  afterConcatInfo.SetShape(
913  armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
914  }
915  else if (tensorDimensionsAdded == 2)
916  {
917  afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
918  }
919 
920  armnn::ReshapeDescriptor reshapeDescriptor;
921  reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
922  armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
923 
924  isSupported = false;
925  armnn::BackendId setBackendReshape2;
926  auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
927  FORWARD_LAYER_SUPPORT_FUNC(__func__,
928  IsReshapeSupported,
929  data.m_Backends,
930  isSupported,
931  setBackendReshape2,
932  concatInfo,
933  afterConcatInfo,
934  reshapeDescriptor);
935  };
936 
937  if (!IsDynamicTensor(afterConcatInfo))
938  {
939  validateReshapeFunc(afterConcatInfo, isSupported);
940  }
941  else
942  {
943  isSupported = AreDynamicTensorsSupported();
944  }
945 
946  if (!isSupported)
947  {
948  return false;
949  }
950  layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
951  layer->SetBackendId(setBackendReshape2);
952  return SetupAndTrackLayerOutputSlot(operation,
953  0,
954  *layer,
955  model,
956  data,
957  nullptr,
958  validateReshapeFunc);
959  }
960 
961  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
962 }
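
A standalone sketch of the negative-axis handling near the top of ConvertConcatenation, following the TensorFlow convention cited in the comment (illustrative name):

    // Resolve a possibly negative concat axis against the tensor rank.
    static int ResolveConcatAxis(int axis, int rank)
    {
        if (axis < 0)
        {
            axis += rank; // e.g. axis -1 on a rank-4 tensor becomes 3
        }
        return axis;      // the caller must still check 0 <= axis < rank
    }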
963 
964 bool Converter::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
965 {
966  VLOG(DRIVER) << "Converter::ConvertConv2d()";
967 
968  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
969  if (!input.IsValid())
970  {
971  return Fail("%s: Operation has invalid inputs", __func__);
972  }
973 
974  const Operand* output = GetOutputOperand(operation, 0, model);
975  if (!output)
976  {
977  return Fail("%s: Could not read output 0", __func__);
978  }
979 
980  const TensorInfo& inputInfo = input.GetTensorInfo();
981  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
982 
984  desc.m_DataLayout = DataLayout::NHWC;
985 
986  // Determine whether padding is implicit or explicit
987  bool implicitPadding = operation.inputs.size() == 7
988  || (operation.inputs.size() >= 8
989  && GetInputOperand(operation, 7, model)->type == OperandType::BOOL);
990 
991  if (implicitPadding)
992  {
993  desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
994  }
995  else if (operation.inputs.size() >= 10)
996  {
997  desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
998  }
999 
1000  const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
1001 
1002  // ArmNN does not currently support non-fixed weights or bias
1003  // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
1004  // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
1005  // the DataLayout is NCHW
1006 
1007  if (!IsWeightsValid(operation, 1, model, false) && desc.m_DataLayout == DataLayout::NCHW)
1008  {
1009  return Fail("%s: Operation has unsupported weights OperandLifeTime", __func__);
1010  }
1011 
1012  LayerInputHandle weightsInput = (desc.m_DataLayout == DataLayout::NCHW)
1013  ? ConvertToLayerInputHandle(operation, 1, model, data, OHWIToOIHW, &input)
1014  : ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);
1015 
1016  if (!weightsInput.IsValid())
1017  {
1018  return Fail("%s: Operation has invalid inputs", __func__);
1019  }
1020 
1021  LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
1022  if (!biasInput.IsValid())
1023  {
1024  return Fail("%s: Operation has invalid inputs", __func__);
1025  }
1026 
1027  biasInput.SanitizeQuantizationScale(weightsInput, input);
1028  armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1029  armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1030 
1031  ActivationFn activation;
1032  if (implicitPadding)
1033  {
1034  ::android::nn::PaddingScheme paddingScheme;
1035  if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
1036  || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
1037  || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
1038  || !GetInputActivationFunction(operation, 6, activation, model, data)
1039  || !GetOptionalConvolutionDilationParams(operation, 8, desc, model, data))
1040  {
1041  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1042  }
1043 
1044  armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1045  unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1046  unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1047  const uint32_t kernelX = weightsInfo.GetShape()[widthIndex];
1048  const uint32_t kernelY = weightsInfo.GetShape()[heightIndex];
1049  const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1050  const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1051 
1052  CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1053  CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
1054 
1055  }
1056  else if (operation.inputs.size() >= 10)
1057  {
1058  // explicit padding
1059  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
1060  || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
1061  || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
1062  || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
1063  || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
1064  || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
1065  || !GetInputActivationFunction(operation, 9, activation, model, data)
1066  || !GetOptionalConvolutionDilationParams(operation, 11, desc, model, data))
1067  {
1068  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
1069  }
1070  }
1071  else
1072  {
1073  return Fail("%s: Unsupported number of operation inputs", __func__);
1074  }
1075 
1076  desc.m_BiasEnabled = true;
1077  Optional<TensorInfo> biases(biasInfo);
1078 
1079  bool requiresValidation = true;
1080  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1081  const Operand* biasOperand = GetInputOperand(operation, 2, model);
1082  if (IsConnectedToDequantize(weightsInput.GetOutputSlot())
1083  || IsConnectedToDequantize(biasInput.GetOutputSlot()))
1084  {
1085  // Do not require validation for now. An optimization step
1086  // [ConvertConstDequantisationLayersToConstLayers] will convert these layers to Constant layers,
1087  // and layer support validation is then performed at the end of the optimization.
1088  requiresValidation = false;
1089  VLOG(DRIVER) << "Converter::ConvertConv2d(): Weights and Biases are as INPUTS.";
1090  }
1091 
1092  armnn::BackendId setBackend;
1093  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
1094  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1095  IsConvolution2dSupported,
1096  data.m_Backends,
1097  isSupported,
1098  setBackend,
1099  inputInfo,
1100  outputInfo,
1101  desc,
1102  weightsInfo,
1103  biases);
1104  };
1105 
1106  if (requiresValidation)
1107  {
1108  VLOG(DRIVER) << "Converter::ConvertConv2d(): Requires Validation!";
1109  bool isSupported = false;
1110  if (!IsDynamicTensor(outputInfo))
1111  {
1112  validateFunc(outputInfo, isSupported);
1113  }
1114  else
1115  {
1116  isSupported = AreDynamicTensorsSupported();
1117  }
1118 
1119  if (!isSupported)
1120  {
1121  return false;
1122  }
1123  }
1124 
1125  armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
1126  if (!startLayer)
1127  {
1128  return Fail("%s: AddConvolution2dLayer failed", __func__);
1129  }
1130 
1131  startLayer->SetBackendId(setBackend);
1132 
1133  input.Connect(startLayer->GetInputSlot(0));
1134  weightsInput.Connect(startLayer->GetInputSlot(1));
1135  biasInput.Connect(startLayer->GetInputSlot(2));
1136 
1137  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
1138 }
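
For the implicit-padding branch above, a standalone sketch in the spirit of CalcPadding (illustrative name, not the driver's helper) of how a SAME scheme resolves per-edge padding from the input size, stride, and dilated kernel; VALID simply uses zero padding:

    static void SamePadding(int inSize, int kernel, int stride, int dilation,
                            int& padFront, int& padBack)
    {
        const int dilated = dilation * (kernel - 1) + 1;     // effective kernel extent
        const int outSize = (inSize + stride - 1) / stride;  // ceil(inSize / stride)
        const int total   = (outSize - 1) * stride + dilated - inSize;
        const int pad     = total > 0 ? total : 0;
        padFront = pad / 2;        // SAME splits the padding, extra pixel at the back
        padBack  = pad - padFront;
    }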
1139 
1140 bool Converter::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
1141 {
1142  VLOG(DRIVER) << "Converter::ConvertDepthToSpace()";
1143 
1144  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1145  if (!input.IsValid())
1146  {
1147  return Fail("%s: Operation has invalid inputs", __func__);
1148  }
1149 
1150  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1151  unsigned int rank = inputInfo.GetNumDimensions();
1152  if (rank != 4)
1153  {
1154  return Fail("%s: Only inputs with rank 4 are supported", __func__);
1155  }
1156 
1157  const Operand* output = GetOutputOperand(operation, 0, model);
1158  if (!output)
1159  {
1160  return Fail("%s: Could not read output 0", __func__);
1161  }
1162 
1163  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1164 
1165  armnn::DepthToSpaceDescriptor descriptor;
1166 
1167  GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_BlockSize, model, data);
1168  if (descriptor.m_BlockSize <= 1)
1169  {
1170  return Fail("%s: Block size must be at least 1 in all dimensions", __func__);
1171  }
1172 
1173  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
1174  if (Is12OrLaterOperand(*output))
1175  {
1176  descriptor.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
1177  }
1178 
1179  bool isSupported = false;
1180  armnn::BackendId setBackend;
1181  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1182  {
1183  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1184  IsDepthToSpaceSupported,
1185  data.m_Backends,
1186  isSupported,
1187  setBackend,
1188  inputInfo,
1189  outputInfo,
1190  descriptor);
1191  };
1192 
1193  if(!IsDynamicTensor(outputInfo))
1194  {
1195  validateFunc(outputInfo, isSupported);
1196  }
1197  else
1198  {
1199  isSupported = AreDynamicTensorsSupported();
1200  }
1201 
1202  if (!isSupported)
1203  {
1204  return false;
1205  }
1206 
1207  armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
1208  assert(layer != nullptr);
1209  layer->SetBackendId(setBackend);
1210  input.Connect(layer->GetInputSlot(0));
1211 
1212  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1213 }
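
A standalone sketch of the shape effect of DEPTH_TO_SPACE for NHWC with block size b, which also shows why the channel count must divide by b squared (illustrative name):

    // [N, H, W, C] -> [N, H * b, W * b, C / (b * b)]; fails if b does not
    // divide the channel dimension.
    static bool DepthToSpaceShape(const int in[4], int b, int out[4])
    {
        if (b < 2 || in[3] % (b * b) != 0)
        {
            return false;
        }
        out[0] = in[0];
        out[1] = in[1] * b;
        out[2] = in[2] * b;
        out[3] = in[3] / (b * b);
        return true;
    }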
1214 
1215 bool Converter::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
1216 {
1217  VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d()";
1218 
1219  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1220 
1221  if (!input.IsValid())
1222  {
1223  return Fail("%s: Operation has invalid inputs", __func__);
1224  }
1225 
1226  const Operand* output = GetOutputOperand(operation, 0, model);
1227 
1228  if (!output)
1229  {
1230  return Fail("%s: Could not read output 0", __func__);
1231  }
1232 
1233  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1234  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1235 
1236  // ArmNN does not currently support non-fixed weights or bias
1237  if (!IsWeightsValid(operation, 1, model, false))
1238  {
1239  return Fail("%s: This Operation has unsupported weights OperandLifeTime", __func__);
1240  }
1241 
1242  // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
1243  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1244 
1245  // Basic sanity check on the weights shape.
1246  // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
1247  // [1, filter_height, filter_width, depth_out]
1248  if (weightsOperand->dimensions[0] != 1)
1249  {
1250  return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
1251  }
1252 
1255 
1256  // Determine whether padding is implicit or explicit
1257  bool implicitPadding = operation.inputs.size() == 8
1258  || (operation.inputs.size() >= 9
1259  && GetInputOperand(operation, 8, model)->type == OperandType::BOOL);
1260 
1261  // Look ahead to find the optional DataLayout, if present
1262  const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
1263  desc.m_DataLayout = OptionalDataLayout(operation, dataLayoutFlagIndex, model, data);
1264 
1265  armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1266  unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1267  unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1268 
1269  LayerInputHandle weightsInput = ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);
1270  if (!weightsInput.IsValid())
1271  {
1272  return Fail("%s: Operation has invalid inputs", __func__);
1273  }
1274 
1275  const Operand* biasOperand = GetInputOperand(operation, 2, model);
1276  if (!biasOperand)
1277  {
1278  return Fail("%s: Could not read bias", __func__);
1279  }
1280 
1281  LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
1282  if (!biasInput.IsValid())
1283  {
1284  return Fail("%s: Operation has invalid inputs", __func__);
1285  }
1286 
1287  biasInput.SanitizeQuantizationScale(weightsInput, input);
1288  armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1289  armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1290 
1291  ActivationFn activation;
1292  if (implicitPadding)
1293  {
1294  ::android::nn::PaddingScheme paddingScheme;
1295  if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
1296  || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
1297  || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
1298  || !GetInputActivationFunction(operation, 7, activation, model, data)
1299  || !GetOptionalConvolutionDilationParams(operation, 9, desc, model, data))
1300  {
1301  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1302  }
1303 
1304  const uint32_t kernelX = weightsInfo.GetShape()[2];
1305  const uint32_t kernelY = weightsInfo.GetShape()[1];
1306  const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1307  const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1308 
1309  CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1310  CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
1311  }
1312  else if (operation.inputs.size() >= 11)
1313  {
1314  // explicit padding
1315  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
1316  || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
1317  || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
1318  || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
1319  || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
1320  || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
1321  || !GetInputActivationFunction(operation, 10, activation, model, data)
1322  || !GetOptionalConvolutionDilationParams(operation, 12, desc, model, data))
1323  {
1324  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
1325  }
1326  }
1327  else
1328  {
1329  return Fail("%s: Unsupported number of operation inputs", __func__);
1330  }
1331 
1332  desc.m_BiasEnabled = true;
1333  Optional<TensorInfo> biases(biasInfo);
1334 
1335  bool requiresValidation = true;
1336  if (IsConnectedToDequantize(weightsInput.GetOutputSlot()) || IsConnectedToDequantize(biasInput.GetOutputSlot()))
1337  {
1338  // Do not require validation for now. An optimization step
1339  // [ConvertConstDequantisationLayersToConstLayers] will convert these layers to Constant layers,
1340  // and layer support validation is then performed at the end of the optimization.
1341  requiresValidation = false;
1342  VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Weights and Biases are as INPUTS.";
1343  }
1344 
1345  armnn::BackendId setBackend;
1346  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
1347  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1348  IsDepthwiseConvolutionSupported,
1349  data.m_Backends,
1350  isSupported,
1351  setBackend,
1352  inputInfo,
1353  outputInfo,
1354  desc,
1355  weightsInfo,
1356  biases);
1357  };
1358 
1359  if (requiresValidation)
1360  {
1361  VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Requires Validation!";
1362  bool isSupported = false;
1363  if (!IsDynamicTensor(outputInfo))
1364  {
1365  validateFunc(outputInfo, isSupported);
1366  }
1367  else
1368  {
1369  isSupported = AreDynamicTensorsSupported();
1370  }
1371 
1372  if (!isSupported)
1373  {
1374  return false;
1375  }
1376  }
1377 
1378  armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
1379  if (!startLayer)
1380  {
1381  return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
1382  }
1383 
1384  startLayer->SetBackendId(setBackend);
1385 
1386  input.Connect(startLayer->GetInputSlot(0));
1387 
1388  // Connect weights and bias inputs
1389  weightsInput.Connect(startLayer->GetInputSlot(1));
1390  biasInput.Connect(startLayer->GetInputSlot(2));
1391 
1392  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
1393 }
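
As the comments above note, NNAPI depthwise weights are laid out [1, H, W, I * M] with M the channel multiplier; a trivial standalone sketch of the resulting output depth (illustrative name):

    // The last weights dimension holds depth_in * channelMultiplier output
    // channels; indices 1 and 2 are the kernel height and width read above.
    static int DepthwiseOutputChannels(int depthIn, int channelMultiplier)
    {
        return depthIn * channelMultiplier;
    }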
1394 
1395 bool Converter::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
1396 {
1397  VLOG(DRIVER) << "Converter::ConvertDequantize()";
1398 
1399  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1400  if (!input.IsValid())
1401  {
1402  return Fail("%s: Operation has invalid input", __func__);
1403  }
1404 
1405  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1406  const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
1407  if (quantizationDim.has_value() && quantizationDim.value() != 0)
1408  {
1409  return Fail("%s: Operation has quantization dimension different than 0", __func__);
1410  }
1411 
1412  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
1413  if (!outputOperand)
1414  {
1415  return Fail("%s: Operation has invalid outputs", __func__);
1416  }
1417 
1418  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1419 
1420  bool isSupported = false;
1421  armnn::BackendId setBackend;
1422  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1423  {
1424  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1425  IsDequantizeSupported,
1426  data.m_Backends,
1427  isSupported,
1428  setBackend,
1429  inputInfo,
1430  outputInfo);
1431  };
1432 
1433  if(IsDynamicTensor(outputInfo))
1434  {
1435  isSupported = AreDynamicTensorsSupported();
1436  }
1437  else
1438  {
1439  validateFunc(outputInfo, isSupported);
1440  }
1441 
1442  if (!isSupported)
1443  {
1444  return false;
1445  }
1446 
1447  armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
1448  assert(layer != nullptr);
1449  layer->SetBackendId(setBackend);
1450  input.Connect(layer->GetInputSlot(0));
1451 
1452  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1453 }
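
For reference, the affine dequantization this layer performs, written out as a standalone sketch (per-tensor case; the quantization-dim check above constrains the per-channel case):

    // real_value = scale * (quantized_value - zeroPoint)
    static float DequantizeValue(int quantized, float scale, int zeroPoint)
    {
        return scale * static_cast<float>(quantized - zeroPoint);
    }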
1454 
1455 bool Converter::ConvertElementwiseBinary(const Operation& operation,
1456  const Model& model,
1457  ConversionData& data,
1458  armnn::BinaryOperation binaryOperation)
1459 {
1460  VLOG(DRIVER) << "Converter::ConvertElementwiseBinary()";
1461  VLOG(DRIVER) << "binaryOperation = " << GetBinaryOperationAsCString(binaryOperation);
1462 
1463  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1464  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
1465 
1466  if (!input0.IsValid() || !input1.IsValid())
1467  {
1468  return Fail("%s: Operation has invalid inputs", __func__);
1469  }
1470 
1471  // The FuseActivation parameter is always at input index 2, and it is optional
1472  ActivationFn activationFunction;
1473  if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
1474  {
1475  return Fail("%s: Operation has invalid optional input: activation function", __func__);
1476  }
1477 
1478  const Operand* output = GetOutputOperand(operation, 0, model);
1479  if (!output)
1480  {
1481  return Fail("%s: Could not read output", __func__);
1482  }
1483 
1484  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1485 
1486  armnn::ElementwiseBinaryDescriptor descriptor(binaryOperation);
1487 
1488  bool isSupported = false;
1489  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1490  {
1491  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1492  IsElementwiseBinarySupported,
1493  data.m_Backends,
1494  isSupported,
1495  armnn::BackendId(),
1496  input0.GetTensorInfo(),
1497  input1.GetTensorInfo(),
1498  outputInfo,
1499  binaryOperation);
1500  };
1501 
1502  if (!IsDynamicTensor(outputInfo))
1503  {
1504  validateFunc(outputInfo, isSupported);
1505  }
1506  else
1507  {
1508  isSupported = AreDynamicTensorsSupported();
1509  }
1510 
1511  if (!isSupported)
1512  {
1513  return false;
1514  }
1515 
1516  armnn::IConnectableLayer* layer = data.m_Network->AddElementwiseBinaryLayer(descriptor);
1517  if (!layer)
1518  {
1519  return Fail("%s: Could not add the ElementwiseBinaryLayer", __func__);
1520  }
1521  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
1522  if (!isReshapeSupported)
1523  {
1524  return false;
1525  }
1526 
1527  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model,
1528  data, nullptr, validateFunc, activationFunction);
1529 }
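
The optional input at index 2 is an NNAPI fuse code applied to the binary result via SetupAndTrackLayerOutputSlot's activation argument; a standalone sketch of what the four codes do element-wise:

    #include <algorithm>

    // NNAPI fuse codes: 0 = NONE, 1 = RELU, 2 = RELU1, 3 = RELU6.
    static float ApplyFusedActivation(float value, int fuseCode)
    {
        switch (fuseCode)
        {
            case 1:  return std::max(0.0f, value);                  // RELU
            case 2:  return std::min(std::max(-1.0f, value), 1.0f); // RELU1
            case 3:  return std::min(std::max(0.0f, value), 6.0f);  // RELU6
            default: return value;                                  // NONE
        }
    }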
1530 
1531 bool Converter::ConvertElementwiseUnary(const Operation& operation,
1532  const Model& model,
1533  ConversionData& data,
1534  UnaryOperation unaryOperation)
1535 {
1536  VLOG(DRIVER) << "Converter::ConvertElementwiseUnary()";
1537  VLOG(DRIVER) << "unaryOperation = " << GetUnaryOperationAsCString(unaryOperation);
1538 
1539  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1540 
1541  if (!input.IsValid())
1542  {
1543  return Fail("%s: Operation has invalid input", __func__);
1544  }
1545 
1546  const Operand* output = GetOutputOperand(operation, 0, model);
1547  if (!output)
1548  {
1549  return Fail("%s: Could not read output 0", __func__);
1550  }
1551 
1552  const TensorInfo& inputInfo = input.GetTensorInfo();
1553  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1554 
1555  ElementwiseUnaryDescriptor descriptor(unaryOperation);
1556 
1557  bool isSupported = false;
1558  armnn::BackendId setBackend;
1559  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1560  {
1561  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1562  IsElementwiseUnarySupported,
1563  data.m_Backends,
1564  isSupported,
1565  setBackend,
1566  inputInfo,
1567  outputInfo,
1568  descriptor);
1569  };
1570 
1571  if(!IsDynamicTensor(outputInfo))
1572  {
1573  validateFunc(outputInfo, isSupported);
1574  }
1575  else
1576  {
1577  isSupported = AreDynamicTensorsSupported();
1578  }
1579 
1580  if (!isSupported)
1581  {
1582  return false;
1583  }
1584 
1585  IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
1586  assert(layer != nullptr);
1587  layer->SetBackendId(setBackend);
1588  input.Connect(layer->GetInputSlot(0));
1589 
1590  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1591 }
1592 
1593 bool Converter::ConvertElu(const Operation& operation, const Model& model, ConversionData& data)
1594 {
1595  VLOG(DRIVER) << "Converter::ConvertElu()";
1596 
1597  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1598  if (!input0.IsValid())
1599  {
1600  return Fail("%s: Operation has invalid inputs", __func__);
1601  }
1602 
1603  // Determine data type of input tensor
1604  OperandType inputType;
1605  if (!GetOperandType(operation, 0, model, inputType))
1606  {
1607  return Fail("%s: Operation has invalid inputs", __func__);
1608  }
1609 
1610  ActivationDescriptor desc;
1611  desc.m_Function = ActivationFunction::Elu;
1612 
1613  // Read alpha
1614  if (inputType == OperandType::TENSOR_FLOAT16)
1615  {
1616  Half alpha;
1617 
1618  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, alpha, model, data))
1619  {
1620  return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
1621  }
1622 
1623  desc.m_A = static_cast<float>(alpha);
1624  }
1625  else if (inputType == OperandType::TENSOR_FLOAT32)
1626  {
1627  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_A, model, data))
1628  {
1629  return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
1630  }
1631  }
1632  else
1633  {
1634  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
1635  }
1636 
1637  return ::ConvertToActivation(operation, __func__, desc, model, data);
1638 }
1639 
1640 bool Converter::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
1641 {
1642  VLOG(DRIVER) << "Converter::ConvertExpandDims()";
1643 
1644  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1645 
1646  if (!input.IsValid())
1647  {
1648  return Fail("%s: Operation has invalid input", __func__);
1649  }
1650 
1651  const Operand* output = GetOutputOperand(operation, 0, model);
1652  if (!output)
1653  {
1654  return Fail("%s: Operation has invalid output", __func__);
1655  }
1656 
1657  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1658 
1659  int32_t axis;
1660  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
1661  {
1662  return Fail("%s: failed to get axis input value", __func__);
1663  }
1664 
1665  TensorShape targetShape;
1666 
1667  try
1668  {
1669  targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
1670  }
1671  catch (const std::exception& e)
1672  {
1673  return Fail("%s: %s", __func__, e.what());
1674  }
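      // e.g. an input of shape [2, 3] with axis 1 expands to [2, 1, 3];
      // a negative axis counts from the back, so axis -1 gives [2, 3, 1].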
1675 
1676  ReshapeDescriptor reshapeDescriptor;
1677  reshapeDescriptor.m_TargetShape = targetShape;
1678 
1679  bool isSupported = false;
1680  armnn::BackendId setBackend;
1681  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1682  {
1683  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1684  IsReshapeSupported,
1685  data.m_Backends,
1686  isSupported,
1687  setBackend,
1688  input.GetTensorInfo(),
1689  outputInfo,
1690  reshapeDescriptor);
1691  };
1692 
1693  if(!IsDynamicTensor(outputInfo))
1694  {
1695  if (targetShape != outputInfo.GetShape())
1696  {
1697  return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
1698  }
1699  validateFunc(outputInfo, isSupported);
1700  }
1701  else
1702  {
1703  isSupported = AreDynamicTensorsSupported();
1704  }
1705 
1706  if (!isSupported)
1707  {
1708  return false;
1709  }
1710 
1711  IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1712  assert(layer != nullptr);
1713  layer->SetBackendId(setBackend);
1714  input.Connect(layer->GetInputSlot(0));
1715 
1716  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1717 }
1718 
1719 bool Converter::ConvertFill(const Operation& operation, const Model& model, ConversionData& data)
1720 {
1721  VLOG(DRIVER) << "Converter::ConvertFill()";
1722  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1723  if (!input.IsValid())
1724  {
1725  return Fail("%s: Operation has invalid inputs", __func__);
1726  }
1727 
1728  const Operand* output = GetOutputOperand(operation, 0, model);
1729  if (!output)
1730  {
1731  return Fail("%s: Could not read output", __func__);
1732  }
1733 
1734  const TensorInfo& inputInfo = input.GetTensorInfo();
1735  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1736  if (IsDynamicTensor(outputInfo))
1737  {
1738  return Fail("%s: Dynamic output tensors are not supported", __func__);
1739  }
1740 
1741  // Determine data type of output tensor
1742  OperandType outputType = output->type;
1743  FillDescriptor descriptor;
1744  // Read the scalar fill value
1745  if (outputType == OperandType::TENSOR_FLOAT16)
1746  {
1747  Half value;
1748 
1749  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
1750  {
1751  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1752  }
1753 
1754  descriptor.m_Value = static_cast<float>(value);
1755  }
1756  else if (outputType == OperandType::TENSOR_FLOAT32)
1757  {
1758  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Value, model, data))
1759  {
1760  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1761  }
1762  }
1763  else if (outputType == OperandType::TENSOR_INT32)
1764  {
1765  int32_t value;
1766 
1767  if (!GetInputScalar(operation, 1, OperandType::INT32, value, model, data))
1768  {
1769  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1770  }
1771 
1772  descriptor.m_Value = static_cast<float>(value);
1773  }
1774  else
1775  {
1776  return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
1777  }
1778 
1779  bool isSupported = false;
1780  armnn::BackendId setBackend;
1781  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1782  IsFillSupported,
1783  data.m_Backends,
1784  isSupported,
1785  setBackend,
1786  inputInfo,
1787  outputInfo,
1788  descriptor);
1789  if (!isSupported)
1790  {
1791  return false;
1792  }
1793 
1794  IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
1795  assert(layer != nullptr);
1796  layer->SetBackendId(setBackend);
1797  input.Connect(layer->GetInputSlot(0));
1798 
1799  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1800 }
1801 
1802 bool Converter::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
1803 {
1804  VLOG(DRIVER) << "Converter::ConvertFloor()";
1805  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1806  if (!input.IsValid())
1807  {
1808  return Fail("%s: Operation has invalid inputs", __func__);
1809  }
1810 
1811  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
1812  if (!outputOperand)
1813  {
1814  return Fail("%s: Operation has invalid outputs", __func__);
1815  }
1816 
1817  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1818 
1819  bool isSupported = false;
1820  armnn::BackendId setBackend;
1821  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1822  {
1823  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1824  IsFloorSupported,
1825  data.m_Backends,
1826  isSupported,
1827  setBackend,
1828  input.GetTensorInfo(),
1829  outputInfo);
1830  };
1831 
1832  if(!IsDynamicTensor(outputInfo))
1833  {
1834  validateFunc(outputInfo, isSupported);
1835  }
1836  else
1837  {
1838  isSupported = AreDynamicTensorsSupported();
1839  }
1840 
1841  if (!isSupported)
1842  {
1843  return false;
1844  }
1845 
1846  armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
1847  assert(layer != nullptr);
1848  layer->SetBackendId(setBackend);
1849  input.Connect(layer->GetInputSlot(0));
1850 
1851  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1852 }
1853 
1854 bool Converter::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
1855 {
1856  VLOG(DRIVER) << "Converter::ConvertFullyConnected()";
1857  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1858  if (!input.IsValid())
1859  {
1860  return Fail("%s: Operation has invalid inputs", __func__);
1861  }
1862 
1863  const Operand* output = GetOutputOperand(operation, 0, model);
1864  if (!output)
1865  {
1866  return Fail("%s: Could not read output 0", __func__);
1867  }
1868 
1869  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1870  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1871 
1872  LayerInputHandle weightsInput = LayerInputHandle();
1873  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1874  if (!weightsOperand)
1875  {
1876  return Fail("%s: Could not read weights", __func__);
1877  }
1878 
1879  // If the weights are constant, a separate constant layer is created to store the data.
1880  // Otherwise, the non-constant weights are handled as inputs.
1881  weightsInput = ConvertToLayerInputHandle(operation, 1, model, data);
1882  if (!weightsInput.IsValid())
1883  {
1884  return Fail("%s: Operation has invalid inputs", __func__);
1885  }
1886 
1887  LayerInputHandle biasInput = LayerInputHandle();
1888  const Operand* biasOperand = GetInputOperand(operation, 2, model);
1889  if (!biasOperand)
1890  {
1891  return Fail("%s: Could not read bias", __func__);
1892  }
1893 
1894  // If the bias is constant, a separate constant layer is created to store the data.
1895  // Otherwise, the non-constant bias is handled as an input.
1896  biasInput = ConvertToLayerInputHandle(operation, 2, model, data); // 1D
1897  if (!biasInput.IsValid())
1898  {
1899  return Fail("%s: Operation has invalid inputs", __func__);
1900  }
1901 
1902  armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1903  armnn::TensorInfo reshapedInfo = inputInfo;
1904  try
1905  {
1906  reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
1907  }
1908  catch (const std::exception& e)
1909  {
1910  return Fail("%s: %s", __func__, e.what());
1911  }
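      // FlattenFullyConnectedInput collapses the input to a 2-D shape whose last
      // dimension matches the weights' input size; e.g. a [1, 2, 2, 4] input
      // with [8, 16] weights is reshaped to [1, 16].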
1912 
1913  // Ensure the bias quantization scale is within 1% of input scale * weights scale (small float differences can exist)
1914  armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1915  SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
1916 
1917  ActivationFn activationFunction;
1918  if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
1919  {
1920  return Fail("%s: Operation has invalid inputs", __func__);
1921  }
1922 
1923  FullyConnectedDescriptor desc;
1924  desc.m_TransposeWeightMatrix = true;
1925  desc.m_BiasEnabled = true;
1926  desc.m_ConstantWeights = IsOperandConstant(*weightsOperand);
1927 
1928  bool isSupported = false;
1929  armnn::BackendId setBackend;
1930  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1931  {
1932  if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
1933  weightsInfo.GetShape(),
1934  outputInfo.GetShape(),
1935  desc.m_TransposeWeightMatrix))
1936  {
1937  isSupported = false;
1938  Fail("%s: Expected outputShape does not match actual outputShape", __func__);
1939  return;
1940  }
1941 
1942  FORWARD_LAYER_SUPPORT_FUNC(__func__,
1943  IsFullyConnectedSupported,
1944  data.m_Backends,
1945  isSupported,
1946  setBackend,
1947  reshapedInfo,
1948  outputInfo,
1949  weightsInfo,
1950  biasInfo,
1951  desc);
1952  };
1953 
1954  if(!IsDynamicTensor(outputInfo))
1955  {
1956  validateFunc(outputInfo, isSupported);
1957  }
1958  else
1959  {
1960  isSupported = AreDynamicTensorsSupported();
1961  }
1962 
1963  if (!isSupported)
1964  {
1965  return false;
1966  }
1967 
1968  // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
1969  armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
1970  startLayer->SetBackendId(setBackend);
1971 
1972  if (inputInfo.GetNumDimensions() > 2U)
1973  {
1974  armnn::ReshapeDescriptor reshapeDescriptor;
1975  reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
1976 
1977  armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1978  assert(reshapeLayer != nullptr);
1979  input.Connect(reshapeLayer->GetInputSlot(0));
1980  reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
1981  reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
1982  }
1983  else
1984  {
1985  input.Connect(startLayer->GetInputSlot(0));
1986  }
1987 
1988  // Connect weights and bias inputs
1989  weightsInput.Connect(startLayer->GetInputSlot(1));
1990  biasInput.Connect(startLayer->GetInputSlot(2));
1991 
1992  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
1993  data, nullptr, validateFunc, activationFunction);
1994 }
1995 
1996 bool Converter::ConvertGather(const Operation& operation, const Model& model, ConversionData& data)
1997 {
1998  VLOG(DRIVER) << "Converter::ConvertGather()";
1999 
2000  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2001  if (!input.IsValid())
2002  {
2003  return Fail("%s: Operation has invalid input", __func__);
2004  }
2005  auto inputDimensions = input.GetTensorInfo().GetNumDimensions();
2006 
2007  LayerInputHandle indices = ConvertToLayerInputHandle(operation, 2, model, data);
2008  if (!indices.IsValid())
2009  {
2010  return Fail("%s: Operation has invalid indices", __func__);
2011  }
2012  auto indicesDimensions = indices.GetTensorInfo().GetNumDimensions();
2013 
2014  const Operand* output = GetOutputOperand(operation, 0, model);
2015  if (!output)
2016  {
2017  return Fail("%s: Operation has invalid output", __func__);
2018  }
2019  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2020  auto outputDimensions = outputInfo.GetNumDimensions();
2021  if (outputDimensions != inputDimensions + indicesDimensions - 1)
2022  {
2023  return Fail("%s: Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
2024  __func__, outputDimensions, inputDimensions, indicesDimensions);
2025  }
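      // e.g. a rank-3 input gathered with rank-2 indices must produce a
      // rank-4 output (3 + 2 - 1).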
2026 
2027  int32_t axis;
2028  if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
2029  {
2030  return Fail("%s: Operation has invalid or unsupported axis operand", __func__);
2031  }
2032  int32_t inputDimensions_int = static_cast<int32_t>(inputDimensions);
2033  if ((axis < -inputDimensions_int) || (inputDimensions_int <= axis))
2034  {
2035  return Fail("%s: Operation has invalid axis: %d. It is out of bounds [-%d, %d)", __func__, axis,
2036  inputDimensions, inputDimensions);
2037  }
2038 
2039  GatherDescriptor desc;
2040  desc.m_Axis = axis;
2041 
2042  bool isSupported = false;
2043  armnn::BackendId setBackend;
2044  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2045  {
2046  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2047  IsGatherSupported,
2048  data.m_Backends,
2049  isSupported,
2050  setBackend,
2051  input.GetTensorInfo(),
2052  indices.GetTensorInfo(),
2053  outputInfo,
2054  desc);
2055  };
2056 
2057  if(!IsDynamicTensor(outputInfo))
2058  {
2059  validateFunc(outputInfo, isSupported);
2060  }
2061  else
2062  {
2063  isSupported = AreDynamicTensorsSupported();
2064  }
2065 
2066  if (!isSupported)
2067  {
2068  return false;
2069  }
2070 
2071  IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
2072  assert(layer != nullptr);
2073  layer->SetBackendId(setBackend);
2074  input.Connect(layer->GetInputSlot(0));
2075  indices.Connect(layer->GetInputSlot(1));
2076 
2077  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2078 }
2079 
2080 bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data)
2081 {
2082  VLOG(DRIVER) << "Converter::ConvertGroupedConv2d()";
2083  //
2084  // Parse data
2085  //
2086  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2087  if (!input.IsValid())
2088  {
2089  return Fail("%s: Operation has invalid inputs", __func__);
2090  }
2091  const TensorInfo& inputInfo = input.GetTensorInfo();
2092 
2093  const Operand* output = GetOutputOperand(operation, 0, model);
2094  if (!output)
2095  {
2096  return Fail("%s: Could not read output 0", __func__);
2097  }
2098  TensorInfo outputInfo = GetTensorInfoForOperand(*output);
2099 
2100  // Look ahead to determine data layout
2101  DataLayout dataLayout = DataLayout::NHWC;
2102  if (operation.inputs.size() == 12)
2103  {
2104  dataLayout = OptionalDataLayout(operation, 11, model, data);
2105  }
2106  else
2107  {
2108  dataLayout = OptionalDataLayout(operation, 8, model, data);
2109  }
2110 
2111  // NOTE:
2112  // NNAPI weights are always OHWI, i.e. [depth_out, filter_height, filter_width, depth_group],
2113  // but Arm NN expects the filter's height and width indices to match the input's height and
2114  // width indices, so when the DataLayout is NCHW we need to permute the weights to OIHW.
2115  const PermutationVector ohwiToOihw = { 0u, 2u, 3u, 1u };
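      // e.g. an OHWI filter of shape [32, 3, 3, 8] becomes an OIHW filter
      // of shape [32, 8, 3, 3] under this mapping.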
2116  const ConstTensorPin weightsPin = (dataLayout == DataLayout::NCHW) ?
2117  ConvertOperationInputToConstTensorPin(operation, 1,
2118  model, data, ohwiToOihw) :
2119  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
2120  const ConstTensorPin biasesPin =
2121  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
2122  if (!weightsPin.IsValid() || !biasesPin.IsValid())
2123  {
2124  return Fail("%s: Operation has invalid inputs", __func__);
2125  }
2126 
2127  ConstTensor weights = weightsPin.GetConstTensor();
2128  ConstTensor biases = biasesPin.GetConstTensor();
2129  SanitizeBiasQuantizationScale(biases.GetInfo(), weights.GetInfo(), inputInfo);
2130 
2131  const TensorShape& inputShape = inputInfo.GetShape();
2132  const TensorShape& outputShape = outputInfo.GetShape();
2133  const TensorShape& weightsShape = weights.GetShape();
2134  const TensorShape& biasesShape = biases.GetShape();
2135 
2136  armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
2137  const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
2138  const unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
2139  const unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
2140 
2141  Convolution2dDescriptor desc;
2142  desc.m_DataLayout = dataLayout;
2143  desc.m_BiasEnabled = true;
2144 
2145  int numGroups;
2146  ActivationFn activation;
2147 
2148  if (operation.inputs.size() == 12)
2149  {
2150  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
2151  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
2152  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
2153  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
2154  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
2155  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
2156  !GetInputScalar(operation, 9, OperandType::INT32, numGroups, model, data) ||
2157  !GetInputActivationFunction(operation, 10, activation, model, data))
2158  {
2159  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
2160  }
2161 
2162  }
2163  else if (operation.inputs.size() == 9)
2164  {
2165  ::android::nn::PaddingScheme paddingScheme;
2166  if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
2167  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
2168  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
2169  !GetInputScalar(operation, 6, OperandType::INT32, numGroups, model, data) ||
2170  !GetInputActivationFunction(operation, 7, activation, model, data))
2171  {
2172  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
2173  }
2174 
2175  const uint32_t inputX = inputInfo.GetShape()[widthIndex];
2176  const uint32_t inputY = inputInfo.GetShape()[heightIndex];
2177 
2178  const uint32_t kernelX = weightsShape[widthIndex];
2179  const uint32_t kernelY = weightsShape[heightIndex];
2180 
2181  CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2182  CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2183  }
2184  else
2185  {
2186  return Fail("%s: Unsupported number of operation inputs", __func__);
2187  }
2188 
2189  // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
2190  const unsigned int outputChannels = weightsShape[0];
2191 
2192  const unsigned int channelsPerGroup = weightsShape[channelsIndex];
2193  const unsigned int channelMultiplier = outputChannels / numGroups;
2194 
2195  //
2196  // Validate all relevant inputs
2197  //
2198  if (numGroups <= 0)
2199  {
2200  return Fail("%s: Number of groups must be greater than 0. Got: %d", __func__, numGroups);
2201  }
2202 
2203  if (outputChannels % numGroups != 0u)
2204  {
2205  return Fail("%s: Output channels must be divisible by the number of groups", __func__);
2206  }
2207 
2208  //
2209  // Set up Splitter layer
2210  //
2211  unsigned int splitterDimSizes[4] = { inputShape[0], inputShape[1], inputShape[2], inputShape[3] };
2212  splitterDimSizes[channelsIndex] /= numGroups; // split in depth
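      // e.g. an NHWC input of shape [1, 14, 14, 8] with numGroups = 2 is
      // split into two views of shape [1, 14, 14, 4].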
2213 
2214  TensorInfo splitterOutputInfo(4,
2215  splitterDimSizes,
2216  inputInfo.GetDataType(),
2217  inputInfo.GetQuantizationScale(),
2218  inputInfo.GetQuantizationOffset());
2219 
2220  std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(numGroups, std::ref(splitterOutputInfo));
2221 
2222  ViewsDescriptor splitterDesc(numGroups);
2223  for (unsigned int group = 0u; group < numGroups; ++group)
2224  {
2225  splitterDesc.SetViewOriginCoord(group, channelsIndex, splitterDimSizes[channelsIndex] * group);
2226  for (unsigned int dimIdx = 0u; dimIdx < 4u; dimIdx++)
2227  {
2228  splitterDesc.SetViewSize(group, dimIdx, splitterDimSizes[dimIdx]);
2229  }
2230  }
2231 
2232  bool isSupported = false;
2233  armnn::BackendId setBackendSplit;
2234  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2235  IsSplitterSupported,
2236  data.m_Backends,
2237  isSupported,
2238  setBackendSplit,
2239  inputInfo,
2240  splitterOutputInfos,
2241  splitterDesc);
2242  if (!isSupported)
2243  {
2244  return false;
2245  }
2246 
2247  IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
2248  if (!splitterLayer)
2249  {
2250  return Fail("%s: Failed to add SplitterLayer", __func__);
2251  }
2252  splitterLayer->SetBackendId(setBackendSplit);
2253 
2254  input.Connect(splitterLayer->GetInputSlot(0));
2255  for (unsigned int group = 0u; group < splitterLayer->GetNumOutputSlots(); ++group)
2256  {
2257  splitterLayer->GetOutputSlot(group).SetTensorInfo(splitterOutputInfo);
2258  }
2259 
2260  //
2261  // Set up Convolution2d layers for each group
2262  //
2263 
2264  // Set up group tensor shapes
2265  TensorShape groupInputShape(inputShape);
2266  groupInputShape[channelsIndex] = channelsPerGroup;
2267 
2268  TensorShape groupWeightsShape(weightsShape);
2269  groupWeightsShape[0] /= channelMultiplier * numGroups;
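      // channelMultiplier * numGroups equals outputChannels (== weightsShape[0]),
      // so each per-group convolution is left with a single output channel,
      // matching the groupOutputShape[channelsIndex] = 1 set below.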
2270 
2271  TensorShape groupBiasesShape({ 1 });
2272 
2273  // Set up group tensor infos
2274  TensorInfo groupInputInfo(inputInfo);
2275  groupInputInfo.SetShape(groupInputShape);
2276 
2277  const TensorInfo& weightsInfo = weights.GetInfo();
2278  TensorInfo groupWeightsInfo(weightsInfo);
2279  groupWeightsInfo.SetShape(groupWeightsShape);
2280 
2281  const TensorInfo& biasesInfo = biases.GetInfo();
2282  TensorInfo groupBiasesInfo(biasesInfo);
2283  groupBiasesInfo.SetShape(groupBiasesShape);
2284 
2285  TensorInfo groupOutputInfo(outputInfo);
2286 
2287  TensorShape groupOutputShape(outputShape);
2288  const bool isDynamic = IsDynamicTensor(outputInfo);
2289  if (!isDynamic)
2290  {
2291  groupOutputShape[channelsIndex] = 1;
2292  }
2293  groupOutputInfo.SetShape(groupOutputShape);
2294 
2295  const unsigned int weightsDataTypeSize = GetDataTypeSize(groupWeightsInfo.GetDataType());
2296  const unsigned int biasesDataTypeSize = GetDataTypeSize(groupBiasesInfo.GetDataType());
2297 
2298  std::vector<IConnectableLayer*> convLayers(numGroups * channelMultiplier, nullptr);
2299  for (unsigned int group = 0u; group < numGroups; ++group)
2300  {
2301  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2302  {
2303  auto index = group * channelMultiplier + m;
2304 
2305  const unsigned int weightsDataOffset = groupWeightsShape.GetNumElements() * index * weightsDataTypeSize;
2306  const unsigned int biasesDataOffset = groupBiasesShape.GetNumElements() * index * biasesDataTypeSize;
2307 
2308  if (weightsInfo.HasPerAxisQuantization())
2309  {
2310  // Extract per-axis quantization scales for group weights
2311  const std::vector<float>& weightsQuantScales = weightsInfo.GetQuantizationScales();
2312  groupWeightsInfo.SetQuantizationScales(
2313  std::vector<float>(weightsQuantScales.begin() + index,
2314  weightsQuantScales.begin() + index + groupWeightsShape[0]));
2315 
2316  // Extract per-axis quantization scales for group biases
2317  const std::vector<float>& biasesQuantScales = biasesInfo.GetQuantizationScales();
2318  groupBiasesInfo.SetQuantizationScales(
2319  std::vector<float>(biasesQuantScales.begin() + index,
2320  biasesQuantScales.begin() + index + groupWeightsShape[0]));
2321  }
2322 
2323  // Extract weights and biases data for current group convolution
2324  ConstTensor groupWeights(groupWeightsInfo,
2325  static_cast<const void *>(reinterpret_cast<const char *>(weights.GetMemoryArea()) +
2326  weightsDataOffset));
2327  ConstTensor groupBiases(groupBiasesInfo,
2328  static_cast<const void *>(reinterpret_cast<const char *>(biases.GetMemoryArea()) +
2329  biasesDataOffset));
2330 
2331  isSupported = false;
2332  armnn::BackendId setBackendConv;
2333  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2334  {
2335  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2336  IsConvolution2dSupported,
2337  data.m_Backends,
2338  isSupported,
2339  setBackendConv,
2340  groupInputInfo,
2341  outputInfo,
2342  desc,
2343  groupWeightsInfo,
2344  Optional<TensorInfo>(groupBiasesInfo));
2345  };
2346 
2347  if(!isDynamic)
2348  {
2349  validateFunc(groupOutputInfo, isSupported);
2350  }
2351  else
2352  {
2353  isSupported = AreDynamicTensorsSupported();
2354  }
2355 
2356  if (!isSupported)
2357  {
2358  return false;
2359  }
2360 
2361  IConnectableLayer* weightsLayer = data.m_Network->AddConstantLayer(groupWeights);
2362  IConnectableLayer* biasLayer = data.m_Network->AddConstantLayer(groupBiases);
2363  IConnectableLayer* convLayer = data.m_Network->AddConvolution2dLayer(desc);
2364 
2365  if (!convLayer)
2366  {
2367  return Fail("%s: AddConvolution2dLayer failed", __func__);
2368  }
2369 
2370  convLayer->SetBackendId(setBackendConv);
2371 
2372  splitterLayer->GetOutputSlot(group).Connect(convLayer->GetInputSlot(0));
2373  weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
2374  biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
2375 
2376  weightsLayer->GetOutputSlot(0).SetTensorInfo(groupWeightsInfo);
2377  biasLayer->GetOutputSlot(0).SetTensorInfo(groupBiasesInfo);
2378  convLayer->GetOutputSlot(0).SetTensorInfo(groupOutputInfo);
2379 
2380  if(isDynamic)
2381  {
2382  convLayer->GetOutputSlot(0).IsTensorInfoSet();
2383 
2384  validateFunc(convLayer->GetOutputSlot(0).GetTensorInfo(), isSupported);
2385 
2386  outputInfo = convLayer->GetOutputSlot(0).GetTensorInfo();
2387 
2388  if (!isSupported)
2389  {
2390  return false;
2391  }
2392  }
2393 
2394  convLayers[index] = convLayer;
2395  }
2396  }
2397 
2398  //
2399  // Set up Concat layer
2400  //
2401  ConcatDescriptor concatDescriptor;
2402  // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
2403  concatDescriptor = ConcatDescriptor(weightsShape[0]);
2404  for (unsigned int group = 0u; group < numGroups; ++group)
2405  {
2406  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2407  {
2408  auto index = group * channelMultiplier + m;
2409  concatDescriptor.SetViewOriginCoord(index, channelsIndex, index);
2410  concatDescriptor.SetConcatAxis(channelsIndex);
2411  }
2412  }
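      // Each view origin is offset by its index along the channels axis, so the
      // single-channel convolution outputs are reassembled in their original order.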
2413 
2414  isSupported = false;
2415  armnn::BackendId setBackendConcat;
2416  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2417  IsConcatSupported,
2418  data.m_Backends,
2419  isSupported,
2420  setBackendConcat,
2421  std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
2422  outputInfo,
2423  concatDescriptor);
2424 
2425  if (!isSupported)
2426  {
2427  return false;
2428  }
2429 
2430  IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
2431  if (!concatLayer)
2432  {
2433  return Fail("%s: AddConcatLayer failed", __func__);
2434  }
2435  concatLayer->SetBackendId(setBackendConcat);
2436 
2437  for (unsigned int group = 0u; group < numGroups; ++group)
2438  {
2439  for (unsigned int m = 0u; m < channelMultiplier; ++m)
2440  {
2441  auto index = group * channelMultiplier + m;
2442  convLayers[index]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(index));
2443  }
2444  }
2445  concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2446 
2447  return SetupAndTrackLayerOutputSlot(operation, 0, *concatLayer, model,
2448  data, nullptr, nullptr, activation);
2449 }
2450 
2451 bool Converter::ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data)
2452 {
2453  VLOG(DRIVER) << "Converter::ConvertHardSwish()";
2454  ActivationDescriptor desc;
2455  desc.m_Function = ActivationFunction::HardSwish;
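      // HardSwish computes f(x) = x * ReLU6(x + 3) / 6 and takes no scalar
      // parameters, so no further descriptor fields need to be read.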
2456 
2457  return ::ConvertToActivation(operation, __func__, desc, model, data);
2458 }
2459 
2460 bool Converter::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
2461 {
2462  VLOG(DRIVER) << "Converter::ConvertInstanceNormalization()";
2463 
2464  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2465  if (!input.IsValid())
2466  {
2467  return Fail("%s: Operation has an invalid input 0", __func__);
2468  }
2469 
2470  const Operand* output = GetOutputOperand(operation, 0, model);
2471  if (!output)
2472  {
2473  return Fail("%s: Operation has an invalid output", __func__);
2474  }
2475 
2476  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2477 
2478  // Determine data type of input tensor
2479  OperandType inputType;
2480  if (!GetOperandType(operation, 0, model, inputType))
2481  {
2482  return Fail("%s: Operation has invalid inputs", __func__);
2483  }
2484 
2485  InstanceNormalizationDescriptor desc;
2486 
2487  // Read gamma, beta & epsilon
2488  if (inputType == OperandType::TENSOR_FLOAT16)
2489  {
2490  Half fp16Gamma;
2491  Half fp16Beta;
2492  Half fp16Epsilon;
2493 
2494  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Gamma, model, data) ||
2495  !GetInputScalar(operation, 2, OperandType::FLOAT16, fp16Beta, model, data) ||
2496  !GetInputScalar(operation, 3, OperandType::FLOAT16, fp16Epsilon, model, data))
2497  {
2498  return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
2499  }
2500 
2501  desc.m_Gamma = static_cast<float>(fp16Gamma);
2502  desc.m_Beta = static_cast<float>(fp16Beta);
2503  desc.m_Eps = static_cast<float>(fp16Epsilon);
2504  }
2505  else if (inputType == OperandType::TENSOR_FLOAT32)
2506  {
2507  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_Gamma, model, data) ||
2508  !GetInputScalar(operation, 2, OperandType::FLOAT32, desc.m_Beta, model, data) ||
2509  !GetInputScalar(operation, 3, OperandType::FLOAT32, desc.m_Eps, model, data))
2510  {
2511  return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
2512  }
2513  }
2514  else
2515  {
2516  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2517  }
2518 
2519  desc.m_DataLayout = OptionalDataLayout(operation, 4, model, data);
2520 
2521  bool isSupported = false;
2522  armnn::BackendId setBackend;
2523  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2524  {
2525  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2526  IsInstanceNormalizationSupported,
2527  data.m_Backends,
2528  isSupported,
2529  setBackend,
2530  input.GetTensorInfo(),
2531  outputInfo,
2532  desc);
2533  };
2534 
2535  if(IsDynamicTensor(outputInfo))
2536  {
2537  isSupported = AreDynamicTensorsSupported();
2538  }
2539  else
2540  {
2541  validateFunc(outputInfo, isSupported);
2542  }
2543 
2544  if (!isSupported)
2545  {
2546  return false;
2547  }
2548 
2549  IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
2550  layer->SetBackendId(setBackend);
2551  input.Connect(layer->GetInputSlot(0));
2552 
2553  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2554 }
2555 
2556 bool Converter::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2557 {
2558  VLOG(DRIVER) << "Converter::ConvertL2Normalization()";
2559 
2560  if (operation.inputs.size() != 1)
2561  {
2562  return Fail("%s: Optional inputs are not supported", __func__);
2563  }
2564 
2565  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2566  if (!input.IsValid())
2567  {
2568  return Fail("%s: Operation has invalid inputs", __func__);
2569  }
2570 
2571  const Operand* output = GetOutputOperand(operation, 0, model);
2572  if (!output)
2573  {
2574  return Fail("%s: Could not read output 0", __func__);
2575  }
2576 
2577  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2578  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2579 
2580  if (outputInfo.GetNumDimensions() != 4u)
2581  {
2582  return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2583  }
2584 
2585  L2NormalizationDescriptor desc;
2586  desc.m_DataLayout = armnn::DataLayout::NHWC;
2587 
2588  bool isSupported = false;
2589  armnn::BackendId setBackend;
2590  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2591  {
2592  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2593  IsL2NormalizationSupported,
2594  data.m_Backends,
2595  isSupported,
2596  setBackend,
2597  inputInfo,
2598  outputInfo,
2599  desc);
2600  };
2601 
2602  if(!IsDynamicTensor(outputInfo))
2603  {
2604  validateFunc(outputInfo, isSupported);
2605  }
2606  else
2607  {
2608  isSupported = AreDynamicTensorsSupported();
2609  }
2610 
2611  if (!isSupported)
2612  {
2613  return false;
2614  }
2615 
2616  armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2617  assert(layer != nullptr);
2618  layer->SetBackendId(setBackend);
2619  input.Connect(layer->GetInputSlot(0));
2620 
2621  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2622 }
2623 
2624 bool Converter::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
2625 {
2626  VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
2627  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::L2, model, data);
2628 }
2629 
2630 bool Converter::ConvertLocalResponseNormalization(const Operation& operation,
2631  const Model& model,
2632  ConversionData& data)
2633 {
2634  VLOG(DRIVER) << "Converter::ConvertLocalResponseNormalization()";
2635 
2636  if (operation.inputs.size() != 5)
2637  {
2638  return Fail("%s: Optional inputs are not supported", __func__);
2639  }
2640 
2641  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2642  if (!input.IsValid())
2643  {
2644  return Fail("%s: Operation has invalid inputs", __func__);
2645  }
2646 
2647  const Operand* output = GetOutputOperand(operation, 0, model);
2648  if (!output)
2649  {
2650  return Fail("%s: Could not read output 0", __func__);
2651  }
2652 
2653  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2654  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2655 
2656  if (outputInfo.GetNumDimensions() != 4u)
2657  {
2658  return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2659  }
2660 
2661  armnn::NormalizationDescriptor descriptor;
2662  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2663  descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2664  descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2665 
2666  if (!input.IsValid() ||
2667  !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2668  !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
2669  !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
2670  !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
2671  {
2672  return Fail("%s: Operation has invalid inputs", __func__);
2673  }
2674 
2675  // ArmNN expects normSize to be the full size of the normalization
2676  // window rather than the radius as in AndroidNN.
2677  descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
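      // e.g. an NNAPI radius of 2 becomes an Arm NN normSize of 5: the centre
      // element plus two neighbours on each side.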
2678 
2679  bool isSupported = false;
2680  armnn::BackendId setBackend;
2681  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2682  {
2683  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2684  IsNormalizationSupported,
2685  data.m_Backends,
2686  isSupported,
2687  setBackend,
2688  inputInfo,
2689  outputInfo,
2690  descriptor);
2691  };
2692 
2693  if(!IsDynamicTensor(outputInfo))
2694  {
2695  validateFunc(outputInfo, isSupported);
2696  }
2697  else
2698  {
2699  isSupported = AreDynamicTensorsSupported();
2700  }
2701 
2702  if (!isSupported)
2703  {
2704  return false;
2705  }
2706 
2707 
2708  armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2709  assert(layer != nullptr);
2710  layer->SetBackendId(setBackend);
2711  input.Connect(layer->GetInputSlot(0));
2712 
2713  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2714 }
2715 
2716 bool Converter::ConvertLogicalBinary(const Operation& operation,
2717  const Model& model,
2718  ConversionData& data,
2719  armnn::LogicalBinaryOperation logicalOperation)
2720 {
2721  VLOG(DRIVER) << "Converter::ConvertLogicalBinary()";
2723  VLOG(DRIVER) << "logicalOperation = " << GetLogicalBinaryOperationAsCString(logicalOperation);
2724 
2725  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
2726  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
2727 
2728  if (!(input0.IsValid() && input1.IsValid()))
2729  {
2730  return Fail("%s: Operation has invalid inputs", __func__);
2731  }
2732 
2733  const Operand* output = GetOutputOperand(operation, 0, model);
2734  if (!output)
2735  {
2736  return Fail("%s: Could not read output 0", __func__);
2737  }
2738 
2739  const TensorInfo& inputInfo0 = input0.GetTensorInfo();
2740  const TensorInfo& inputInfo1 = input1.GetTensorInfo();
2741  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2742 
2743  LogicalBinaryDescriptor descriptor(logicalOperation);
2744 
2745  bool isSupported = false;
2746  armnn::BackendId setBackend;
2747  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2748  {
2749  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2750  IsLogicalBinarySupported,
2751  data.m_Backends,
2752  isSupported,
2753  setBackend,
2754  inputInfo0,
2755  inputInfo1,
2756  outputInfo,
2757  descriptor);
2758  };
2759 
2760  if(!IsDynamicTensor(outputInfo))
2761  {
2762  validateFunc(outputInfo, isSupported);
2763  }
2764  else
2765  {
2766  isSupported = AreDynamicTensorsSupported();
2767  }
2768 
2769  if (!isSupported)
2770  {
2771  return false;
2772  }
2773 
2774  IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
2775  assert(layer != nullptr);
2776  layer->SetBackendId(setBackend);
2777 
2778  bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
2779  if (!isReshapeSupported)
2780  {
2781  return false;
2782  }
2783 
2784  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2785 }
2786 
2787 bool Converter::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2788 {
2789  VLOG(DRIVER) << "Converter::ConvertLogistic()";
2790  ActivationDescriptor desc;
2791  desc.m_Function = ActivationFunction::Sigmoid;
2792 
2793  return ConvertToActivation(operation, __func__, desc, model, data);
2794 }
2795 
2796 bool Converter::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data)
2797 {
2798  VLOG(DRIVER) << "Converter::ConvertLogSoftmax()";
2799 
2800  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2801  if (!input.IsValid())
2802  {
2803  return Fail("%s: Failed to read input 0", __func__);
2804  }
2805 
2806  const Operand* output = GetOutputOperand(operation, 0, model);
2807  if (!output)
2808  {
2809  return Fail("%s: Failed to read output", __func__);
2810  }
2811 
2812  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2813 
2814  // Determine data type of input tensor
2815  OperandType inputType;
2816  if (!GetOperandType(operation, 0, model, inputType))
2817  {
2818  return Fail("%s: Operation has invalid inputs", __func__);
2819  }
2820 
2821  LogSoftmaxDescriptor descriptor;
2822 
2823  // Read beta
2824  if (inputType == OperandType::TENSOR_FLOAT16)
2825  {
2826  Half fp16Beta;
2827  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Beta, model, data))
2828  {
2829  return Fail("%s: Failed to read input 1 (FLOAT16)", __func__);
2830  }
2831 
2832  descriptor.m_Beta = static_cast<float>(fp16Beta);
2833  }
2834  else if (inputType == OperandType::TENSOR_FLOAT32)
2835  {
2836  if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Beta, model, data))
2837  {
2838  return Fail("%s: Failed to read input 1 (FLOAT32)", __func__);
2839  }
2840  }
2841  else
2842  {
2843  return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2844  }
2845 
2846  // Read axis
2847  if (!GetInputInt32(operation, 2, descriptor.m_Axis, model, data))
2848  {
2849  return Fail("%s: Failed to read input 2", __func__);
2850  }
2851 
2852  bool isSupported = false;
2853  armnn::BackendId setBackend;
2854  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2855  {
2856  FORWARD_LAYER_SUPPORT_FUNC(__func__,
2857  IsLogSoftmaxSupported,
2858  data.m_Backends,
2859  isSupported,
2860  setBackend,
2861  input.GetTensorInfo(),
2862  outputInfo,
2863  descriptor);
2864  };
2865 
2866  if(IsDynamicTensor(outputInfo))
2867  {
2868  isSupported = AreDynamicTensorsSupported();
2869  }
2870  else
2871  {
2872  validateFunc(outputInfo, isSupported);
2873  }
2874 
2875  if (!isSupported)
2876  {
2877  return false;
2878  }
2879 
2880  IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
2881  if (!layer)
2882  {
2883  return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
2884  }
2885  layer->SetBackendId(setBackend);
2886 
2887  input.Connect(layer->GetInputSlot(0));
2888 
2889  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2890 }
2891 
2892 bool Converter::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
2893 {
2894  VLOG(DRIVER) << "Converter::ConvertLstm()";
2895 
2896  // Inputs:
2897  // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2898  // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2899  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2900  if (!input.IsValid())
2901  {
2902  return Fail("%s: Could not read input 0: input", __func__);
2903  }
2904  // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2905  LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
2906  if (!outputStateIn.IsValid())
2907  {
2908  return Fail("%s: Could not read input 18: outputStateIn", __func__);
2909  }
2910  // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2911  LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
2912  if (!cellStateIn.IsValid())
2913  {
2914  return Fail("%s: Could not read input 19: cellStateIn", __func__);
2915  }
2916 
2917  // Get the mandatory input tensors:
2918  // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2919  // [num_units, input_size].
2920  const ConstTensorPin inputToForgetWeightsPin =
2921  (DequantizeAndMakeConstTensorPin(operation, model, data, 2));
2922  // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2923  // [num_units, input_size].
2924  const ConstTensorPin inputToCellWeightsPin =
2925  (DequantizeAndMakeConstTensorPin(operation, model, data, 3));
2926  // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2927  // [num_units, input_size].
2928  const ConstTensorPin inputToOutputWeightsPin =
2929  (DequantizeAndMakeConstTensorPin(operation, model, data, 4));
2930  // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2931  // [num_units, output_size].
2932  const ConstTensorPin recurrentToForgetWeightsPin =
2933  (DequantizeAndMakeConstTensorPin(operation, model, data, 6));
2934  // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2935  // [num_units, output_size].
2936  const ConstTensorPin recurrentToCellWeightsPin =
2937  (DequantizeAndMakeConstTensorPin(operation, model, data, 7));
2938  // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2939  // [num_units, output_size].
2940  const ConstTensorPin recurrentToOutputWeightsPin =
2941  (DequantizeAndMakeConstTensorPin(operation, model, data, 8));
2942  // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2943  const ConstTensorPin forgetGateBiasPin =
2944  ConvertOperationInputToConstTensorPin(operation, 13, model, data);
2945  // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2946  const ConstTensorPin cellBiasPin =
2947  ConvertOperationInputToConstTensorPin(operation, 14, model, data);
2948  // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2949  const ConstTensorPin outputGateBiasPin =
2950  ConvertOperationInputToConstTensorPin(operation, 15, model, data);
2951 
2952  if (!inputToForgetWeightsPin.IsValid() ||
2953  !inputToCellWeightsPin.IsValid() ||
2954  !inputToOutputWeightsPin.IsValid() ||
2955  !recurrentToForgetWeightsPin.IsValid() ||
2956  !recurrentToCellWeightsPin.IsValid() ||
2957  !recurrentToOutputWeightsPin.IsValid() ||
2958  !forgetGateBiasPin.IsValid() ||
2959  !cellBiasPin.IsValid() ||
2960  !outputGateBiasPin.IsValid())
2961  {
2962  return Fail("%s: Operation has invalid tensor inputs", __func__);
2963  }
2964 
2965  // Get the optional input tensors:
2966  // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2967  // [num_units, input_size], where “num_units” corresponds to the number of cell units.
2968  const ConstTensorPin inputToInputWeightsPin =
2969  (DequantizeAndMakeConstTensorPin(operation, model, data, 1, true));
2970  // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2971  // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2972  // “num_units”), or the second dimension of the “projection_weights”, if defined.
2973  const ConstTensorPin recurrentToInputWeightsPin =
2974  (DequantizeAndMakeConstTensorPin(operation, model, data, 5, true));
2975  // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2976  const ConstTensorPin cellToInputWeightsPin =
2977  (DequantizeAndMakeConstTensorPin(operation, model, data, 9, true));
2978  // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2979  const ConstTensorPin cellToForgetWeightsPin =
2980  (DequantizeAndMakeConstTensorPin(operation, model, data, 10, true));
2981  // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2982  const ConstTensorPin cellToOutputWeightsPin =
2983  (DequantizeAndMakeConstTensorPin(operation, model, data, 11, true));
2984  // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2985  const ConstTensorPin inputGateBiasPin =
2986  ConvertOperationInputToConstTensorPin(operation,
2987  12,
2988  model,
2989  data,
2990  g_DontPermute,
2991  nullptr,
2992  true);
2993 
2994  // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2995  // [output_size, num_units].
2996  const ConstTensorPin projectionWeightsPin =
2997  (DequantizeAndMakeConstTensorPin(operation, model, data, 16, true));
2998  // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2999  const ConstTensorPin projectionBiasPin =
3000  ConvertOperationInputToConstTensorPin(operation,
3001  17,
3002  model,
3003  data,
3004  g_DontPermute,
3005  nullptr,
3006  true);
3007 
3008  if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
3009  (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
3010  (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
3011  (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
3012  (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
3013  (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
3014  (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
3015  (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
3016  {
3017  return Fail("%s: Operation has invalid tensor inputs", __func__);
3018  }
3019 
3020  // Get the mandatory input scalars (actually 1-D tensors of size 1):
3021  // 20: The activation function: A value indicating the activation function:
3022  // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
3023  // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
3024  // If set to 0.0 then clipping is disabled.
3025  // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
3026  // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
3027  ActivationFn activation = ActivationFn::kActivationNone;
3028  float cellClip;
3029  float projClip;
3030  if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
3031  !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
3032  !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
3033  {
3034  return Fail("%s: Operation has invalid scalar inputs", __func__);
3035  }
3036 
3037  // Get the normalization tensors
3038  // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
3039  // Used to rescale normalized inputs to activation at input gate.
3040  const ConstTensorPin inputLayerNormWeightsPin
3041  (DequantizeAndMakeConstTensorPin(operation, model, data, 23, true));
3042 
3043  // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
3044  // Used to rescale normalized inputs to activation at forget gate.
3045  const ConstTensorPin forgetLayerNormWeightsPin =
3046  ConvertOperationInputToConstTensorPin(operation,
3047  24,
3048  model,
3049  data,
3050  g_DontPermute,
3051  nullptr,
3052  true);
3053 
3054  // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
3055  // Used to rescale normalized inputs to activation at cell gate.
3056  const ConstTensorPin cellLayerNormWeightsPin =
3057  ConvertOperationInputToConstTensorPin(operation,
3058  25,
3059  model,
3060  data,
3061  g_DontPermute,
3062  nullptr,
3063  true);
3064 
3065  // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
3066  // Used to rescale normalized inputs to activation at output gate.
3067  const ConstTensorPin outputLayerNormWeightsPin =
3068  ConvertOperationInputToConstTensorPin(operation,
3069  26,
3070  model,
3071  data,
3072  g_DontPermute,
3073  nullptr,
3074  true);
3075 
3076  // Outputs:
3077  // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
3078  // with CIFG, or [batch_size, num_units * 3] without CIFG.
3079  const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
3080  if (!scratchBuffer)
3081  {
3082  return Fail("%s: Could not read output 0: scratchBuffer", __func__);
3083  }
3084  // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
3085  const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
3086  if (!outputStateOut)
3087  {
3088  return Fail("%s: Could not read output 1: outputStateOut", __func__);
3089  }
3090  // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
3091  const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
3092  if (!cellStateOut)
3093  {
3094  return Fail("%s: Could not read output 2: cellStateOut", __func__);
3095  }
3096  // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
3097  // effectively the same as the current “output state (out)” value.
3098  const Operand* output = GetOutputOperand(operation, 3, model);
3099  if (!output)
3100  {
3101  return Fail("%s: Could not read output 3: output", __func__);
3102  }
3103 
3104  // set the params structure for the AddLstmLayer call
3105  LstmInputParams params;
3106  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
3107  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
3108  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
3109  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
3110  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
3111  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
3112  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
3113  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
3114  params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
3115  params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
3116  params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
3117  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
3118  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
3119  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
3120  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
3121  params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
3122  params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
3123  params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
3124  params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
3125  params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
3126  params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
3127 
3128  // set the layer descriptor
3129  LstmDescriptor desc;
3130  desc.m_ActivationFunc = activation;
3131  desc.m_ClippingThresCell = cellClip;
3132  desc.m_ClippingThresProj = projClip;
3133  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
3134  params.m_RecurrentToInputWeights == nullptr ||
3135  params.m_InputGateBias == nullptr);
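      // CIFG (Coupled Input and Forget Gate) is assumed whenever the input gate
      // parameters are absent; the input gate is then derived from the forget
      // gate instead of being computed from its own weights.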
3136  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
3137  params.m_CellToOutputWeights != nullptr);
3138  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
3139  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
3140  params.m_ForgetLayerNormWeights != nullptr ||
3141  params.m_CellLayerNormWeights != nullptr ||
3142  params.m_OutputLayerNormWeights != nullptr);
3143 
3144  // validate the optional input groups
3145  if (desc.m_CifgEnabled &&
3146  (params.m_InputToInputWeights != nullptr ||
3147  params.m_RecurrentToInputWeights != nullptr ||
3148  params.m_InputGateBias != nullptr))
3149  {
3150  return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
3151  " and input gate bias must be provided", __func__);
3152  }
3153 
3154  if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
3155  {
3156  return Fail("%s: projection bias should not be provided without projection weights", __func__);
3157  }
3158 
3159  if (desc.m_PeepholeEnabled &&
3160  (params.m_CellToForgetWeights == nullptr ||
3161  params.m_CellToOutputWeights == nullptr ||
3162  (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
3163  {
3164  return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
3165  " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
3166  }
3167 
3168  if (desc.m_LayerNormEnabled &&
3169  (params.m_ForgetLayerNormWeights == nullptr ||
3170  params.m_CellLayerNormWeights == nullptr ||
3171  params.m_OutputLayerNormWeights == nullptr ||
3172  (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
3173  {
3174  return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
3175  " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
3176  }
3177 
3178  // Check if the layer is supported
3179  // Inputs
3180  const TensorInfo& inputInfo = input.GetTensorInfo();
3181  const TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
3182  const TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
3183 
3184  // Outputs
3185  const TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
3186  const TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
3187  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
3188  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3189 
3190  // Basic parameters
3191  LstmInputParamsInfo paramsInfo;
3192  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
3193  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
3194  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
3195  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
3196  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
3197  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
3198  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
3199  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
3200  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
3201 
3202  // Optional parameters
3203  if (!desc.m_CifgEnabled)
3204  {
3205  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
3206  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
3207  if (params.m_CellToInputWeights != nullptr)
3208  {
3209  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
3210  }
3211  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
3212  }
3213 
3214  if (desc.m_ProjectionEnabled)
3215  {
3216  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
3217  if (params.m_ProjectionBias != nullptr)
3218  {
3219  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
3220  }
3221  }
3222 
3223  if (desc.m_PeepholeEnabled)
3224  {
3225  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
3226  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
3227  }
3228 
3229  if (desc.m_LayerNormEnabled)
3230  {
3231  if(!desc.m_CifgEnabled)
3232  {
3233  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
3234  }
3235  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
3236  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
3237  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
3238  }
3239 
3240  bool isSupported = false;
3241  armnn::BackendId setBackend;
3242  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3243  {
3244  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3245  IsLstmSupported,
3246  data.m_Backends,
3247  isSupported,
3248  setBackend,
3249  inputInfo,
3250  outputStateInInfo,
3251  cellStateInInfo,
3252  scratchBufferInfo,
3253  outputStateOutInfo,
3254  cellStateOutInfo,
3255  outputInfo,
3256  desc,
3257  paramsInfo);
3258  };
3259 
3260  bool isDynamic = false;
3261  if (!IsDynamicTensor(outputStateOutInfo) &&
3262  !IsDynamicTensor(scratchBufferInfo) &&
3263  !IsDynamicTensor(cellStateOutInfo) &&
3264  !IsDynamicTensor(outputInfo))
3265  {
3266  validateFunc(outputInfo, isSupported);
3267  }
3268  else
3269  {
3270  isDynamic = true;
3271  isSupported = AreDynamicTensorsSupported();
3272  }
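 // When any output is dynamic the support check is deferred: validateFunc is run again on the
 // inferred output shape when output slot 3 is registered below.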
3273 
3274  if (!isSupported)
3275  {
3276  return false;
3277  }
3278 
3279  // Add the layer
3280  IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
3281  layer->SetBackendId(setBackend);
3282 
3283  input.Connect(layer->GetInputSlot(0));
3284  outputStateIn.Connect(layer->GetInputSlot(1));
3285  cellStateIn.Connect(layer->GetInputSlot(2));
3286 
3287  if (!isDynamic)
3288  {
3289  return (
3290  SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3291  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3292  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3293  SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
3294  }
3295  else
3296  {
3297  return (
3298  SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3299  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3300  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3301  SetupAndTrackLayerOutputSlot(
3302  operation, 3, *layer, 3, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
3303  }
3304 
3305 }
3306 
3307 bool Converter::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
3308 {
3309  VLOG(DRIVER) << "Converter::ConvertMaxPool2d()";
3310  return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Max, model, data);
3311 }
3312 
3313 bool Converter::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
3314 {
3315  VLOG(DRIVER) << "Converter::ConvertMean()";
3316 
3317  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3318  if (!input.IsValid())
3319  {
3320  return Fail("%s: Operation has invalid inputs", __func__);
3321  }
3322 
3323  const Operand* output = GetOutputOperand(operation, 0, model);
3324  if (!output)
3325  {
3326  return Fail("%s: Could not read output 0", __func__);
3327  }
3328 
3329  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3330 
3331  const Operand* axisOperand = GetInputOperand(operation, 1, model);
3332  if (!axisOperand)
3333  {
3334  return Fail("%s: Could not read input 1", __func__);
3335  }
3336 
3337  std::vector<int32_t> axis;
3338  if (!GetTensorInt32Values(*axisOperand, axis, model, data))
3339  {
3340  return Fail("%s: Input 1 has invalid values", __func__);
3341  }
3342 
3343  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3344 
3345  // Convert the axis to unsigned int and remove duplicates.
3346  unsigned int rank = inputInfo.GetNumDimensions();
3347  std::set<unsigned int> uniqueAxis;
3348  std::transform(axis.begin(), axis.end(),
3349  std::inserter(uniqueAxis, uniqueAxis.begin()),
3350  [rank](int i) -> unsigned int { return (i + rank) % rank; });
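 // For example, with rank 4 an axis value of -1 maps to (-1 + 4) % 4 = 3, and repeated axis
 // values collapse to a single entry in the set.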
3351 
3352  // Get the "keep dims" flag.
3353  int32_t keepDims = 0;
3354  if (!GetInputInt32(operation, 2, keepDims, model, data))
3355  {
3356  return Fail("%s: Could not read input 2", __func__);
3357  }
3358 
3359  armnn::MeanDescriptor descriptor;
3360  descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3361  descriptor.m_KeepDims = keepDims > 0;
3362 
3363  bool isSupported = false;
3364  armnn::BackendId setBackend;
3365  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3366  {
3367  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3368  IsMeanSupported,
3369  data.m_Backends,
3370  isSupported,
3371  setBackend,
3372  inputInfo,
3373  outputInfo,
3374  descriptor);
3375  };
3376 
3377  if(!IsDynamicTensor(outputInfo))
3378  {
3379  validateFunc(outputInfo, isSupported);
3380  }
3381  else
3382  {
3383  isSupported = AreDynamicTensorsSupported();
3384  }
3385 
3386  if (!isSupported)
3387  {
3388  return false;
3389  }
3390 
3391  armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
3392  assert(layer != nullptr);
3393  layer->SetBackendId(setBackend);
3394  input.Connect(layer->GetInputSlot(0));
3395 
3396  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3397 }
3398 
3399 bool Converter::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
3400 {
3401  VLOG(DRIVER) << "Converter::ConvertPad()";
3402 
3403  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3404  if (!input.IsValid())
3405  {
3406  return Fail("%s: Operation has invalid inputs", __func__);
3407  }
3408 
3409  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3410  unsigned int rank = inputInfo.GetNumDimensions();
3411 
3412  armnn::PadDescriptor descriptor;
3413  if (!ConvertPaddings(operation, model, data, rank, descriptor))
3414  {
3415  return Fail("%s: Could not convert paddings", __func__);
3416  }
3417 
3418  // For an ANEURALNETWORKS_TENSOR_QUANT8_ASYMM or ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3419  // the scale and zeroPoint must be the same as input0.
3420  // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the
3421  // pad value must be "logical zero"; we set it equal to the QuantizationOffset so that it effectively ends up
3422  // as (QuantizationOffset - QuantizationOffset) * scale = 0.
3423  if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
3424  {
3425  descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3426  }
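 // For example, a QAsymmU8 input with zeroPoint 128 and scale 0.5f is padded with the value 128,
 // which dequantizes to (128 - 128) * 0.5f = 0.0f.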
3427 
3428  const Operand* output = GetOutputOperand(operation, 0, model);
3429  if (!output)
3430  {
3431  return Fail("%s: Could not read output", __func__);
3432  }
3433 
3434  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3435 
3436  bool isSupported = false;
3437  armnn::BackendId setBackend;
3438  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3439  {
3440  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3441  IsPadSupported,
3442  data.m_Backends,
3443  isSupported,
3444  setBackend,
3445  inputInfo,
3446  outputInfo,
3447  descriptor);
3448  };
3449 
3450  if(!IsDynamicTensor(outputInfo))
3451  {
3452  validateFunc(outputInfo, isSupported);
3453  }
3454  else
3455  {
3456  isSupported = AreDynamicTensorsSupported();
3457  }
3458 
3459  if (!isSupported)
3460  {
3461  return false;
3462  }
3463 
3464  armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3465  assert(layer != nullptr);
3466  layer->SetBackendId(setBackend);
3467  input.Connect(layer->GetInputSlot(0));
3468 
3469  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3470 }
3471 
3472 bool Converter::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
3473 {
3474  VLOG(DRIVER) << "Converter::ConvertPadV2()";
3475 
3476  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3477  if (!input.IsValid())
3478  {
3479  return Fail("%s: Could not read input 0", __func__);
3480  }
3481 
3482  const Operand* output = GetOutputOperand(operation, 0, model);
3483  if (!output)
3484  {
3485  return Fail("%s: Could not read output", __func__);
3486  }
3487 
3488  const TensorInfo& inputInfo = input.GetTensorInfo();
3489  unsigned int rank = inputInfo.GetNumDimensions();
3490 
3491  PadDescriptor descriptor;
3492  if (!ConvertPaddings(operation, model, data, rank, descriptor))
3493  {
3494  return Fail("%s: Could not convert paddings", __func__);
3495  }
3496 
3497  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3498 
3499  // Determine type of padding value
3500  OperandType operandType0;
3501  OperandType operandType2;
3502 
3503  if (!GetOperandType(operation, 0, model, operandType0) ||
3504  !GetOperandType(operation, 2, model, operandType2))
3505  {
3506  return Fail("%s: Operation has invalid inputs", __func__);
3507  }
3508 
3509  // Read value to use for padding
3510  if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
3511  {
3512  Half f16PadValue;
3513  if (!GetInputScalar(operation, 2, operandType2, f16PadValue, model, data))
3514  {
3515  return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
3516  }
3517 
3518  descriptor.m_PadValue = f16PadValue;
3519  }
3520  else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
3521  {
3522  if (!GetInputFloat32(operation, 2, descriptor.m_PadValue, model, data))
3523  {
3524  return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
3525  }
3526  }
3527  else if (isQuantizedOperand(operandType0) && operandType2 == OperandType::INT32)
3528  {
3529  int32_t intPadValue = 0;
3530  if (!GetInputInt32(operation, 2, intPadValue, model, data))
3531  {
3532  return Fail("%s: Could not read input 2 (INT32)", __func__);
3533  }
3534  descriptor.m_PadValue = intPadValue;
3535  }
3536  else
3537  {
3538  return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
3539  }
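 // The accepted pairings are therefore: TENSOR_FLOAT16 with a FLOAT16 scalar, TENSOR_FLOAT32 with
 // a FLOAT32 scalar, and a quantized tensor with an INT32 scalar.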
3540 
3541  bool isSupported = false;
3542  armnn::BackendId setBackend;
3543  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3544  {
3545  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3546  IsPadSupported,
3547  data.m_Backends,
3548  isSupported,
3549  setBackend,
3550  inputInfo,
3551  outputInfo,
3552  descriptor);
3553  };
3554 
3555  if(IsDynamicTensor(outputInfo))
3556  {
3557  isSupported = AreDynamicTensorsSupported();
3558  }
3559  else
3560  {
3561  validateFunc(outputInfo, isSupported);
3562  }
3563 
3564  if (!isSupported)
3565  {
3566  return false;
3567  }
3568 
3569  IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
3570  assert(layer != nullptr);
3571  layer->SetBackendId(setBackend);
3572  input.Connect(layer->GetInputSlot(0));
3573 
3574  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3575 }
3576 
3577 bool Converter::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
3578 {
3579  VLOG(DRIVER) << "Converter::ConvertPrelu()";
3580 
3581  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3582  LayerInputHandle alpha = ConvertToLayerInputHandle(operation, 1, model, data);
3583 
3584  if (!input.IsValid() || !alpha.IsValid())
3585  {
3586  return Fail("%s: Operation has invalid inputs", __func__);
3587  }
3588 
3589  const Operand* output = GetOutputOperand(operation, 0, model);
3590 
3591  if (!output)
3592  {
3593  return Fail("%s: Could not read output", __func__);
3594  }
3595 
3596  const TensorInfo& inputInfo = input.GetTensorInfo();
3597  const TensorInfo& alphaInfo = alpha.GetTensorInfo();
3598  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3599 
3600  bool isSupported = false;
3601  armnn::BackendId setBackend;
3602  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3603  {
3604  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3605  IsPreluSupported,
3606  data.m_Backends,
3607  isSupported,
3608  setBackend,
3609  inputInfo,
3610  alphaInfo,
3611  outputInfo);
3612  };
3613 
3614  if(IsDynamicTensor(outputInfo))
3615  {
3616  isSupported = AreDynamicTensorsSupported();
3617  }
3618  else
3619  {
3620  validateFunc(outputInfo, isSupported);
3621  }
3622 
3623  if (!isSupported)
3624  {
3625  return false;
3626  }
3627 
3628  IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
3629 
3630  if (!layer)
3631  {
3632  return Fail("%s: AddPreluLayer failed", __func__);
3633  }
3634  layer->SetBackendId(setBackend);
3635 
3636  bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
3637  if (!isReshapeSupported)
3638  {
3639  return false;
3640  }
3641 
3642  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3643 }
3644 
3645 bool Converter::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
3646 {
3647  VLOG(DRIVER) << "Converter::ConvertQuantize()";
3648 
3649  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3650  if (!input.IsValid())
3651  {
3652  return Fail("%s: Operation has invalid input", __func__);
3653  }
3654 
3655  const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
3656  if (!outputOperand)
3657  {
3658  return Fail("%s: Operation has invalid outputs", __func__);
3659  }
3660 
3661  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3662 
3663  bool isSupported = false;
3664  armnn::BackendId setBackend;
3665  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3666  {
3667  FORWARD_LAYER_SUPPORT_FUNC(__func__,
3668  IsQuantizeSupported,
3669  data.m_Backends,
3670  isSupported,
3671  setBackend,
3672  input.GetTensorInfo(),
3673  outputInfo);
3674  };
3675 
3676  if(IsDynamicTensor(outputInfo))
3677  {
3678  isSupported = AreDynamicTensorsSupported();
3679  }
3680  else
3681  {
3682  validateFunc(outputInfo, isSupported);
3683  }
3684 
3685  if (!isSupported)
3686  {
3687  return false;
3688  }
3689 
3690  IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
3691  assert(layer != nullptr);
3692  layer->SetBackendId(setBackend);
3693  input.Connect(layer->GetInputSlot(0));
3694 
3695  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3696 }
3697 
3698 bool Converter::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
3699 {
3700  VLOG(DRIVER) << "Converter::ConvertQuantizedLstm()";
3703 
3704  // Inputs:
3705  // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
3706  // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
3707  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3708  if (!input.IsValid())
3709  {
3710  return Fail("%s: Could not read input 0: input", __func__);
3711  }
3712 
3713  // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, of shape [batch_size, output_size].
3714  LayerInputHandle outputStatePrevTimeStep = ConvertToLayerInputHandle(operation, 18, model, data);
3715  if (!outputStatePrevTimeStep.IsValid())
3716  {
3717  return Fail("%s: Could not read input 18: outputStatePrevTimeStep", __func__);
3718  }
3719 
3720  // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
3721  LayerInputHandle cellStatePrevTimeStep = ConvertToLayerInputHandle(operation, 19, model, data);
3722  if (!cellStatePrevTimeStep.IsValid())
3723  {
3724  return Fail("%s: Could not read input 19: cellStatePrevTimeStep", __func__);
3725  }
3726 
3727  // Get the mandatory input tensors:
3728 
3729  // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3730  // [num_units, input_size].
3731  const ConstTensorPin inputToForgetWeightsPin =
3732  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
3733 
3734  // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3735  // [num_units, input_size].
3736  const ConstTensorPin inputToCellWeightsPin =
3737  ConvertOperationInputToConstTensorPin(operation, 3, model, data);
3738 
3739  // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3740  // [num_units, input_size].
3741  const ConstTensorPin inputToOutputWeightsPin =
3742  ConvertOperationInputToConstTensorPin(operation, 4, model, data);
3743 
3744  // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3745  // [num_units, output_size].
3746  const ConstTensorPin recurrentToForgetWeightsPin =
3747  ConvertOperationInputToConstTensorPin(operation, 6, model, data);
3748 
3749  // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3750  // [num_units, output_size].
3751  const ConstTensorPin recurrentToCellWeightsPin =
3752  ConvertOperationInputToConstTensorPin(operation, 7, model, data);
3753 
3754  // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3755  // [num_units, output_size].
3756  const ConstTensorPin recurrentToOutputWeightsPin =
3757  ConvertOperationInputToConstTensorPin(operation, 8, model, data);
3758 
3759  // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3760  const ConstTensorPin forgetGateBiasPin =
3761  ConvertOperationInputToConstTensorPin(operation, 13, model, data);
3762 
3763  // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3764  const ConstTensorPin cellBiasPin =
3765  ConvertOperationInputToConstTensorPin(operation, 14, model, data);
3766 
3767  // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3768  const ConstTensorPin outputGateBiasPin =
3769  ConvertOperationInputToConstTensorPin(operation, 15, model, data);
3770 
3771  if (!inputToForgetWeightsPin.IsValid() ||
3772  !inputToCellWeightsPin.IsValid() ||
3773  !inputToOutputWeightsPin.IsValid() ||
3774  !recurrentToForgetWeightsPin.IsValid() ||
3775  !recurrentToCellWeightsPin.IsValid() ||
3776  !recurrentToOutputWeightsPin.IsValid() ||
3777  !forgetGateBiasPin.IsValid() ||
3778  !cellBiasPin.IsValid() ||
3779  !outputGateBiasPin.IsValid())
3780  {
3781  return Fail("%s: Operation has invalid tensor inputs", __func__);
3782  }
3783 
3784  // Get the optional input tensors:
3785 
3786  // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3787  // [num_units, input_size], where “num_units” corresponds to the number of cell units.
3788  const ConstTensorPin inputToInputWeightsPin =
3789  ConvertOperationInputToConstTensorPin(operation,
3790  1,
3791  model,
3792  data,
3793  g_DontPermute,
3794  nullptr,
3795  true);
3796 
3797  // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3798  // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
3799  // “num_units”), or the second dimension of the “projection_weights”, if defined.
3800  const ConstTensorPin recurrentToInputWeightsPin =
3801  ConvertOperationInputToConstTensorPin(operation,
3802  5,
3803  model,
3804  data,
3805  g_DontPermute,
3806  nullptr,
3807  true);
3808 
3809  // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
3810  // [num_units].
3811  const ConstTensorPin cellToInputWeightsPin =
3812  ConvertOperationInputToConstTensorPin(operation,
3813  9,
3814  model,
3815  data,
3816  g_DontPermute,
3817  nullptr,
3818  true);
3819 
3820  // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
3821  // [num_units].
3822  const ConstTensorPin cellToForgetWeightsPin =
3823  ConvertOperationInputToConstTensorPin(operation,
3824  10,
3825  model,
3826  data,
3827  g_DontPermute,
3828  nullptr,
3829  true);
3830 
3831  // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
3832  // [num_units].
3833  const ConstTensorPin cellToOutputWeightsPin =
3834  ConvertOperationInputToConstTensorPin(operation,
3835  11,
3836  model,
3837  data,
3838  g_DontPermute,
3839  nullptr,
3840  true);
3841 
3842  // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3843  const ConstTensorPin inputGateBiasPin =
3844  ConvertOperationInputToConstTensorPin(operation,
3845  12,
3846  model,
3847  data,
3848  g_DontPermute,
3849  nullptr,
3850  true);
3851 
3852  // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3853  // [output_size, num_units].
3854  const ConstTensorPin projectionWeightsPin =
3855  ConvertOperationInputToConstTensorPin(operation,
3856  16,
3857  model,
3858  data,
3859  g_DontPermute,
3860  nullptr,
3861  true);
3862 
3863  // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [output_size].
3864  const ConstTensorPin projectionBiasPin =
3865  ConvertOperationInputToConstTensorPin(operation,
3866  17,
3867  model,
3868  data,
3869  g_DontPermute,
3870  nullptr,
3871  true);
3872 
3873  if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional())
3874  || (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional())
3875  || (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional())
3876  || (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional())
3877  || (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional())
3878  || (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional())
3879  || (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional())
3880  || (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
3881  {
3882  return Fail("%s: Operation has invalid tensor inputs", __func__);
3883  }
3884 
3885 
3886  // Get the optional normalization tensors
3887 
3888  // 20: The input layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
3889  // Used to rescale normalized inputs to activation at input gate.
3890  const ConstTensorPin inputLayerNormWeightsPin =
3891  ConvertOperationInputToConstTensorPin(operation,
3892  20,
3893  model,
3894  data,
3895  g_DontPermute,
3896  nullptr,
3897  true);
3898 
3899  // 21: The forget layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM
3900  // Used to rescale normalized inputs to activation at forget gate.
3901  const ConstTensorPin forgetLayerNormWeightsPin =
3902  ConvertOperationInputToConstTensorPin(operation,
3903  21,
3904  model,
3905  data,
3906  g_DontPermute,
3907  nullptr,
3908  true);
3909 
3910  // 22: The cell layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
3911  // Used to rescale normalized inputs to activation at cell gate.
3912  const ConstTensorPin cellLayerNormWeightsPin =
3913  ConvertOperationInputToConstTensorPin(operation,
3914  22,
3915  model,
3916  data,
3917  g_DontPermute,
3918  nullptr,
3919  true);
3920 
3921  // 23: The output layer normalization weights. A 1-D tensor of shape [num_units].
3922  // Used to rescale normalized inputs to activation at output gate.
3923  const ConstTensorPin outputLayerNormWeightsPin =
3924  ConvertOperationInputToConstTensorPin(operation,
3925  23,
3926  model,
3927  data,
3928  g_DontPermute,
3929  nullptr,
3930  true);
3931 
3932  if ((!inputLayerNormWeightsPin.IsValid() && !inputLayerNormWeightsPin.IsOptional())
3933  || (!forgetLayerNormWeightsPin.IsValid() && !forgetLayerNormWeightsPin.IsOptional())
3934  || (!cellLayerNormWeightsPin.IsValid() && !cellLayerNormWeightsPin.IsOptional())
3935  || (!outputLayerNormWeightsPin.IsValid() && !outputLayerNormWeightsPin.IsOptional()))
3936  {
3937  return Fail("%s: Operation has invalid tensor inputs", __func__);
3938  }
3939 
3940  // Get the optional input scalars:
3941  // 24: The cell clip: If provided the cell state is clipped by this value prior to the cell output activation.
3942  // 25: The projection clip: If provided and projection is enabled, this is used for clipping the projected values.
3943 
3944  // Get the mandatory input scalars:
3945  // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
3946  // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
3947  // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
3948  // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
3949  // 30: The zero point of the hidden state, i.e. input to projection.
3950  // 31: The scale of the hidden state, i.e. input to projection.
3951  float cellClip, projClip, matMulInputGate, matMulForgetGate, matMulCellGate, matMulOutputGate, projInputScale;
3952  int projInputZeroPoint;
3953 
3954  if (!GetInputScalar(operation, 24, OperandType::FLOAT32, cellClip, model, data, true) ||
3955  !GetInputScalar(operation, 25, OperandType::FLOAT32, projClip, model, data, true) ||
3956  !GetInputScalar(operation, 26, OperandType::FLOAT32, matMulInputGate, model, data) ||
3957  !GetInputScalar(operation, 27, OperandType::FLOAT32, matMulForgetGate, model, data) ||
3958  !GetInputScalar(operation, 28, OperandType::FLOAT32, matMulCellGate, model, data) ||
3959  !GetInputScalar(operation, 29, OperandType::FLOAT32, matMulOutputGate, model, data) ||
3960  !GetInputScalar(operation, 30, OperandType::INT32, projInputZeroPoint, model, data) ||
3961  !GetInputScalar(operation, 31, OperandType::FLOAT32, projInputScale, model, data))
3962  {
3963  return Fail("%s: Operation has invalid scalar inputs", __func__);
3964  }
3965 
3966  // Outputs:
3967  // 0: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size,
3968  // output_size].
3969  const Operand* outputStateOut = GetOutputOperand(operation, 0, model);
3970  if (!outputStateOut)
3971  {
3972  return Fail("%s: Could not read output 0: outputStateOut", __func__);
3973  }
3974 
3975  // 1: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
3976  const Operand* cellStateOut = GetOutputOperand(operation, 1, model);
3977  if (!cellStateOut)
3978  {
3979  return Fail("%s: Could not read output 1: cellStateOut", __func__);
3980  }
3981 
3982  // 2: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size, output_size].
3983  // This is effectively the same as the current “output state (out)” value.
3984  const Operand* output = GetOutputOperand(operation, 2, model);
3985  if (!output)
3986  {
3987  return Fail("%s: Could not read output 2: output", __func__);
3988  }
3989 
3990  // set the params structure for the AddQLstmLayer call
3991  LstmInputParams params;
3992  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
3993  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
3994  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
3995  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
3996  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
3997  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
3998  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
3999  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
4000  params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
4001  params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
4002  params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
4003  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
4004  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
4005  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4006  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4007  params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
4008  params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
4009  params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
4010  params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
4011  params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
4012  params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
4013 
4014  // set the layer descriptor
4015  QLstmDescriptor desc;
4016  desc.m_CellClip = cellClip;
4017  desc.m_ProjectionClip = projClip;
4018  desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
4019  params.m_RecurrentToInputWeights == nullptr ||
4020  params.m_InputGateBias == nullptr);
4021  desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
4022  params.m_CellToOutputWeights != nullptr);
4023  desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
4024  desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
4025  params.m_ForgetLayerNormWeights != nullptr ||
4026  params.m_CellLayerNormWeights != nullptr ||
4027  params.m_OutputLayerNormWeights != nullptr);
4028  desc.m_InputIntermediateScale = matMulInputGate;
4029  desc.m_ForgetIntermediateScale = matMulForgetGate;
4030  desc.m_CellIntermediateScale = matMulCellGate;
4031  desc.m_OutputIntermediateScale = matMulOutputGate;
4032  desc.m_HiddenStateScale = projInputScale;
4033  desc.m_HiddenStateZeroPoint = projInputZeroPoint;
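 // The four intermediate scales quantize the gate matmul results that feed layer normalization,
 // while the hidden state scale and zero point describe the quantized hidden state fed to the
 // projection.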
4034 
4035  // validate the optional input groups
4036  if (desc.m_CifgEnabled &&
4037  (params.m_InputToInputWeights != nullptr ||
4038  params.m_RecurrentToInputWeights != nullptr ||
4039  params.m_InputGateBias != nullptr))
4040  {
4041  return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
4042  " and input gate bias must be provided", __func__);
4043  }
4044 
4045  if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
4046  {
4047  return Fail("%s: projection bias should not be provided without projection weights", __func__);
4048  }
4049 
4050  if (desc.m_PeepholeEnabled &&
4051  (params.m_CellToForgetWeights == nullptr ||
4052  params.m_CellToOutputWeights == nullptr ||
4053  (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
4054  {
4055  return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
4056  " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
4057  }
4058 
4059  if (desc.m_LayerNormEnabled &&
4060  (params.m_ForgetLayerNormWeights == nullptr ||
4061  params.m_CellLayerNormWeights == nullptr ||
4062  params.m_OutputLayerNormWeights == nullptr ||
4063  (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
4064  {
4065  return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
4066  " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
4067  }
4068 
4069  // Basic parameters
4070  LstmInputParamsInfo paramsInfo;
4071  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4072  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4073  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4074  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4075  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4076  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4077  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4078  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4079  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4080 
4081  // Inputs
4082  const TensorInfo& inputInfo = input.GetTensorInfo();
4083  const TensorInfo& outputStatePrevTimeStepInfo = outputStatePrevTimeStep.GetTensorInfo();
4084  const TensorInfo& cellStatePrevTimeStepInfo = cellStatePrevTimeStep.GetTensorInfo();
4085 
4086  // Outputs
4087  TensorInfo outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
4088  TensorInfo outputInfo = GetTensorInfoForOperand(*output);
4089  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4090 
4091  // Optional parameters
4092  if (!desc.m_CifgEnabled)
4093  {
4094  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4095  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4096  if (desc.m_PeepholeEnabled)
4097  {
4098  paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
4099  }
4100  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4101  }
4102 
4103 
4104  if (desc.m_ProjectionEnabled)
4105  {
4106  paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
4107  if (params.m_ProjectionBias != nullptr)
4108  {
4109  paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
4110  }
4111  }
4112  else
4113  {
4114  // If Projection is disabled, override non-const outputs to change the quant info with hidden params, then
4115  // create a new const TensorInfo based on this
4116  outputStateOutInfo.SetQuantizationScale(projInputScale);
4117  outputStateOutInfo.SetQuantizationOffset(projInputZeroPoint);
4118  outputInfo.SetQuantizationScale(projInputScale);
4119  outputInfo.SetQuantizationOffset(projInputZeroPoint);
4120  }
4121 
4122  const TensorInfo constOutputStateOutInfo(outputStateOutInfo);
4123  const TensorInfo constOutputInfo(outputInfo);
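 // Without a projection the hidden state is written to the outputs directly, so both output
 // tensors must carry the hidden state's quantization parameters for the support check below.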
4124 
4125  if (desc.m_PeepholeEnabled)
4126  {
4127  paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
4128  paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
4129  }
4130 
4131  if (desc.m_LayerNormEnabled)
4132  {
4133  if(!desc.m_CifgEnabled)
4134  {
4135  paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
4136  }
4137  paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
4138  paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
4139  paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
4140  }
4141 
4142  // Check if the layer is supported
4143  bool isSupported = false;
4144  armnn::BackendId setBackend;
4145  auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
4146  {
4147  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4148  IsQLstmSupported,
4149  data.m_Backends,
4150  isSupported,
4151  setBackend,
4152  inputInfo,
4153  outputStatePrevTimeStepInfo,
4154  cellStatePrevTimeStepInfo,
4155  constOutputStateOutInfo,
4156  cellStateOutInfo,
4157  constOutputInfo,
4158  desc,
4159  paramsInfo);
4160  };
4161 
4162  bool isDynamic = false;
4163  if (!IsDynamicTensor(constOutputStateOutInfo) &&
4164  !IsDynamicTensor(cellStateOutInfo) &&
4165  !IsDynamicTensor(constOutputInfo))
4166  {
4167  validateFunc(cellStateOutInfo, isSupported);
4168  }
4169  else
4170  {
4171  isDynamic = true;
4172  isSupported = AreDynamicTensorsSupported();
4173  }
4174 
4175  if (!isSupported)
4176  {
4177  return false;
4178  }
4179 
4180  // Add the layer
4181  IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
4182  layer->SetBackendId(setBackend);
4183 
4184  input.Connect(layer->GetInputSlot(0));
4185  outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
4186  cellStatePrevTimeStep.Connect(layer->GetInputSlot(2));
4187 
4188  if (!isDynamic)
4189  {
4190  return (SetupAndTrackLayerOutputSlot(
4191  operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4192  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
4193  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4194  }
4195  else
4196  {
4197  return (SetupAndTrackLayerOutputSlot(
4198  operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4199  SetupAndTrackLayerOutputSlot(
4200  operation, 1, *layer, 1, model, data, nullptr, validateFunc,
4201  ActivationFn::kActivationNone, true) &&
4202  SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4203  }
4204 }
4205 
4206 bool Converter::ConvertQuantized16BitLstm(const Operation& operation, const Model& model, ConversionData& data)
4207 {
4208  VLOG(DRIVER) << "Converter::ConvertQuantized16BitLstm()";
4210 
4211  // Inputs:
4212  // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
4213  // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
4214  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4215  if (!input.IsValid())
4216  {
4217  return Fail("%s: Could not read input 0: input", __func__);
4218  }
4219 
4220  // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
4221  // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
4222  // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
4223  LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle(operation, 13, model, data);
4224  if (!previousCellStateIn.IsValid())
4225  {
4226  return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
4227  }
4228 
4229  // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4230  // [numBatches, outputSize] specifying the output of the LSTM cell from the previous time step. Tensor
4231  // is quantized with a fixed quantization range of -1, 127/128.
4232  LayerInputHandle previousOutputIn = ConvertToLayerInputHandle(operation, 14, model, data);
4233  if (!previousOutputIn.IsValid())
4234  {
4235  return Fail("%s: Could not read input 14: previousOutputIn", __func__);
4236  }
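 // The fixed ranges above imply concrete quantization parameters: QUANT16_SYMM over
 // [-2^4, 2^4 * 32767/32768] has scale 2^4 / 2^15 = 1/2048, and QUANT8_ASYMM over [-1, 127/128]
 // has scale 1/128 with zero point 128.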
4237 
4238  // Get the input tensors:
4239  // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4240  // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
4241  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4242  const ConstTensorPin inputToInputWeightsPin =
4243  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
4244 
4245  // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4246  // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
4247  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4248  const ConstTensorPin inputToForgetWeightsPin =
4249  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
4250 
4251  // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4252  // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
4253  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4254  const ConstTensorPin inputToCellWeightsPin =
4255  ConvertOperationInputToConstTensorPin(operation, 3, model, data);
4256 
4257  // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4258  // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
4259  // LSTM cell. Quantization zero point and scale must be the same across all the weights.
4260  const ConstTensorPin inputToOutputWeightsPin =
4261  ConvertOperationInputToConstTensorPin(operation, 4, model, data);
4262 
4263  // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4264  // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
4265  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4266  const ConstTensorPin recurrentToInputWeightsPin =
4267  ConvertOperationInputToConstTensorPin(operation, 5, model, data);
4268 
4269  // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4270  // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
4271  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4272  const ConstTensorPin recurrentToForgetWeightsPin =
4273  ConvertOperationInputToConstTensorPin(operation, 6, model, data);
4274 
4275  // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4276  // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
4277  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4278  const ConstTensorPin recurrentToCellWeightsPin =
4279  ConvertOperationInputToConstTensorPin(operation, 7, model, data);
4280 
4281  // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
4282  // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
4283  // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
4284  const ConstTensorPin recurrentToOutputWeightsPin =
4285  ConvertOperationInputToConstTensorPin(operation, 8, model, data);
4286 
4287  // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
4288  // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4289  // of input and weights scales and zeroPoint equal to 0.
4290  const ConstTensorPin inputGateBiasPin =
4291  ConvertOperationInputToConstTensorPin(operation, 9, model, data);
4292 
4293  // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
4294  // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4295  // of input and weights scales and zeroPoint equal to 0.
4296  const ConstTensorPin forgetGateBiasPin =
4297  ConvertOperationInputToConstTensorPin(operation, 10, model, data);
4298 
4299  // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
4300  // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
4301  // and weights scales and zeroPoint equal to 0.
4302  const ConstTensorPin cellBiasPin =
4303  ConvertOperationInputToConstTensorPin(operation, 11, model, data);
4304 
4305  // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
4306  // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
4307  // of input and weights scales and zeroPoint equal to 0.
4308  const ConstTensorPin outputGateBiasPin =
4309  ConvertOperationInputToConstTensorPin(operation, 12, model, data);
4310 
4311  if (!inputToInputWeightsPin.IsValid() ||
4312  !inputToForgetWeightsPin.IsValid() ||
4313  !inputToCellWeightsPin.IsValid() ||
4314  !inputToOutputWeightsPin.IsValid() ||
4315  !recurrentToInputWeightsPin.IsValid() ||
4316  !recurrentToForgetWeightsPin.IsValid() ||
4317  !recurrentToCellWeightsPin.IsValid() ||
4318  !recurrentToOutputWeightsPin.IsValid() ||
4319  !inputGateBiasPin.IsValid() ||
4320  !forgetGateBiasPin.IsValid() ||
4321  !cellBiasPin.IsValid() ||
4322  !outputGateBiasPin.IsValid())
4323  {
4324  return Fail("%s: Operation has invalid tensor inputs", __func__);
4325  }
4326 
4327  // Outputs:
4328  // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
4329  // which contains a cell state from the current time step. Tensor is quantized using a quantization range
4330  // of -2^4, 2^4 * 32767/32768.
4331  const Operand* cellStateOut = GetOutputOperand(operation, 0, model);
4332  if (!cellStateOut)
4333  {
4334  return Fail("%s: Could not read output 0: cellStateOut", __func__);
4335  }
4336 
4337  // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
4338  // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
4339  const Operand* output = GetOutputOperand(operation, 1, model);
4340  if (!output)
4341  {
4342  return Fail("%s: Could not read output 1: output", __func__);
4343  }
4344 
4345  // Inputs
4346  const TensorInfo& inputInfo = input.GetTensorInfo();
4347  const TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
4348  const TensorInfo& previousOutputInInfo = previousOutputIn.GetTensorInfo();
4349 
4350  // Outputs
4351  const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4352  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4353 
4354  // Dynamic tensors currently not supported
4355  if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
4356  {
4357  return Fail("%s: Dynamic output tensors are not supported", __func__);
4358  }
4359 
4360  QuantizedLstmInputParams params;
4361 
4362  params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
4363  params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
4364  params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
4365  params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
4366  params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
4367  params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
4368  params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
4369  params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
4370  params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
4371  params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
4372  params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4373  params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4374 
4375  QuantizedLstmInputParamsInfo paramsInfo;
4376  paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4377  paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4378  paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4379  paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4380  paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4381  paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4382  paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4383  paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4384  paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4385  paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4386  paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4387  paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4388 
4389  bool isSupported = false;
4390  armnn::BackendId setBackend;
4391  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4392  {
4393  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4394  IsQuantizedLstmSupported,
4395  data.m_Backends,
4396  isSupported,
4397  setBackend,
4398  inputInfo,
4399  previousCellStateInInfo,
4400  previousOutputInInfo,
4401  cellStateOutInfo,
4402  outputInfo,
4403  paramsInfo);
4404  };
4405 
4406  bool isDynamic = false;
4407  if (!IsDynamicTensor(cellStateOutInfo) &&
4408  !IsDynamicTensor(outputInfo))
4409  {
4410  validateFunc(outputInfo, isSupported);
4411  }
4412  else
4413  {
4414  isDynamic = true;
4415  isSupported = AreDynamicTensorsSupported();
4416  }
4417 
4418  if (!isSupported)
4419  {
4420  return false;
4421  }
4422 
4423  IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
4424  layer->SetBackendId(setBackend);
4425  input.Connect(layer->GetInputSlot(0));
4426  previousCellStateIn.Connect(layer->GetInputSlot(1));
4427  previousOutputIn.Connect(layer->GetInputSlot(2));
4428 
4429  if (!isDynamic)
4430  {
4431  return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
4432  SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data));
4433  }
4434  else
4435  {
4436  return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
4437  SetupAndTrackLayerOutputSlot(
4438  operation, 1, *layer, 1, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
4439  }
4440 
4441 }
4442 
4443 bool Converter::ConvertRank(const Operation& operation, const Model& model, ConversionData& data)
4444 {
4445  VLOG(DRIVER) << "Converter::ConvertRank()";
4446 
4447  const Operand* inputOperand = GetInputOperand(operation, 0, model);
4448  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4449 
4450  if (inputOperand == nullptr || outputOperand == nullptr)
4451  {
4452  return Fail("%s: Operation has invalid inputs", __func__);
4453  }
4454 
4455  const Shape inputOperandShape = GetOperandShape(*inputOperand);
4456  const Shape outputOperandShape = GetOperandShape(*outputOperand);
4457 
4458  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4459  if (!input.IsValid())
4460  {
4461  return Fail("%s: Could not read input 0", __func__);
4462  }
4463 
4464  armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
4465  if (IsDynamicTensor(outInfo))
4466  {
4467  return Fail("%s: Dynamic output tensors are not supported", __func__);
4468  }
4469 
4470  bool isSupported = false;
4471  armnn::BackendId setBackend;
4472  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4473  IsRankSupported,
4474  data.m_Backends,
4475  isSupported,
4476  setBackend,
4477  input.GetTensorInfo(),
4478  outInfo);
4479  if (!isSupported)
4480  {
4481  return false;
4482  }
4483 
4484  armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
4485  assert(layer != nullptr);
4486  layer->SetBackendId(setBackend);
4487  input.Connect(layer->GetInputSlot(0));
4488 
4489  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, &outInfo);
4490 }
4491 
4492 bool Converter::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
4493 {
4494  VLOG(DRIVER) << "Converter::ConvertReLu()";
4495  armnn::ActivationDescriptor desc;
4496  desc.m_Function = armnn::ActivationFunction::ReLu;
4497 
4498 
4499  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4500  if (!input.IsValid())
4501  {
4502  return Fail("%s: Input 0 is invalid", __func__);
4503  }
4504 
4505  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4506  if (!outputOperand)
4507  {
4508  return false;
4509  }
4510 
4511  const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
4512 
4513  bool isSupported = false;
4514  armnn::BackendId setBackend;
4515  auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
4516  {
4517  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4518  IsActivationSupported,
4519  data.m_Backends,
4520  isSupported,
4521  setBackend,
4522  input.GetTensorInfo(),
4523  outInfo,
4524  desc);
4525  };
4526 
4527  if(IsDynamicTensor(outInfo))
4528  {
4529  isSupported = AreDynamicTensorsSupported();
4530  }
4531  else
4532  {
4533  validateFunc(outInfo, isSupported);
4534  }
4535 
4536  if (!isSupported)
4537  {
4538  return false;
4539  }
4540 
4541  armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
4542  if (layer == nullptr)
4543  {
4544  throw armnn::NullPointerException("failed to add Activation Layer to network");
4545  }
4546  layer->SetBackendId(setBackend);
4547  input.Connect(layer->GetInputSlot(0));
4548 
4549  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4550 }
4551 
4552 bool Converter::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
4553 {
4554  VLOG(DRIVER) << "Converter::ConvertReLu1()";
4555  armnn::ActivationDescriptor desc;
4556  desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4557  desc.m_A = 1.0f;
4558  desc.m_B = -1.0f;
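 // BoundedReLu computes min(m_A, max(m_B, x)), so m_A = 1.0f and m_B = -1.0f clamp the
 // activation to the range [-1, 1].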
4559 
4560  return ConvertToActivation(operation, __func__, desc, model, data);
4561 }
4562 
4563 bool Converter::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
4564 {
4565  VLOG(DRIVER) << "Converter::ConvertReLu6()";
4566  armnn::ActivationDescriptor desc;
4567  desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4568  desc.m_A = 6.0f;
4569 
4570  return ConvertToActivation(operation, __func__, desc, model, data);
4571 }
4572 
4573 bool Converter::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
4574 {
4575  VLOG(DRIVER) << "Converter::ConvertReshape()";
4576 
4577  const Operand* inputOperand = GetInputOperand(operation, 0, model);
4578  const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
4579  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4580 
4581  if (inputOperand == nullptr
4582  || requestedShapeOperand == nullptr
4583  || outputOperand == nullptr)
4584  {
4585  return Fail("%s: Operation has invalid inputs", __func__);
4586  }
4587 
4588  if (requestedShapeOperand->dimensions.size() != 1)
4589  {
4590  return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
4591  __func__, requestedShapeOperand->dimensions.size());
4592  }
4593 
4594  std::vector<int32_t> targetDimensions;
4595  if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
4596  {
4597  return Fail("%s: Could not read values of input 1", __func__);
4598  }
4599 
4600  const Shape inputOperandShape = GetOperandShape(*inputOperand);
4601 
4602  Shape requestedShape;
4603  // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
4604  // function that resolves these values into a fully specified tensor shape.
4605  if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
4606  {
4607  return Fail("%s: Failed to resolve the requested shape", __func__);
4608  }
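 // For example, reshaping a [2, 3, 4] input (24 elements) with targetDimensions {6, -1}
 // resolves the -1 to 24 / 6 = 4, giving a requested shape of [6, 4].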
4609 
4610  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4611  if (!input.IsValid())
4612  {
4613  return Fail("%s: Could not read input 0", __func__);
4614  }
4615 
4616  armnn::ReshapeDescriptor reshapeDescriptor;
4617  reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
4618  requestedShape.dimensions.data());
4619 
4620  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
4621 
4622  bool isSupported = false;
4623  armnn::BackendId setBackend;
4624  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4625  {
4626  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4627  IsReshapeSupported,
4628  data.m_Backends,
4629  isSupported,
4630  setBackend,
4631  input.GetTensorInfo(),
4632  outputInfo,
4633  reshapeDescriptor);
4634  };
4635 
4636  if(!IsDynamicTensor(outputInfo))
4637  {
4638  validateFunc(outputInfo, isSupported);
4639  }
4640  else
4641  {
4642  isSupported = AreDynamicTensorsSupported();
4643  }
4644 
4645  if (!isSupported)
4646  {
4647  return false;
4648  }
4649 
4650  armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
4651  assert(layer != nullptr);
4652  layer->SetBackendId(setBackend);
4653  input.Connect(layer->GetInputSlot(0));
4654 
4655  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4656 }
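
reshapePrepare() resolves at most one -1 entry in targetDimensions by dividing the input's element count by the product of the known extents. A hedged standalone sketch of that arithmetic (illustrative only, not the Android NN helper; the function name is ours):

    #include <cstdint>
    #include <vector>

    // Illustrative: resolve a single -1 wildcard in a requested shape.
    // Returns false if more than one wildcard is present or element counts mismatch.
    bool ResolveWildcardDim(std::vector<int32_t>& dims, int64_t numInputElements)
    {
        int64_t knownProduct = 1;
        int wildcard = -1;
        for (size_t i = 0; i < dims.size(); ++i)
        {
            if (dims[i] == -1)
            {
                if (wildcard >= 0) { return false; } // at most one -1 is allowed
                wildcard = static_cast<int>(i);
            }
            else if (dims[i] < 1)
            {
                return false;
            }
            else
            {
                knownProduct *= dims[i];
            }
        }
        if (wildcard >= 0)
        {
            if (numInputElements % knownProduct != 0) { return false; }
            dims[wildcard] = static_cast<int32_t>(numInputElements / knownProduct);
            knownProduct *= dims[wildcard];
        }
        return knownProduct == numInputElements; // element counts must match
    }

For example, an input of 24 elements with targetDimensions {2, -1, 4} resolves to {2, 3, 4}.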
4657 
4658 bool Converter::ConvertResize(const Operation& operation,
4659  const Model& model,
4660  ConversionData& data,
4661  ResizeMethod resizeMethod)
4662 {
4663  VLOG(DRIVER) << "Converter::ConvertResize()";
4664  VLOG(DRIVER) << "resizeMethod = " << GetResizeMethodAsCString(resizeMethod);
4665 
4666  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4667  if (!input.IsValid())
4668  {
4669  return Fail("%s: Could not read input 0", __func__);
4670  }
4671 
4672  const Operand* output = GetOutputOperand(operation, 0, model);
4673  if (!output)
4674  {
4675  return Fail("%s: Could not read output 0", __func__);
4676  }
4677 
4678  const TensorInfo& inputInfo = input.GetTensorInfo();
4679  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4680 
4681  ResizeDescriptor descriptor;
4682  descriptor.m_Method = resizeMethod;
4683  descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
4684 
4685  OperandType operandType1;
4686  OperandType operandType2;
4687 
4688  if (!GetOperandType(operation, 1, model, operandType1) ||
4689  !GetOperandType(operation, 2, model, operandType2))
4690  {
4691  return Fail("%s: Operation has invalid inputs", __func__);
4692  }
4693 
4694  if (operandType1 != operandType2)
4695  {
4696  return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
4697  }
4698 
4699  if (operandType1 == OperandType::INT32)
4700  {
4701  // Case 1: resizing by shape
4702  int32_t targetWidth = 0;
4703  int32_t targetHeight = 0;
4704 
4705  if (!GetInputInt32(operation, 1, targetWidth, model, data) ||
4706  !GetInputInt32(operation, 2, targetHeight, model, data))
4707  {
4708  return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
4709  }
4710 
4711  if (targetWidth < 0 || targetHeight < 0)
4712  {
4713  return Fail("%s: Operation has invalid inputs for resizing by shape. "
4714  "Target width/height cannot be < 0", __func__);
4715  }
4716 
4717  descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
4718  descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
4719  }
4720  else if (operandType1 == OperandType::FLOAT32)
4721  {
4722  // Case 2: resizing by scale
4723  float widthScale = 1.0f;
4724  float heightScale = 1.0f;
4725 
4726  if (!GetInputFloat32(operation, 1, widthScale, model, data) ||
4727  !GetInputFloat32(operation, 2, heightScale, model, data))
4728  {
4729  return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4730  }
4731 
4732  const TensorShape& inputShape = inputInfo.GetShape();
4733  armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4734 
4735  float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
4736  float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
4737 
4738  descriptor.m_TargetWidth = std::floor(width * widthScale);
4739  descriptor.m_TargetHeight = std::floor(height * heightScale);
4740  }
4741  else if (operandType1 == OperandType::FLOAT16)
4742  {
4743  Half widthScale;
4744  Half heightScale;
4745 
4746  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, widthScale, model, data) ||
4747  !GetInputScalar(operation, 2, OperandType::FLOAT16, heightScale, model, data))
4748  {
4749  return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4750  }
4751 
4752  const TensorShape& inputShape = inputInfo.GetShape();
4753  armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4754 
4755  Half width = static_cast<Half>(inputShape[dataLayoutIndexed.GetWidthIndex()]);
4756  Half height = static_cast<Half>(inputShape[dataLayoutIndexed.GetHeightIndex()]);
4757 
4758  descriptor.m_TargetWidth = std::floor(width * widthScale);
4759  descriptor.m_TargetHeight = std::floor(height * heightScale);
4760  }
4761  else
4762  {
4763  return Fail("%s: Operand has invalid data type for resizing by scale", __func__);
4764  }
4765 
4766  descriptor.m_AlignCorners = GetOptionalBool(operation, 4, model, data);
4767  descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data);
4768 
4769  bool isSupported = false;
4770  armnn::BackendId setBackend;
4771  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4772  {
4773  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4774  IsResizeSupported,
4775  data.m_Backends,
4776  isSupported,
4777  setBackend,
4778  inputInfo,
4779  outputInfo,
4780  descriptor);
4781  };
4782 
4783  if(IsDynamicTensor(outputInfo))
4784  {
4785  isSupported = AreDynamicTensorsSupported();
4786  }
4787  else
4788  {
4789  validateFunc(outputInfo, isSupported);
4790  }
4791 
4792  if (!isSupported)
4793  {
4794  return false;
4795  }
4796 
4797  IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
4798  assert(layer != nullptr);
4799  layer->SetBackendId(setBackend);
4800  input.Connect(layer->GetInputSlot(0));
4801 
4802  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4803 }
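
In the resize-by-scale branches above, the target extent is the floored product of the input extent and the scale. A small worked sketch of that arithmetic (illustrative helper, not driver code):

    #include <cmath>
    #include <cstdint>

    // Illustrative: target extent for "resize by scale" is floor(extent * scale).
    uint32_t ScaledExtent(uint32_t extent, float scale)
    {
        return static_cast<uint32_t>(std::floor(static_cast<float>(extent) * scale));
    }
    // e.g. a 10x20 (HxW) input with both scales 0.5 gives
    // m_TargetHeight = ScaledExtent(10, 0.5f) = 5 and m_TargetWidth = ScaledExtent(20, 0.5f) = 10.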
4804 
4805 bool Converter::ConvertReverseV2(const Operation& operation, const Model& model, ConversionData& data)
4806 {
4807  VLOG(DRIVER) << "Converter::ConvertReverseV2()";
4808 
4809  LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
4810  LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
4811  if (!input0.IsValid() || !input1.IsValid())
4812  {
4813  return Fail("%s: Operation has invalid inputs", __func__);
4814  }
4815  const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
4816  const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
4817 
4818  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4819  if (!outputOperand)
4820  {
4821  return Fail("%s: Could not read output 0", __func__);
4822  }
4823  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
4824 
4825  bool isSupported = false;
4826  armnn::BackendId setBackend;
4827  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4828  {
4829  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4830  IsReverseV2Supported,
4831  data.m_Backends,
4832  isSupported,
4833  setBackend,
4834  inputInfo0,
4835  inputInfo1,
4836  outputInfo);
4837  };
4838 
4839  if(!IsDynamicTensor(outputInfo))
4840  {
4841  validateFunc(outputInfo, isSupported);
4842  }
4843  else
4844  {
4845  isSupported = AreDynamicTensorsSupported();
4846  }
4847 
4848  if (!isSupported)
4849  {
4850  return false;
4851  }
4852 
4853  armnn::IConnectableLayer* const layer = data.m_Network->AddReverseV2Layer();
4854  assert(layer != nullptr);
4855  layer->SetBackendId(setBackend);
4856  input0.Connect(layer->GetInputSlot(0));
4857  input1.Connect(layer->GetInputSlot(1));
4858 
4859  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4860 }
4861 
4862 bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
4863 {
4864  VLOG(DRIVER) << "Converter::ConvertSpaceToBatchNd()";
4865 
4866  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4867  if(!input.IsValid())
4868  {
4869  return Fail("%s: Operation has invalid inputs", __func__);
4870  }
4871 
4872  const armnn::TensorInfo &inputInfo = input.GetTensorInfo();
4873  unsigned int rank = inputInfo.GetNumDimensions();
4874  unsigned int spatialDim = rank - 2;
4875 
4876  if(rank != 4)
4877  {
4878  return Fail("%s: Only inputs with rank 4 are supported", __func__);
4879  }
4880 
4881  const Operand *output = GetOutputOperand(operation, 0, model);
4882  if(!output)
4883  {
4884  return Fail("%s: Could not read output 0", __func__);
4885  }
4886 
4887  const armnn::TensorInfo &outputInfo = GetTensorInfoForOperand(*output);
4888 
4889  const Operand *blockShapeOperand = GetInputOperand(operation, 1, model);
4890  const Operand *paddingsOperand = GetInputOperand(operation, 2, model);
4891 
4892  armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
4893  if(blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
4894  {
4895  return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
4896  }
4897 
4898  std::vector<int32_t> blockShape;
4899  if(!GetTensorInt32Values(*blockShapeOperand, blockShape, model, data))
4900  {
4901  return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
4902  }
4903  if(std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i)
4904  { return i < 1; }))
4905  {
4906  return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
4907  }
4908 
4909  armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
4910  if(paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
4911  {
4912  return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
4913  }
4914 
4915  std::vector<std::pair<unsigned int, unsigned int>> paddingList;
4916  std::vector<int32_t> paddings;
4917  if(!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
4918  {
4919  return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
4920  }
4921  for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
4922  {
4923  int paddingBeforeInput = paddings[i];
4924  int paddingAfterInput = paddings[i + 1];
4925  if(paddingBeforeInput < 0 || paddingAfterInput < 0)
4926  {
4927  return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
4928  }
4929 
4930  paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
4931  }
4932 
4933  armnn::SpaceToBatchNdDescriptor descriptor;
4934  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
4935  descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
4936  descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
4937 
4938  if(Is12OrLaterOperand(*output))
4939  {
4940  descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
4941  }
4942 
4943  bool isSupported = false;
4944  armnn::BackendId setBackend;
4945  auto validateFunc = [&](const armnn::TensorInfo &outputInfo, bool &isSupported)
4946  {
4947  FORWARD_LAYER_SUPPORT_FUNC(__func__,
4948  IsSpaceToBatchNdSupported,
4949  data.m_Backends,
4950  isSupported,
4951  setBackend,
4952  inputInfo,
4953  outputInfo,
4954  descriptor);
4955  };
4956 
4957  if(IsDynamicTensor(outputInfo))
4958  {
4959  isSupported = AreDynamicTensorsSupported();
4960  } else
4961  {
4962  validateFunc(outputInfo, isSupported);
4963  }
4964 
4965  if(!isSupported)
4966  {
4967  return false;
4968  }
4969 
4970  armnn::IConnectableLayer *const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
4971  assert(layer != nullptr);
4972  layer->SetBackendId(setBackend);
4973  input.Connect(layer->GetInputSlot(0));
4974 
4975  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4976 }
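
The paddings operand above arrives as a flat [spatialDim, 2] tensor, so the loop pairs consecutive values into (before, after) entries; for a rank-4 NHWC input that is {top, bottom} for height followed by {left, right} for width. Each padded spatial extent must then divide evenly by its block size, and the batch grows by the product of the block shape. A hedged sketch of that shape arithmetic (assuming rank-4 NHWC input and valid divisibility):

    #include <cstdint>
    #include <utility>

    // Illustrative: output spatial extent of SPACE_TO_BATCH_ND on one axis.
    uint32_t SpaceToBatchExtent(uint32_t input, std::pair<uint32_t, uint32_t> pad, uint32_t block)
    {
        return (input + pad.first + pad.second) / block;
    }
    // e.g. height 5 with padding {1, 2} and block 2 gives output height 4,
    // while the output batch becomes inputBatch * blockHeight * blockWidth.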
4977 
4978 bool Converter::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
4979 {
4980  VLOG(DRIVER) << "Converter::ConvertSpaceToDepth()";
4981 
4982  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4983  if (!input.IsValid())
4984  {
4985  return Fail("%s: Operation has invalid inputs", __func__);
4986  }
4987 
4988  const TensorInfo& inputInfo = input.GetTensorInfo();
4989  unsigned int rank = inputInfo.GetNumDimensions();
4990  if (rank != 4)
4991  {
4992  return Fail("%s: Only inputs with rank 4 are supported", __func__);
4993  }
4994 
4995  const Operand* output = GetOutputOperand(operation, 0, model);
4996  if (!output)
4997  {
4998  return Fail("%s: Could not read output 0", __func__);
4999  }
5000 
5001  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5002 
5003  SpaceToDepthDescriptor desc;
5004 
5005  GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
5006 
5007  if (desc.m_BlockSize <= 1)
5008  {
5009  return Fail("%s: Block size must be greater than 1", __func__);
5010  }
5011 
5012  desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
5013 
5014  bool isSupported = false;
5015  armnn::BackendId setBackend;
5016  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5017  {
5018  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5019  IsSpaceToDepthSupported,
5020  data.m_Backends,
5021  isSupported,
5022  setBackend,
5023  inputInfo,
5024  outputInfo,
5025  desc);
5026  };
5027 
5028  if(IsDynamicTensor(outputInfo))
5029  {
5030  isSupported = AreDynamicTensorsSupported();
5031  }
5032  else
5033  {
5034  validateFunc(outputInfo, isSupported);
5035  }
5036 
5037  if (!isSupported)
5038  {
5039  return false;
5040  }
5041 
5042  IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
5043  assert(layer != nullptr);
5044  layer->SetBackendId(setBackend);
5045  input.Connect(layer->GetInputSlot(0));
5046 
5047  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5048 }
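
SPACE_TO_DEPTH with block size b moves each b x b spatial patch into the channel dimension, so height and width shrink by a factor of b while the channel count grows by b squared. A quick sketch of that shape relation (illustrative helper, assuming a rank-4 NHWC input whose spatial extents are divisible by b):

    #include <array>
    #include <cstdint>

    // Illustrative: NHWC output shape of SPACE_TO_DEPTH with block size b.
    std::array<uint32_t, 4> SpaceToDepthShape(const std::array<uint32_t, 4>& nhwc, uint32_t b)
    {
        return { nhwc[0], nhwc[1] / b, nhwc[2] / b, nhwc[3] * b * b };
    }
    // e.g. {1, 4, 6, 3} with b = 2 -> {1, 2, 3, 12}.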
5049 
5050 bool Converter::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
5051 {
5052  VLOG(DRIVER) << "Converter::ConvertSoftmax()";
5053 
5054  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5055  if (!input.IsValid())
5056  {
5057  return Fail("%s: Operation has invalid inputs", __func__);
5058  }
5059 
5060  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
5061  if (!outputOperand)
5062  {
5063  return Fail("%s: Operation has no outputs", __func__);
5064  }
5065 
5066  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
5067 
5068  SoftmaxDescriptor desc;
5069  OperandType outputType = outputOperand->type;
5070 
5071  // Read beta value
5072  if (outputType == OperandType::TENSOR_FLOAT16)
5073  {
5074  Half value;
5075 
5076  if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
5077  {
5078  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5079  }
5080 
5081  desc.m_Beta = static_cast<float>(value);
5082  }
5083  else
5084  {
5085  if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
5086  {
5087  return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5088  }
5089  }
5090 
5091  if (operation.inputs.size() > 2 && !GetInputScalar(operation,
5092  2,
5093  OperandType::INT32,
5094  desc.m_Axis,
5095  model,
5096  data))
5097  {
5098  return Fail("%s: Operation has invalid inputs", __func__);
5099  }
5100 
5101  bool isSupported = false;
5102  armnn::BackendId setBackend;
5103  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5104  {
5105  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5106  IsSoftmaxSupported,
5107  data.m_Backends,
5108  isSupported,
5109  setBackend,
5110  input.GetTensorInfo(),
5111  outputInfo,
5112  desc);
5113  };
5114 
5115  if(IsDynamicTensor(outputInfo))
5116  {
5117  isSupported = AreDynamicTensorsSupported();
5118  }
5119  else
5120  {
5121  validateFunc(outputInfo, isSupported);
5122  }
5123 
5124  if (!isSupported)
5125  {
5126  return false;
5127  }
5128 
5129  IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
5130  assert(layer != nullptr);
5131  layer->SetBackendId(setBackend);
5132  input.Connect(layer->GetInputSlot(0));
5133 
5134  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5135 }
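
The beta value read above scales the logits before exponentiation: softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j), so beta = 1 gives the standard softmax. A minimal reference sketch (illustrative only; production code would subtract max(x) first for numerical stability):

    #include <cmath>
    #include <vector>

    // Illustrative: softmax with the m_Beta scaling used by the descriptor above.
    std::vector<float> Softmax(const std::vector<float>& x, float beta)
    {
        std::vector<float> out(x.size());
        float sum = 0.0f;
        for (size_t i = 0; i < x.size(); ++i)
        {
            out[i] = std::exp(beta * x[i]);
            sum += out[i];
        }
        for (float& v : out)
        {
            v /= sum;
        }
        return out;
    }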
5136 
5137 bool Converter::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
5138 {
5139  VLOG(DRIVER) << "Converter::ConvertTanH()";
5140 
5141  ActivationDescriptor desc;
5142  desc.m_Function = ActivationFunction::TanH;
5143  desc.m_A = 1.0f; // Android NN does not support TanH parameters
5144  desc.m_B = 1.0f; // set to 1.0f for unity scaling
5145 
5146  return ConvertToActivation(operation, __func__, desc, model, data);
5147 }
5148 
5149 bool Converter::ConvertTile(const Operation& operation, const Model& model, ConversionData& data)
5150 {
5151  VLOG(DRIVER) << "Converter::ConvertTile()";
5152 
5153  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5154  if (!input.IsValid())
5155  {
5156  return Fail("%s: Operation has invalid inputs", __func__);
5157  }
5158  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5159 
5160  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
5161  if (!outputOperand)
5162  {
5163  return Fail("%s: Operation has no outputs", __func__);
5164  }
5165  const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
5166 
5167  const Operand* multiplesOperand = GetInputOperand(operation, 1, model);
5168  if (!multiplesOperand)
5169  {
5170  return Fail("%s: Could not read input 1", __func__);
5171  }
5172  std::vector<int32_t> multiples;
5173  if (!GetTensorInt32Values(*multiplesOperand, multiples, model, data))
5174  {
5175  return Fail("%s: Input 1 has invalid values", __func__);
5176  }
5177 
5178  TileDescriptor descriptor;
5179  descriptor.m_Multiples.assign(multiples.begin(), multiples.end());
5180 
5181  bool isSupported = false;
5182  armnn::BackendId setBackend;
5183  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5184  {
5185  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5186  IsTileSupported,
5187  data.m_Backends,
5188  isSupported,
5189  setBackend,
5190  inputInfo,
5191  outputInfo,
5192  descriptor);
5193  };
5194 
5195  if(IsDynamicTensor(outputInfo))
5196  {
5197  isSupported = AreDynamicTensorsSupported();
5198  }
5199  else
5200  {
5201  validateFunc(outputInfo, isSupported);
5202  }
5203 
5204  if (!isSupported)
5205  {
5206  return false;
5207  }
5208 
5209  IConnectableLayer* const layer = data.m_Network->AddTileLayer(descriptor);
5210  assert(layer != nullptr);
5211  layer->SetBackendId(setBackend);
5212  input.Connect(layer->GetInputSlot(0));
5213 
5214  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5215 }
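
The multiples operand gives a per-dimension repeat count, so each output extent of TILE is the input extent times multiples[i]. A short sketch of the shape relation (illustrative helper):

    #include <cstdint>
    #include <vector>

    // Illustrative: TILE output shape is the elementwise product of the
    // input shape and the multiples vector read above.
    std::vector<uint32_t> TiledShape(const std::vector<uint32_t>& in,
                                     const std::vector<int32_t>& multiples)
    {
        std::vector<uint32_t> out(in.size());
        for (size_t i = 0; i < in.size(); ++i)
        {
            out[i] = in[i] * static_cast<uint32_t>(multiples[i]);
        }
        return out;
    }
    // e.g. {2, 3} with multiples {2, 1} -> {4, 3}.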
5216 
5217 bool Converter::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
5218 {
5219  VLOG(DRIVER) << "Converter::ConvertTransposeConv2d()";
5220 
5221  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5222 
5223  if (!input.IsValid())
5224  {
5225  return Fail("%s: Operation has invalid inputs", __func__);
5226  }
5227 
5228  const Operand* output = GetOutputOperand(operation, 0, model);
5229 
5230  if (!output)
5231  {
5232  return Fail("%s: Could not read output 0", __func__);
5233  }
5234 
5235  const TensorInfo& inputInfo = input.GetTensorInfo();
5236  const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5237 
5238  // ArmNN does not currently support non-fixed weights or bias
5239  // Find the shape of the weights tensor. In Android NN this will be [ depth_out, H, W, depth_in ]
5240  const Operand* weightsOperand = GetInputOperand(operation, 1, model);
5241 
5242  if (weightsOperand == nullptr)
5243  {
5244  return Fail("%s: Operand is invalid", __func__);
5245  }
5246  TransposeConvolution2dDescriptor desc;
5247  desc.m_DataLayout = DataLayout::NHWC;
5248 
5249  // Determine whether padding is implicit or explicit
5250  bool implicitPadding = operation.inputs.size() == 9;
5251 
5252  if (implicitPadding)
5253  {
5254  desc.m_DataLayout = OptionalDataLayout(operation, 8, model, data);
5255  }
5256  else
5257  {
5258  desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
5259  }
5260 
5261  armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
5262  unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
5263  unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
5264 
5265  const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
5266 
5267  // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
5268  // We have to permute it to OIHW if the data layout is NCHW.
5269  const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
5270  ConvertOperationInputToConstTensorPin(operation, 1,
5271  model, data, OHWIToOIHW) :
5272  ConvertOperationInputToConstTensorPin(operation, 1, model, data);
5273 
5274  // Bias is a 1D tensor
5275  const ConstTensorPin biasPin =
5276  ConvertOperationInputToConstTensorPin(operation, 2, model, data);
5277 
5278  if (!weightsPin.IsValid())
5279  {
5280  return Fail("%s: Operation has invalid weights", __func__);
5281  }
5282 
5283  if (!biasPin.IsValid())
5284  {
5285  return Fail("%s: Operation has invalid biases", __func__);
5286  }
5287 
5288  ConstTensor weights = weightsPin.GetConstTensor();
5289  ConstTensor bias = biasPin.GetConstTensor();
5290  SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
5291 
5292  ActivationFn activation;
5293 
5294  if (implicitPadding)
5295  {
5296  int32_t strideX{0};
5297  int32_t strideY{0};
5298  int32_t padLeft{0};
5299  int32_t padRight{0};
5300  int32_t padTop{0};
5301  int32_t padBottom{0};
5302 
5303  ::android::nn::PaddingScheme paddingScheme;
5304  if (!GetInputPaddingScheme(operation, 4, paddingScheme, model, data) ||
5305  !GetInputScalar(operation, 5, OperandType::INT32, strideX, model, data) ||
5306  !GetInputScalar(operation, 6, OperandType::INT32, strideY, model, data) ||
5307  !GetInputActivationFunction(operation, 7, activation, model, data))
5308  {
5309  return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
5310  }
5311 
5312  const uint32_t kernelX = weights.GetShape()[widthIndex];
5313  const uint32_t kernelY = weights.GetShape()[heightIndex];
5314 
5315  // If output shape has been specified as a parameter then extract it and make it available.
5316  const Operand* outputShapeOperand = GetInputOperand(operation, 3, model, false);
5317  std::vector<int32_t> outputShape;
5318  if ((outputShapeOperand) && (GetTensorInt32Values(*outputShapeOperand, outputShape, model, data)))
5319  {
5320  // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
5321  for (int dimension : outputShape)
5322  {
5323  desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
5324  }
5325  desc.m_OutputShapeEnabled = true;
5326  }
5327 
5328  uint32_t outputX;
5329  uint32_t outputY;
5330 
5331  if (IsDynamicTensor(outputInfo))
5332  {
5333  if (outputShape.size() == 0)
5334  {
5335  return Fail("%s: Padding sizes cannot be inferred", __func__);
5336  }
5337 
5338  outputX = outputShape[widthIndex];
5339  outputY = outputShape[heightIndex];
5340  }
5341  else
5342  {
5343  outputX = outputInfo.GetShape()[widthIndex];
5344  outputY = outputInfo.GetShape()[heightIndex];
5345  }
5346 
5347  CalcPaddingTransposeConv(outputX, kernelX, strideX, padLeft, padRight, paddingScheme);
5348  CalcPaddingTransposeConv(outputY, kernelY, strideY, padTop, padBottom, paddingScheme);
5349 
5350  // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
5351  // but Arm NN only supports values >= 0
5352  if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
5353  {
5354  return Fail("%s: Negative padding values are not supported", __func__);
5355  }
5356 
5357  desc.m_StrideX = armnn::numeric_cast<uint32_t>(strideX);
5358  desc.m_StrideY = armnn::numeric_cast<uint32_t>(strideY);
5359  desc.m_PadLeft = armnn::numeric_cast<uint32_t>(padLeft);
5360  desc.m_PadRight = armnn::numeric_cast<uint32_t>(padRight);
5361  desc.m_PadTop = armnn::numeric_cast<uint32_t>(padTop);
5362  desc.m_PadBottom = armnn::numeric_cast<uint32_t>(padBottom);
5363  }
5364  else if (operation.inputs.size() == 11)
5365  {
5366  // explicit padding
5367  if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
5368  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
5369  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
5370  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
5371  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
5372  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
5373  !GetInputActivationFunction(operation, 9, activation, model, data))
5374  {
5375  return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
5376  }
5377  }
5378  else
5379  {
5380  return Fail("%s: Unsupported number of operation inputs", __func__);
5381  }
5382 
5383  desc.m_BiasEnabled = true;
5384  Optional<TensorInfo> biases(bias.GetInfo());
5385 
5386  bool isSupported = false;
5387  armnn::BackendId setBackend;
5388  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5389  {
5390  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5391  IsTransposeConvolution2dSupported,
5392  data.m_Backends,
5393  isSupported,
5394  setBackend,
5395  inputInfo,
5396  outputInfo,
5397  desc,
5398  weights.GetInfo(),
5399  biases);
5400  };
5401 
5402  if(IsDynamicTensor(outputInfo))
5403  {
5404  isSupported = AreDynamicTensorsSupported();
5405  }
5406  else
5407  {
5408  validateFunc(outputInfo, isSupported);
5409  }
5410  if (!isSupported)
5411  {
5412  return false;
5413  }
5414 
5415  IConnectableLayer* startLayer =
5416  data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
5417  if (!startLayer)
5418  {
5419  return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
5420  }
5421  startLayer->SetBackendId(setBackend);
5422 
5423  input.Connect(startLayer->GetInputSlot(0));
5424 
5425  return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
5426  data, nullptr, validateFunc, activation);
5427 }
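
For the implicit-padding path above, CalcPaddingTransposeConv derives explicit pads from the output extent, kernel and stride. The governing size relation for a transposed convolution without dilation is output = stride * (input - 1) + kernel - padTotal. Under the SAME scheme (output = input * stride) this reduces to padTotal = kernel - stride, and under VALID padTotal is 0; a negative result trips the negative-padding check above. A hedged sketch of that arithmetic (our own helper, not the NNAPI routine):

    #include <cstdint>

    // Illustrative: total SAME padding for a transposed convolution,
    // commonly split as head = total / 2, tail = total - head.
    int32_t TransposeConvSamePadding(int32_t kernel, int32_t stride)
    {
        return kernel - stride; // negative when stride > kernel
    }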
5428 
5429 bool Converter::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
5430 {
5431  VLOG(DRIVER) << "Converter::ConvertSqrt()";
5432  ActivationDescriptor desc;
5433  desc.m_Function = ActivationFunction::Sqrt;
5434 
5435  return ::ConvertToActivation(operation, __func__, desc, model, data);
5436 }
5437 
5438 bool Converter::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
5439 {
5440  VLOG(DRIVER) << "Converter::ConvertSqueeze()";
5441 
5442  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5443  if (!input.IsValid())
5444  {
5445  return Fail("%s: Operation has invalid inputs", __func__);
5446  }
5447 
5448  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5449  unsigned int rank = inputInfo.GetNumDimensions();
5450  if (rank > 4)
5451  {
5452  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5453  }
5454 
5455  const Operand* output = GetOutputOperand(operation, 0, model);
5456  if (!output)
5457  {
5458  return Fail("%s: Could not read output 0", __func__);
5459  }
5460 
5461  if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
5462  {
5463  return Fail("%s: Dynamic output tensors are not supported", __func__);
5464  }
5465 
5466  // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
5467  // if the operand index is out of bounds.
5468  const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
5469 
5470  const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
5471 
5472  std::vector<int32_t> axis;
5473  if (!axisOperand)
5474  {
5475  axis.assign(dimensionSequence,
5476  dimensionSequence + rank);
5477  }
5478  else if (!GetTensorInt32Values(*axisOperand, axis, model, data))
5479  {
5480  return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
5481  }
5482 
5483  std::vector<uint32_t> outputDims;
5484  for (unsigned int i = 0; i < rank; i++)
5485  {
5486  bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
5487  auto currentDimension = inputInfo.GetShape()[i];
5488  if (skipSqueeze || currentDimension != 1)
5489  {
5490  outputDims.push_back(currentDimension);
5491  }
5492  }
5493 
5494  armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
5495 
5496  armnn::TensorInfo outputInfo = inputInfo;
5497  outputInfo.SetShape(outShape);
5498 
5499  armnn::ReshapeDescriptor reshapeDesc;
5500  reshapeDesc.m_TargetShape = outputInfo.GetShape();
5501 
5502  bool isSupported = false;
5503  armnn::BackendId setBackend;
5504  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5505  IsReshapeSupported,
5506  data.m_Backends,
5507  isSupported,
5508  setBackend,
5509  inputInfo,
5510  outputInfo,
5511  reshapeDesc);
5512 
5513  if (!isSupported)
5514  {
5515  return false;
5516  }
5517 
5518  armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
5519  assert(layer != nullptr);
5520  layer->SetBackendId(setBackend);
5521  input.Connect(layer->GetInputSlot(0));
5522 
5523  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
5524 }
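
The loop above keeps a dimension when it is not listed in axis or when its extent is not 1, and the squeeze is then realised as a Reshape to the surviving extents. For instance, shape [1, 2, 1, 3] with the default axis list {0, 1, 2, 3} squeezes to [2, 3], while an explicit axis {0} gives [2, 1, 3]. A compact sketch mirroring that logic (illustrative helper):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Illustrative: dimensions surviving SQUEEZE, as computed by the loop above.
    std::vector<uint32_t> SqueezedDims(const std::vector<uint32_t>& shape,
                                       const std::vector<int32_t>& axis)
    {
        std::vector<uint32_t> out;
        for (uint32_t i = 0; i < shape.size(); ++i)
        {
            bool listed = std::find(axis.begin(), axis.end(),
                                    static_cast<int32_t>(i)) != axis.end();
            if (!listed || shape[i] != 1)
            {
                out.push_back(shape[i]);
            }
        }
        return out;
    }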
5525 
5526 bool Converter::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
5527 {
5528  VLOG(DRIVER) << "Converter::ConvertStridedSlice()";
5529 
5530  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5531  if (!input.IsValid())
5532  {
5533  return Fail("%s: Operation has invalid inputs", __func__);
5534  }
5535 
5536  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5537  unsigned int rank = inputInfo.GetNumDimensions();
5538  if (rank > 4)
5539  {
5540  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5541  }
5542 
5543  const Operand* output = GetOutputOperand(operation, 0, model);
5544  if (!output)
5545  {
5546  return Fail("%s: Could not read output 0", __func__);
5547  }
5548 
5549  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5550 
5551  const Operand* beginOperand = GetInputOperand(operation, 1, model);
5552  const Operand* endOperand = GetInputOperand(operation, 2, model);
5553  const Operand* stridesOperand = GetInputOperand(operation, 3, model);
5554 
5555  std::vector<int32_t> beginValues;
5556  std::vector<int32_t> endValues;
5557  std::vector<int32_t> stridesValues;
5558 
5559  // The lengths of the beginOperand, endOperand and stridesOperand tensors must equal the rank of the input
5560  auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
5561  {
5562  if (!GetTensorInt32Values(operand, operandValues, model, data))
5563  {
5564  return false;
5565  }
5566 
5567  if (operandValues.size() != rank)
5568  {
5569  return false;
5570  }
5571 
5572  return true;
5573  };
5574 
5575  if (!ValidateInputOperands(*beginOperand, beginValues)
5576  || !ValidateInputOperands(*endOperand, endValues)
5577  || !ValidateInputOperands(*stridesOperand, stridesValues))
5578  {
5579  return Fail("%s: Operation has invalid input operand", __func__);
5580  }
5581 
5582  // Stride cannot have value '0'
5583  if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
5584  {
5585  return Fail("%s: Stride must be non-zero value.", __func__);
5586  }
5587 
5588  armnn::StridedSliceDescriptor descriptor;
5589  descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
5590  descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
5591  descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
5592  descriptor.m_DataLayout = armnn::DataLayout::NHWC;
5593 
5594  // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
5595  if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data) ||
5596  !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data) ||
5597  !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
5598  {
5599  return Fail("%s: Operation has invalid inputs", __func__);
5600  }
5601 
5602  bool isSupported = false;
5603  armnn::BackendId setBackend;
5604  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5605  {
5606  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5607  IsStridedSliceSupported,
5608  data.m_Backends,
5609  isSupported,
5610  setBackend,
5611  inputInfo,
5612  outputInfo,
5613  descriptor);
5614  };
5615 
5616  if(IsDynamicTensor(outputInfo))
5617  {
5618  isSupported = AreDynamicTensorsSupported();
5619  }
5620  else
5621  {
5622  validateFunc(outputInfo, isSupported);
5623  }
5624 
5625  if (!isSupported)
5626  {
5627  return false;
5628  }
5629 
5630  // Check if the slice can fit in an inferred output
5631  armnn::TensorShape inputShape = inputInfo.GetShape();
5632  for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
5633  {
5634  int stride = descriptor.m_Stride[i];
5635 
5636  if (descriptor.m_ShrinkAxisMask & (1 << i))
5637  {
5638  // If the difference between the start point and the end point of the slice on an axis being shrunk
5639  // is greater than 1 then fail, as the output will not be large enough to hold the slice
5640  if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
5641  || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
5642  {
5643  return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
5644  }
5645 
5646  if (stride < 0)
5647  {
5648  return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
5649  }
5650  }
5651  }
5652 
5653  armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
5654  assert(layer != nullptr);
5655  layer->SetBackendId(setBackend);
5656  input.Connect(layer->GetInputSlot(0));
5657 
5658  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5659 }
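
Regarding the shrink-axis check above: a bit set in m_ShrinkAxisMask removes that axis from the output, so the slice on such an axis must select exactly one element (hence the |begin - end| <= 1 requirement and the non-negative stride). A small sketch of the mask semantics (illustrative helper):

    #include <cstdint>

    // Illustrative: axis i is dropped from the output when bit i of
    // shrinkAxisMask is set.
    bool IsAxisShrunk(int32_t shrinkAxisMask, unsigned int axis)
    {
        return (shrinkAxisMask & (1 << axis)) != 0;
    }
    // e.g. a rank-3 input with shrinkAxisMask = 0b010 yields a rank-2 output.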
5660 
5661 bool Converter::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
5662 {
5663  VLOG(DRIVER) << "Converter::ConvertTranspose()";
5664 
5665  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5666  if (!input.IsValid())
5667  {
5668  return Fail("%s: Operation has invalid inputs", __func__);
5669  }
5670 
5671  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5672  unsigned int rank = inputInfo.GetNumDimensions();
5673  if (rank > 4)
5674  {
5675  return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5676  }
5677 
5678  // NOTE: The permutation (input 1) is an optional parameter to TRANSPOSE, therefore we do not want to
5679  // generate a failure if the operand index is out of bounds.
5680  const Operand* permOperand = GetInputOperand(operation, 1, model, false);
5681 
5682  std::vector<int32_t> perm(rank);
5683  if (!permOperand || (permOperand->lifetime == OperandLifeTime::NO_VALUE))
5684  {
5685  for (unsigned int i = rank; i > 0; i--)
5686  {
5687  perm[rank - i] = armnn::numeric_cast<int> (i - 1);
5688  }
5689  }
5690  else if (!GetTensorInt32Values(*permOperand, perm, model, data))
5691  {
5692  return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
5693  }
5694 
5695  std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
5696 
5697  armnn::TransposeDescriptor transposeDesc;
5698  transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
5699 
5700  const Operand* output = GetOutputOperand(operation, 0, model);
5701  if (!output)
5702  {
5703  return Fail("%s: Could not read output 0", __func__);
5704  }
5705 
5706  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5707 
5708  bool isSupported = false;
5709  armnn::BackendId setBackend;
5710  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5711  {
5712  FORWARD_LAYER_SUPPORT_FUNC(__func__,
5713  IsTransposeSupported,
5714  data.m_Backends,
5715  isSupported,
5716  setBackend,
5717  inputInfo,
5718  outputInfo,
5719  transposeDesc);
5720  };
5721 
5722  if(IsDynamicTensor(outputInfo))
5723  {
5724  isSupported = AreDynamicTensorsSupported();
5725  }
5726  else
5727  {
5728  validateFunc(outputInfo, isSupported);
5729  }
5730 
5731  if (!isSupported)
5732  {
5733  return false;
5734  }
5735 
5736  armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
5737  assert(layer != nullptr);
5738  layer->SetBackendId(setBackend);
5739  input.Connect(layer->GetInputSlot(0));
5740 
5741  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5742 }
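
When the permutation operand is absent, the loop above builds the reversing permutation [rank-1, ..., 1, 0], i.e. a full transpose; for a rank-4 tensor that is {3, 2, 1, 0}. A sketch of the same construction (illustrative helper):

    #include <cstdint>
    #include <vector>

    // Illustrative: default TRANSPOSE permutation reverses the dimension order,
    // matching perm[rank - i] = i - 1 for i = rank..1 above.
    std::vector<int32_t> DefaultPermutation(unsigned int rank)
    {
        std::vector<int32_t> perm(rank);
        for (unsigned int i = rank; i > 0; --i)
        {
            perm[rank - i] = static_cast<int32_t>(i - 1);
        }
        return perm; // rank 4 -> {3, 2, 1, 0}
    }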
5743 
5744 } // namespace armnn_driver
armnn::ReduceOperation::Sum
@ Sum
armnnUtils::ExpandDims
armnn::TensorShape ExpandDims(const armnn::TensorShape &tensorShape, int axis)
Definition: TensorUtils.cpp:142
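As an illustrative sketch (values are hypothetical, not taken from the listing above), ExpandDims inserts a length-1 dimension at the given axis:
    // Sketch: insert a leading batch dimension into a {2, 3} shape.
    #include <armnnUtils/TensorUtils.hpp>
    armnn::TensorShape shape({ 2, 3 });
    armnn::TensorShape expanded = armnnUtils::ExpandDims(shape, 0); // -> { 1, 2, 3 }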
armnn::GetDataTypeSize
constexpr unsigned int GetDataTypeSize(DataType dataType)
Definition: TypesUtils.hpp:182
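A minimal sketch of what the returned value means: it is the element size in bytes, and because the function is constexpr it can be used at compile time.
    // Sketch: GetDataTypeSize is constexpr, so it can feed a static_assert.
    #include <armnn/TypesUtils.hpp>
    static_assert(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4,
                  "Float32 elements occupy four bytes");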
armnn_driver::ConstTensorPin::GetConstTensor
const armnn::ConstTensor & GetConstTensor() const
Definition: ConversionUtils.cpp:122
armnn::BaseTensor::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:299
armnn::Convolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:584
armnn::QLstmDescriptor::m_ProjectionClip
float m_ProjectionClip
Clipping threshold value for the projection.
Definition: Descriptors.hpp:1416
armnn::GatherDescriptor::m_Axis
int32_t m_Axis
The axis in params to gather indices from.
Definition: Descriptors.hpp:981
armnn::SpaceToBatchNdDescriptor
A SpaceToBatchNdDescriptor for the SpaceToBatchNdLayer.
Definition: Descriptors.hpp:1043
armnn::Convolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:572
armnn::PermutationVector
Definition: Types.hpp:314
armnn::ReshapeDescriptor::m_TargetShape
TensorShape m_TargetShape
Target shape value.
Definition: Descriptors.hpp:1039
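A minimal usage sketch for this descriptor (the shapes are illustrative):
    // Sketch: flatten a {2, 3} tensor to {1, 6} via a ReshapeLayer descriptor.
    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = armnn::TensorShape({ 1, 6 });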
armnn::Exception
Base class for all ArmNN exceptions so that users can filter to just those.
Definition: Exceptions.hpp:46
armnn::QuantizedLstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: QuantizedLstmParams.hpp:34
armnn::TransposeConvolution2dDescriptor::m_StrideY
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.
Definition: Descriptors.hpp:1479
armnn::StridedSliceDescriptor::m_EndMask
int32_t m_EndMask
End mask value.
Definition: Descriptors.hpp:1353
armnn::BaseTensor::GetInfo
const TensorInfo & GetInfo() const
Definition: Tensor.hpp:297
armnn::QuantizedLstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: QuantizedLstmParams.hpp:46
armnn::LstmInputParamsInfo::m_InputToCellWeights
const TensorInfo * m_InputToCellWeights
Definition: LstmParams.hpp:91
armnn::LstmInputParamsInfo::m_CellBias
const TensorInfo * m_CellBias
Definition: LstmParams.hpp:102
armnn::UnaryOperation
UnaryOperation
Definition: Types.hpp:125
armnn::Convolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:574
armnn_driver::LayerInputHandle::GetOutputSlot
armnn::IOutputSlot * GetOutputSlot() const
Definition: ConversionUtils.cpp:77
armnn::QLstmDescriptor::m_OutputIntermediateScale
float m_OutputIntermediateScale
Output intermediate quantization scale.
Definition: Descriptors.hpp:1432
armnn::Convolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:568
armnn_driver::ConstTensorPin::IsOptional
bool IsOptional() const
Definition: ConversionUtils.cpp:117
armnn::OriginsDescriptor::SetConcatAxis
void SetConcatAxis(unsigned int concatAxis)
Set the concatenation axis value.
Definition: Descriptors.cpp:158
armnn::TensorInfo::GetDataType
DataType GetDataType() const
Definition: Tensor.hpp:200
armnn_driver::GetInputOperand
const Operand * GetInputOperand(const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true)
Definition: ConversionUtils.hpp:662
armnn::LayerType::Shape
@ Shape
armnn::GetArgMinMaxFunctionAsCString
constexpr char const * GetArgMinMaxFunctionAsCString(ArgMinMaxFunction function)
Definition: TypesUtils.hpp:52
armnn::LstmInputParams::m_InputLayerNormWeights
const ConstTensor * m_InputLayerNormWeights
Definition: LstmParams.hpp:57
armnn::LstmInputParamsInfo::m_RecurrentToInputWeights
const TensorInfo * m_RecurrentToInputWeights
Definition: LstmParams.hpp:93
armnn::BatchToSpaceNdDescriptor
A BatchToSpaceNdDescriptor for the BatchToSpaceNdLayer.
Definition: Descriptors.hpp:875
armnn::Convolution2dDescriptor
A Convolution2dDescriptor for the Convolution2dLayer.
Definition: Descriptors.hpp:534
armnn::ReduceOperation::Prod
@ Prod
armnn::LstmInputParamsInfo::m_ForgetGateBias
const TensorInfo * m_ForgetGateBias
Definition: LstmParams.hpp:101
armnn::DepthwiseConvolution2dDescriptor::m_PadBottom
uint32_t m_PadBottom
Padding bottom value in the height dimension.
Definition: Descriptors.hpp:698
armnn::ComparisonDescriptor
A ComparisonDescriptor for the ComparisonLayer.
Definition: Descriptors.hpp:89
armnn::FillDescriptor
A FillDescriptor for the FillLayer.
Definition: Descriptors.hpp:925
armnn::DataType::QAsymmS8
@ QAsymmS8
armnn::StridedSliceDescriptor::m_ShrinkAxisMask
int32_t m_ShrinkAxisMask
Shrink axis mask value. If set, the nth specification shrinks the dimensionality by 1.
Definition: Descriptors.hpp:1355
armnn::SpaceToDepthDescriptor::m_BlockSize
unsigned int m_BlockSize
Scalar specifying the input block size. It must be >= 1.
Definition: Descriptors.hpp:1092
armnn::CreateDescriptorForConcatenation
OriginsDescriptor CreateDescriptorForConcatenation(TensorShapeIt first, TensorShapeIt last, unsigned int concatenationDimension)
Convenience template to create an OriginsDescriptor to use when creating a ConcatLayer for performing concatenation of a number of input tensors.
Definition: Descriptors.hpp:300
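A hedged usage sketch, assuming a container of input shapes of equal rank (the shapes are illustrative):
    // Sketch: build an OriginsDescriptor for concatenating {1, 2} and {1, 3}
    // along dimension 1, giving a {1, 5} output.
    #include <armnn/Descriptors.hpp>
    #include <vector>
    std::vector<armnn::TensorShape> inputShapes{ armnn::TensorShape({ 1, 2 }),
                                                 armnn::TensorShape({ 1, 3 }) };
    armnn::OriginsDescriptor concatDesc =
        armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), 1u);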
armnn::TensorInfo::SetQuantizationOffset
void SetQuantizationOffset(int32_t offset)
Definition: Tensor.cpp:489
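Scale and offset are typically set together when describing a quantized tensor; a minimal sketch with illustrative values:
    // Sketch: describe an 8-bit asymmetric quantized tensor where
    // real = (quantized - offset) * scale.
    armnn::TensorInfo info(armnn::TensorShape({ 1, 8 }), armnn::DataType::QAsymmU8);
    info.SetQuantizationScale(0.05f);
    info.SetQuantizationOffset(128);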
armnn::LstmInputParams::m_ForgetLayerNormWeights
const ConstTensor * m_ForgetLayerNormWeights
Definition: LstmParams.hpp:58
armnn::ResizeDescriptor::m_TargetWidth
uint32_t m_TargetWidth
Target width value.
Definition: Descriptors.hpp:1007
armnn::StridedSliceDescriptor::m_Stride
std::vector< int > m_Stride
Stride values for the input that will be sliced.
Definition: Descriptors.hpp:1346
armnn::InstanceNormalizationDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:871
armnn::LstmDescriptor
An LstmDescriptor for the LstmLayer.
Definition: Descriptors.hpp:1102
armnn::ComparisonOperation
ComparisonOperation
Definition: Types.hpp:109
armnn::StridedSliceDescriptor
A StridedSliceDescriptor for the StridedSliceLayer.
Definition: Descriptors.hpp:1303
armnn::TransposeDescriptor::m_DimMappings
PermutationVector m_DimMappings
Indicates how to translate tensor elements from a given source into the target destination, when source and target potentially have different memory layouts.
Definition: Descriptors.hpp:1514
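A hedged sketch of an NHWC-to-NCHW transpose, assuming the Transpose convention in which output dimension i is taken from input dimension m_DimMappings[i]:
    // Sketch: permute NHWC (N, H, W, C) to NCHW.
    armnn::TransposeDescriptor transposeDesc(armnn::PermutationVector({ 0, 3, 1, 2 }));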
armnn::TransposeConvolution2dDescriptor::m_OutputShape
std::vector< unsigned int > m_OutputShape
Definition: Descriptors.hpp:1486
armnn::ResizeDescriptor::m_AlignCorners
bool m_AlignCorners
Aligned corners.
Definition: Descriptors.hpp:1016
armnn::TileDescriptor::m_Multiples
std::vector< uint32_t > m_Multiples
The vector to multiply the input shape by.
Definition: Descriptors.hpp:1656
armnn::MeanDescriptor::m_Axis
std::vector< unsigned int > m_Axis
Values for the dimensions to reduce.
Definition: Descriptors.hpp:1190
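A minimal sketch combining m_Axis with m_KeepDims (the axis values are illustrative):
    // Sketch: average over the H and W dimensions of an NHWC tensor,
    // keeping the reduced dimensions with length 1.
    armnn::MeanDescriptor meanDesc;
    meanDesc.m_Axis     = { 1u, 2u };
    meanDesc.m_KeepDims = true;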
armnn::LstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1146
armnn::IOutputSlot::Connect
virtual int Connect(IInputSlot &destination)=0
armnn::IOutputSlot::IsTensorInfoSet
virtual bool IsTensorInfoSet() const =0
armnn::LogicalBinaryDescriptor
A LogicalBinaryDescriptor for the LogicalBinaryLayer.
Definition: Descriptors.hpp:1518
armnn::NormalizationDescriptor::m_Alpha
float m_Alpha
Alpha value for the normalization equation.
Definition: Descriptors.hpp:799
armnn::BinaryOperation
BinaryOperation
Definition: Types.hpp:138
armnn::GetUnaryOperationAsCString
constexpr char const * GetUnaryOperationAsCString(UnaryOperation operation)
Definition: TypesUtils.hpp:92
armnn::TensorInfo::GetShape
const TensorShape & GetShape() const
Definition: Tensor.hpp:193
armnn::QLstmDescriptor::m_HiddenStateZeroPoint
int32_t m_HiddenStateZeroPoint
Hidden State zero point.
Definition: Descriptors.hpp:1434
armnn::Convolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation along x axis.
Definition: Descriptors.hpp:578
armnn::SoftmaxDescriptor::m_Axis
int m_Axis
Scalar, defaulted to the last index (-1), specifying the dimension the activation will be performed on.
Definition: Descriptors.hpp:192
armnn_driver::ConversionData::m_Network
armnn::INetworkPtr m_Network
Definition: ConversionUtils.hpp:62
armnn_driver::IsWeightsValid
bool IsWeightsValid(const Operation &operation, uint32_t inputIndex, const Model &model, const bool isOptional=true)
Utility functions.
Definition: ConversionUtils.cpp:141
armnn_driver::IsOperandConstant
bool IsOperandConstant(const Operand &operand)
Definition: ConversionUtils.hpp:731
armnn::LstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1152
armnn::QuantizedLstmInputParams::m_InputToInputWeights
const ConstTensor * m_InputToInputWeights
Definition: QuantizedLstmParams.hpp:33
armnn::TransposeConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:1473
armnn_driver::ConstTensorPin::GetConstTensorPtr
const armnn::ConstTensor * GetConstTensorPtr() const
Definition: ConversionUtils.cpp:127
armnn::TransposeConvolution2dDescriptor::m_PadRight
uint32_t m_PadRight
Padding right value in the width dimension.
Definition: Descriptors.hpp:1471
armnn::QuantizedLstmInputParams::m_CellBias
const ConstTensor * m_CellBias
Definition: QuantizedLstmParams.hpp:45
android::nn
Definition: support_library_service.cpp:10
armnn::LstmInputParams::m_OutputGateBias
const ConstTensor * m_OutputGateBias
Definition: LstmParams.hpp:54
armnn_driver::GetTensorInfoForOperand
armnn::TensorInfo GetTensorInfoForOperand(const Operand &operand)
Definition: CanonicalUtils.cpp:97
armnn::QLstmDescriptor::m_CifgEnabled
bool m_CifgEnabled
Enable/disable CIFG (coupled input & forget gate).
Definition: Descriptors.hpp:1418
armnn_driver::GetInputActivationFunctionFromTensor
bool GetInputActivationFunctionFromTensor(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:870
armnn::BackendId
Definition: BackendId.hpp:75
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToCellWeights
const TensorInfo * m_RecurrentToCellWeights
Definition: QuantizedLstmParams.hpp:146
armnn::BinaryOperation::Minimum
@ Minimum
armnn_driver::ConvertReduce
bool ConvertReduce(const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
Definition: ConversionUtils.cpp:520
armnn::OriginsDescriptor
An OriginsDescriptor for the ConcatLayer.
Definition: Descriptors.hpp:201
armnn::LstmInputParams::m_ProjectionWeights
const ConstTensor * m_ProjectionWeights
Definition: LstmParams.hpp:55
armnn::ActivationFunction::ReLu
@ ReLu
armnn::LstmInputParams::m_InputToForgetWeights
const ConstTensor * m_InputToForgetWeights
Definition: LstmParams.hpp:41
armnn::IConnectableLayer::GetOutputSlot
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
armnn::TensorInfo::SetShape
void SetShape(const TensorShape &newShape)
Definition: Tensor.hpp:195
Exceptions.hpp
armnn::ElementwiseUnaryDescriptor
A ElementwiseUnaryDescriptor for the ElementwiseUnaryLayer.
Definition: Descriptors.hpp:129
armnn::TransposeConvolution2dDescriptor
A TransposeConvolution2dDescriptor for the TransposeConvolution2dLayer.
Definition: Descriptors.hpp:1440
armnn::IConnectableLayer::GetInputSlot
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
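Slots are how layers are wired together; a hedged sketch (the layer choice and shapes are illustrative, not from this converter):
    // Sketch: connect an input layer to a softmax layer and set its TensorInfo.
    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* input   = network->AddInputLayer(0);
    armnn::IConnectableLayer* softmax = network->AddSoftmaxLayer(armnn::SoftmaxDescriptor());
    input->GetOutputSlot(0).SetTensorInfo(
        armnn::TensorInfo(armnn::TensorShape({ 1, 10 }), armnn::DataType::Float32));
    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));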
armnn::ArgMinMaxDescriptor::m_Axis
int m_Axis
Axis to reduce across the input tensor.
Definition: Descriptors.hpp:83
armnn::ActivationDescriptor::m_B
float m_B
Beta lower bound value used by the activation functions (BoundedReLu, Linear, TanH).
Definition: Descriptors.hpp:63
armnn::OriginsDescriptor::SetViewOriginCoord
Status SetViewOriginCoord(uint32_t view, uint32_t coord, uint32_t value)
Set the view origin coordinates.
Definition: Descriptors.cpp:167
armnn::LstmInputParamsInfo::m_CellToOutputWeights
const TensorInfo * m_CellToOutputWeights
Definition: LstmParams.hpp:99
armnn::LstmInputParamsInfo
Definition: LstmParams.hpp:63
armnn::QLstmDescriptor::m_HiddenStateScale
float m_HiddenStateScale
Hidden State quantization scale.
Definition: Descriptors.hpp:1436
armnn::NormalizationDescriptor::m_K
float m_K
Kappa value used for the across channel normalization equation.
Definition: Descriptors.hpp:803
armnn::LstmDescriptor::m_ProjectionEnabled
bool m_ProjectionEnabled
Enable/disable the projection layer.
Definition: Descriptors.hpp:1150
armnn::StridedSliceDescriptor::m_End
std::vector< int > m_End
End values for the input that will be sliced.
Definition: Descriptors.hpp:1344
armnn::ReduceOperation::Min
@ Min
armnn_driver::Converter::Operation
::android::nn::Operation Operation
Definition: Converter.hpp:28
armnn::ConstTensor
A tensor defined by a TensorInfo (shape and data type) and an immutable backing store.
Definition: Tensor.hpp:329
armnn_driver::LayerInputHandle::GetTensorInfo
const armnn::TensorInfo & GetTensorInfo() const
Definition: ConversionUtils.cpp:57
armnn::IConnectableLayer
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:80
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToInputWeights
const TensorInfo * m_RecurrentToInputWeights
Definition: QuantizedLstmParams.hpp:144
armnn::QuantizedLstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: QuantizedLstmParams.hpp:140
armnn_driver::AreDynamicTensorsSupported
bool AreDynamicTensorsSupported()
Checks for ArmNN support of dynamic tensors.
Definition: CanonicalUtils.cpp:505
armnn::TransposeConvolution2dDescriptor::m_OutputShapeEnabled
bool m_OutputShapeEnabled
Set to true when an explicit output shape (m_OutputShape) has been specified.
Definition: Descriptors.hpp:1485
armnn::TransposeConvolution2dDescriptor::m_BiasEnabled
bool m_BiasEnabled
Enable/disable bias.
Definition: Descriptors.hpp:1481
armnn::QLstmDescriptor::m_CellClip
float m_CellClip
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1414
armnn::LstmInputParamsInfo::m_InputGateBias
const TensorInfo * m_InputGateBias
Definition: LstmParams.hpp:100
armnn_driver::ConstTensorPin
Definition: ConversionUtils.hpp:92
armnn::QLstmDescriptor::m_LayerNormEnabled
bool m_LayerNormEnabled
Enable/disable layer normalization.
Definition: Descriptors.hpp:1424
armnn_driver::ConstTensorPin::IsValid
bool IsValid() const
Definition: ConversionUtils.cpp:112
armnn_driver::DequantizeAndMakeConstTensorPin
ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
Definition: ConversionUtils.cpp:752
armnn::BinaryOperation::Div
@ Div
armnn::TransposeConvolution2dDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1483
armnn::QuantizedLstmInputParams::m_InputGateBias
const ConstTensor * m_InputGateBias
Definition: QuantizedLstmParams.hpp:43
armnn::LstmDescriptor::m_ActivationFunc
uint32_t m_ActivationFunc
The activation function to use.
Definition: Descriptors.hpp:1140
armnn::LstmInputParamsInfo::m_InputToForgetWeights
const TensorInfo * m_InputToForgetWeights
Definition: LstmParams.hpp:90
armnn::BatchToSpaceNdDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:902
armnn::TensorInfo::GetQuantizationOffset
int32_t GetQuantizationOffset() const
Definition: Tensor.cpp:478
armnn::DepthwiseConvolution2dDescriptor
A DepthwiseConvolution2dDescriptor for the DepthwiseConvolution2dLayer.
Definition: Descriptors.hpp:659
armnn::DepthwiseConvolution2dDescriptor::m_DilationX
uint32_t m_DilationX
Dilation factor value for width dimension.
Definition: Descriptors.hpp:704
armnn_driver::GetInputActivationFunction
bool GetInputActivationFunction(const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:856
armnn::LstmDescriptor::m_ClippingThresCell
float m_ClippingThresCell
Clipping threshold value for the cell state.
Definition: Descriptors.hpp:1142
armnn::LstmInputParamsInfo::m_InputToOutputWeights
const TensorInfo * m_InputToOutputWeights
Definition: LstmParams.hpp:92
armnn::NullPointerException
Definition: Exceptions.hpp:146
armnn::LstmInputParamsInfo::m_ProjectionBias
const TensorInfo * m_ProjectionBias
Definition: LstmParams.hpp:105
armnn::LstmInputParams
Definition: LstmParams.hpp:13
armnn::LstmInputParamsInfo::m_ProjectionWeights
const TensorInfo * m_ProjectionWeights
Definition: LstmParams.hpp:104
armnn::LstmInputParams::m_CellLayerNormWeights
const ConstTensor * m_CellLayerNormWeights
Definition: LstmParams.hpp:59
armnn::GetComparisonOperationAsCString
constexpr char const * GetComparisonOperationAsCString(ComparisonOperation operation)
Definition: TypesUtils.hpp:62
armnn::TensorShape::GetNumElements
unsigned int GetNumElements() const
Function that calculates the number of tensor elements by multiplying all of the dimension sizes that are specified.
Definition: Tensor.cpp:181
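Both shape queries in one minimal sketch (the shape is illustrative):
    // Sketch: rank and element count of a {2, 3, 4} shape.
    armnn::TensorShape shape({ 2, 3, 4 });
    unsigned int rank     = shape.GetNumDimensions(); // 3
    unsigned int elements = shape.GetNumElements();   // 2 * 3 * 4 = 24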
armnn::MeanDescriptor
A MeanDescriptor for the MeanLayer.
Definition: Descriptors.hpp:1172
armnn::QLstmDescriptor::m_PeepholeEnabled
bool m_PeepholeEnabled
Enable/disable peephole.
Definition: Descriptors.hpp:1420
armnn::QuantizedLstmInputParams
Definition: QuantizedLstmParams.hpp:13
armnn_driver::ConversionData::m_Backends
const std::vector< armnn::BackendId > m_Backends
Definition: ConversionUtils.hpp:61
armnn::OptionalReferenceSwitch::value
const T & value() const
Definition: Optional.hpp:146
armnn::TileDescriptor
Definition: Descriptors.hpp:1640
Converter.hpp
armnn_driver::IsConnectedToDequantize
bool IsConnectedToDequantize(armnn::IOutputSlot *ioutputSlot)
Definition: ConversionUtils.cpp:1064
armnn::SoftmaxDescriptor
A SoftmaxDescriptor for the SoftmaxLayer.
Definition: Descriptors.hpp:177
armnn::QuantizedLstmInputParams::m_InputToCellWeights
const ConstTensor * m_InputToCellWeights
Definition: QuantizedLstmParams.hpp:35
armnn::QuantizedLstmInputParamsInfo::m_RecurrentToForgetWeights
const TensorInfo * m_RecurrentToForgetWeights
Definition: QuantizedLstmParams.hpp:145
armnn::InstanceNormalizationDescriptor::m_Eps
float m_Eps
Epsilon, small scalar value added to variance to avoid dividing by zero. Defaults to 1e-12f.
Definition: Descriptors.hpp:869
armnn::SpaceToDepthDescriptor
A SpaceToDepthDescriptor for the SpaceToDepthLayer.
Definition: Descriptors.hpp:1075
armnn::OptionalBase::has_value
bool has_value() const noexcept
Definition: Optional.hpp:53
armnn::ReduceOperation::Max
@ Max
Connect
void Connect(armnn::IConnectableLayer *from, armnn::IConnectableLayer *to, const armnn::TensorInfo &tensorInfo, unsigned int fromIndex, unsigned int toIndex)
Definition: TestUtils.cpp:14
armnn_driver::LayerInputHandle::IsValid
bool IsValid() const
Definition: ConversionUtils.cpp:28
armnn_driver::GetInputFloat32
bool GetInputFloat32(const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data)
Definition: ConversionUtils.hpp:824
armnn::ActivationFunction::Sigmoid
@ Sigmoid
armnn::SpaceToDepthDescriptor::m_DataLayout
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Definition: Descriptors.hpp:1095
armnn::DepthwiseConvolution2dDescriptor::m_StrideX
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
Definition: Descriptors.hpp:700
armnn::DepthwiseConvolution2dDescriptor::m_PadTop
uint32_t m_PadTop
Padding top value in the height dimension.
Definition: Descriptors.hpp:696