ArmNN
 23.05
ConversionUtils.cpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConversionUtils.hpp"
#include <armnnUtils/Permute.hpp>

///
/// Helper classes
///

namespace armnn_driver
{

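// LayerInputHandle pairs the armnn::IOutputSlot that will feed a layer's input with
// the TensorInfo describing the data it carries; an invalid handle signals that the
// operand could not be converted.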
LayerInputHandle::LayerInputHandle()
    : m_OutputSlot(nullptr)
    , m_Valid(false)
{}

LayerInputHandle::LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, const armnn::TensorInfo& tensorInfo)
    : m_OutputSlot(outputSlot)
    , m_Valid(valid)
    , m_TensorInfo(tensorInfo)
{}

bool LayerInputHandle::IsValid() const
{
    return m_Valid;
}

void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
{
    ARMNN_ASSERT(IsValid());
    if (m_OutputSlot)
    {
        m_OutputSlot->Connect(inputSlot);
    }
}

void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
{
    ARMNN_ASSERT(IsValid());
    if (m_OutputSlot)
    {
        m_OutputSlot->Disconnect(inputSlot);
    }
}

const armnn::TensorInfo& LayerInputHandle::GetTensorInfo() const
{
    return m_TensorInfo;
}

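// For quantized models the bias scale is expected to equal inputScale * weightsScale;
// SanitizeBiasQuantizationScale rewrites the bias TensorInfo when the model supplies
// a scale that violates this constraint.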
void LayerInputHandle::SanitizeQuantizationScale(LayerInputHandle& weight, LayerInputHandle& input)
{
    if (m_OutputSlot)
    {
        armnn::TensorInfo weightInfo = weight.GetTensorInfo();
        armnn::TensorInfo inputInfo = input.GetTensorInfo();
        armnn::TensorInfo biasInfo = GetTensorInfo();

        SanitizeBiasQuantizationScale(biasInfo, weightInfo, inputInfo);

        m_TensorInfo = biasInfo;
        m_OutputSlot->SetTensorInfo(biasInfo);
    }
}

armnn::IOutputSlot* LayerInputHandle::GetOutputSlot() const
{
    return m_OutputSlot;
}

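// ConstTensorPin keeps a constant operand's data (and, when a permutation is applied,
// a locally owned swizzled copy of it) alive for the duration of network construction.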
ConstTensorPin::ConstTensorPin(bool optional)
    : m_Optional(optional)
{}

ConstTensorPin::ConstTensorPin(armnn::TensorInfo& tensorInfo,
                               const void* valueStart,
                               uint32_t numBytes,
                               const armnn::PermutationVector& mappings)
    : m_Optional(false)
{
    armnn::IgnoreUnused(numBytes);
    if (tensorInfo.GetNumBytes() != numBytes)
    {
        VLOG(DRIVER) << "The size of ConstTensor does not match its TensorInfo.";
    }

    const bool needsSwizzling = (mappings.GetSize() > 0);
    if (needsSwizzling)
    {
        m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
        SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);

        m_ConstTensor = armnn::ConstTensor(tensorInfo, m_SwizzledTensorData.data());
    }
    else
    {
        m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
    }
}

bool ConstTensorPin::IsValid() const
{
    return m_ConstTensor.GetMemoryArea() != nullptr;
}

bool ConstTensorPin::IsOptional() const
{
    return m_Optional;
}

const armnn::ConstTensor& ConstTensorPin::GetConstTensor() const
{
    return m_ConstTensor;
}

const armnn::ConstTensor* ConstTensorPin::GetConstTensorPtr() const
{
    if (IsValid() && m_ConstTensor.GetNumElements() > 0)
    {
        return &m_ConstTensor;
    }
    // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
    return nullptr;
}

///
/// Utility functions
///

bool IsWeightsValid(const Operation& operation,
                    uint32_t inputIndex,
                    const Model& model)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return false;
    }

    if (operand->lifetime != OperandLifeTime::CONSTANT_COPY
        && operand->lifetime != OperandLifeTime::CONSTANT_REFERENCE
        && operand->lifetime != OperandLifeTime::NO_VALUE)
    {
        return false;
    }
    return true;
}

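// Converts a constant operand into a ConstTensorPin, optionally overriding the tensor
// shape or data type and permuting the data. Non-constant operands yield an invalid
// pin unless the operand is marked optional.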
ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
                                              const Model& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings,
                                              const armnn::TensorShape* overrideTensorShape,
                                              bool optional,
                                              const armnn::DataType* overrideDataType)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor: " << operand.type;
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant(operand))
    {
        VLOG(DRIVER) << __func__ << ": unsupported lifetime for input tensor: " << operand.lifetime;
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    if (overrideTensorShape)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }

    if (overrideDataType)
    {
        tensorInfo.SetDataType(*overrideDataType);
    }

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

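// Resolves an input operand to a LayerInputHandle. Model inputs and intermediate
// tensors are looked up in m_OutputSlotForOperand; constant operands are materialised
// as ArmNN Constant layers, provided at least one backend supports them.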
LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
                                           uint32_t inputIndex,
                                           const Model& model,
                                           ConversionData& data,
                                           const armnn::PermutationVector& dimensionMappings,
                                           const LayerInputHandle* inputHandle)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor: " << operand->type;
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensors have been inferred by one of the previous layers
            // If not we can't support them
            if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case OperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           armnn::BackendId(),
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case OperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case OperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case OperandLifeTime::POINTER:
            case OperandLifeTime::CONSTANT_REFERENCE:
            {
                auto constantTensorDataType = operandTensorInfo.GetDataType();
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand,
                                                                          model,
                                                                          data,
                                                                          dimensionMappings,
                                                                          nullptr,
                                                                          false,
                                                                          &constantTensorDataType);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    armnn::BackendId setBackend;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               setBackend,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    constantLayer->SetBackendId(setBackend);
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                VLOG(DRIVER) << __func__ << ": unsupported lifetime for input tensor: " << operand->lifetime;
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<OperandType>& e)
    {
        VLOG(DRIVER) << __func__ << ": Operand type: " << e.m_type << " not supported in ArmnnDriver";
        return LayerInputHandle();
    }
}

bool ConvertPaddings(const Operation& operation,
                     const Model& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    const Operand* paddingsOperand = GetInputOperand(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // add padding for each dimension of input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}

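// Shared conversion routine for the 2D pooling operations. The NNAPI operation comes in
// two forms: an explicit-padding form (ten or more inputs) and an implicit form whose
// padding is computed from an android::nn::PaddingScheme. A converter for, e.g.,
// MAX_POOL_2D would typically delegate here (illustrative sketch, not part of this file):
//     return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);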
bool ConvertPooling2d(const Operation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const Model& model,
                      ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertPooling2d()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        ::android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme(operation, 1, scheme, model, data) ||
            !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
        }

        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }
    pooling2dLayer->SetBackendId(setBackend);

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *pooling2dLayer, model,
                                        data, nullptr, validateFunc, activation);
}

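// Shared conversion routine for the REDUCE_* operations. Negative axis values are
// normalised into [0, rank) and duplicates removed before populating the descriptor.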
bool ConvertReduce(const Operation& operation,
                   const Model& model,
                   ConversionData& data,
                   armnn::ReduceOperation reduceOperation)
{
    armnn::ReduceDescriptor descriptor;
    descriptor.m_ReduceOperation = reduceOperation;

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const Operand* axisOperand = GetInputOperand(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }
    std::vector<int32_t> axis;
    if (!GetTensorInt32Values(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
    descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());

    // Get the "keep dims" flag.
    if (!GetInputScalar(operation, 2, OperandType::BOOL, descriptor.m_KeepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReduceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

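// Shared conversion routine for the unary activation operations. A converter for,
// e.g., RELU would typically fill in an armnn::ActivationDescriptor and delegate here
// (illustrative sketch, not part of this file):
//     armnn::ActivationDescriptor desc;
//     desc.m_Function = armnn::ActivationFunction::ReLu;
//     return ConvertToActivation(operation, __func__, desc, model, data);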
bool ConvertToActivation(const Operation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const Model& model,
                         ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

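// Weights that are not constant may be produced by a DEQUANTIZE operation. This helper
// scans the model for a DEQUANTIZE whose output feeds the given input operand and, if
// the source is QSYMM8, dequantizes the data into a freshly allocated float buffer.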
DequantizeResult DequantizeIfRequired(size_t operand_index,
                                      const Operation& operation,
                                      const Model& model,
                                      const ConversionData& data)
{
    const Operand* weightsOperand = GetInputOperand(operation, operand_index, model);
    if (!weightsOperand)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }

    if (IsOperandConstant(*weightsOperand))
    {
        // Weights are already constant
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
    }

    const size_t weightsInputIndex = operation.inputs[operand_index];

    // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
    // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
    {
        // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
        const auto& operationIt = getMainModel(model).operations[operationIdx];
        if (operationIt.type != OperationType::DEQUANTIZE)
        {
            continue;
        }

        size_t outOpIndex = weightsInputIndex + 1;
        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
        {
            outOpIndex = operationIt.outputs[i];
        }

        if (outOpIndex != weightsInputIndex)
        {
            continue;
        }

        const Operand* operand = GetInputOperand(operationIt, 0, model);
        ARMNN_ASSERT(operand);

        if (!IsQSymm8(*operand))
        {
            // Only supporting dequantize from QSYMM8 to FLOAT
            break;
        }

        // Allocate a new buffer for the dequantized data and manually dequantize
        const void* startValue = GetOperandValueReadOnlyAddress(*operand, model, data);
        if (!startValue)
        {
            // Failed to get the operand address
            break;
        }

        const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
        // For QSYMM8 each element is one byte, so location.length is also the element count.
        size_t dequantizedBufferLength = operand->location.length;
        const float quantizationScale = operand->scale;

        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        float* dstPtr = dequantizedBuffer.get();
        ARMNN_ASSERT(dstPtr);
        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            *dstPtr++ = quantizedBuffer[i] * quantizationScale;
        }

        // Construct tensor info for dequantized ConstTensor
        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
                                     operand->dimensions.data(),
                                     armnn::DataType::Float32);

        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
                 std::move(tensorInfo),
                 DequantizeStatus::SUCCESS };
    }

    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
}

ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation& operation,
                                               const Model& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional)
{
    DequantizeResult dequantized = DequantizeIfRequired(operandIndex, operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // return invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            return ConvertOperationInputToConstTensorPin(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}

bool GetInputPaddingScheme(const Operation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const Model& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<::android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

const void* GetOperandValueReadOnlyAddress(const Operand& operand,
                                           const Model& model,
                                           const ConversionData& data,
                                           bool optional)
{
    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            valueStart = model.operandValues.data() + operand.location.offset;
            break;
        }
        case OperandLifeTime::POINTER:
        {
            // Pointer specified in the model
            valueStart = std::get<const void*>(operand.location.pointer);
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case OperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            VLOG(DRIVER) << __func__ << ": unsupported/invalid operand lifetime: " << operand.lifetime;
            valueStart = nullptr;
        }
    }

    return valueStart;
}

bool GetTensorInt32Values(const Operand& operand,
                          std::vector<int32_t>& outValues,
                          const Model& model,
                          const ConversionData& data)
{
    if (operand.type != OperandType::TENSOR_INT32)
    {
        VLOG(DRIVER) << __func__ << ": invalid operand type: " << operand.type;
        return false;
    }

    const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
    if (!startAddress)
    {
        VLOG(DRIVER) << __func__ << ": failed to get operand address " << operand.type;
        return false;
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %zu",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

armnn::DataLayout OptionalDataLayout(const Operation& operation,
                                     uint32_t inputIndex,
                                     const Model& model,
                                     ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}

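// Appends an ArmNN Activation layer after prevLayer when the NNAPI operation carries a
// fused activation function; returns prevLayer unchanged for kActivationNone, or
// nullptr if the activation is unsupported.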
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data)
{
    ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);

    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    armnn::IConnectableLayer* activationLayer = prevLayer;

    if (activation != ActivationFn::kActivationNone)
    {
        armnn::ActivationDescriptor activationDesc;
        switch (activation)
        {
            case ActivationFn::kActivationRelu:
            {
                activationDesc.m_Function = armnn::ActivationFunction::ReLu;
                break;
            }
            case ActivationFn::kActivationRelu1:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = -1.0f;
                break;
            }
            case ActivationFn::kActivationRelu6:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 6.0f;
                break;
            }
            case ActivationFn::kActivationSigmoid:
            {
                activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
                break;
            }
            case ActivationFn::kActivationTanh:
            {
                activationDesc.m_Function = armnn::ActivationFunction::TanH;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = 1.0f;
                break;
            }
            default:
            {
                Fail("%s: Invalid activation enum value %i", __func__, activation);
                return nullptr;
            }
        }

        bool isSupported = false;
        armnn::BackendId setBackend;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   prevLayer->GetOutputSlot(0).GetTensorInfo(),
                                   tensorInfo,
                                   activationDesc);
        if (!isSupported)
        {
            return nullptr;
        }

        activationLayer = data.m_Network->AddActivationLayer(activationDesc);
        activationLayer->SetBackendId(setBackend);

        prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
        activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    }

    return activationLayer;
}

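// Associates a layer's output slot with the corresponding operand index so that later
// operations can find it, setting (or inferring and validating) its TensorInfo and
// appending any fused activation via ProcessActivation.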
bool SetupAndTrackLayerOutputSlot(const Operation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const Model& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc,
                                  const ActivationFn& activationFunction,
                                  bool inferOutputShapes)
{
    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if (!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}

bool IsConnectedToDequantize(armnn::IOutputSlot* ioutputSlot)
{
    VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize()";
    if (!ioutputSlot)
    {
        return false;
    }
    VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() ioutputSlot is valid.";
    // Find the connections and layers...
    armnn::IConnectableLayer& owningLayer = ioutputSlot->GetOwningIConnectableLayer();
    if (owningLayer.GetType() == armnn::LayerType::Dequantize)
    {
        VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() connected to Dequantize Layer.";
        armnn::IInputSlot& inputSlot = owningLayer.GetInputSlot(0);
        armnn::IOutputSlot* connection = inputSlot.GetConnection();
        if (connection)
        {
            VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer has a connection.";
            armnn::IConnectableLayer& connectedLayer =
                connection->GetOwningIConnectableLayer();
            if (connectedLayer.GetType() == armnn::LayerType::Constant)
            {
                VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer connected to Constant";
                return true;
            }
        }
    }
    return false;
}

} // namespace armnn_driver