ArmNN 23.11
ConversionUtils.cpp
//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConversionUtils.hpp"
#include <armnn/Exceptions.hpp>
#include <armnnUtils/Permute.hpp>

///
/// Helper classes
///

namespace armnn_driver
{

LayerInputHandle::LayerInputHandle()
    : m_OutputSlot(nullptr)
    , m_Valid(false)
{}

LayerInputHandle::LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
    : m_OutputSlot(outputSlot)
    , m_Valid(valid)
    , m_TensorInfo(tensorInfo)
{}

bool LayerInputHandle::IsValid() const
{
    return m_Valid;
}

void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
{
    if (!IsValid())
    {
        throw armnn::Exception("cannot invoke Connect on an invalid LayerInputHandle");
    }
    if (m_OutputSlot)
    {
        m_OutputSlot->Connect(inputSlot);
    }
}

void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
{
    if (!IsValid())
    {
        throw armnn::Exception("cannot invoke Disconnect on an invalid LayerInputHandle");
    }
    if (m_OutputSlot)
    {
        m_OutputSlot->Disconnect(inputSlot);
    }
}

const armnn::TensorInfo& LayerInputHandle::GetTensorInfo() const
{
    return m_TensorInfo;
}

void LayerInputHandle::SanitizeQuantizationScale(LayerInputHandle& weight, LayerInputHandle& input)
{
    if (m_OutputSlot)
    {
        armnn::TensorInfo weightInfo = weight.GetTensorInfo();
        armnn::TensorInfo inputInfo  = input.GetTensorInfo();
        armnn::TensorInfo biasInfo   = GetTensorInfo();

        SanitizeBiasQuantizationScale(biasInfo, weightInfo, inputInfo);

        m_TensorInfo = biasInfo;
        m_OutputSlot->SetTensorInfo(biasInfo);
    }
}
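
// For quantized operators ArmNN expects the bias to use scale = inputScale * weightScale
// (with zero point 0); SanitizeBiasQuantizationScale() rewrites the bias TensorInfo when
// this does not hold, e.g. inputScale 0.5f and weightScale 0.25f imply biasScale 0.125f.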

armnn::IOutputSlot* LayerInputHandle::GetOutputSlot() const
{
    return m_OutputSlot;
}

ConstTensorPin::ConstTensorPin(bool optional)
    : m_Optional(optional)
{}

ConstTensorPin::ConstTensorPin(armnn::TensorInfo& tensorInfo,
                               const void* valueStart,
                               uint32_t numBytes,
                               const armnn::PermutationVector& mappings)
    : m_Optional(false)
{
    if (tensorInfo.GetNumBytes() != numBytes)
    {
        VLOG(DRIVER) << "The size of ConstTensor does not match its TensorInfo.";
    }

    const bool needsSwizzling = (mappings.GetSize() > 0);
    if (needsSwizzling)
    {
        m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
        SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);

        m_ConstTensor = armnn::ConstTensor(tensorInfo, m_SwizzledTensorData.data());
    }
    else
    {
        m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
    }
}
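
// Note: armnn::ConstTensor does not take ownership of its backing memory, so when the
// data is swizzled the copy must be kept alive in m_SwizzledTensorData for as long as
// the pin is in use.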

bool ConstTensorPin::IsValid() const
{
    return m_ConstTensor.GetMemoryArea() != nullptr;
}

bool ConstTensorPin::IsOptional() const
{
    return m_Optional;
}

const armnn::ConstTensor& ConstTensorPin::GetConstTensor() const
{
    return m_ConstTensor;
}

const armnn::ConstTensor* ConstTensorPin::GetConstTensorPtr() const
{
    if (IsValid() && m_ConstTensor.GetNumElements() > 0)
    {
        return &m_ConstTensor;
    }
    // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
    return nullptr;
}

///
/// Utility functions
///

bool IsWeightsValid(const Operation& operation,
                    uint32_t inputIndex,
                    const Model& model,
                    const bool isOptional = true)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return false;
    }
    // If the operand is not an optional operand it cannot have a NO_VALUE lifetime
    if (!isOptional && operand->lifetime == OperandLifeTime::NO_VALUE)
    {
        return false;
    }
    if (operand->lifetime != OperandLifeTime::CONSTANT_COPY
        && operand->lifetime != OperandLifeTime::CONSTANT_REFERENCE
        && operand->lifetime != OperandLifeTime::NO_VALUE)
    {
        return false;
    }
    return true;
}
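
// Note: a NO_VALUE lifetime is only legal for optional weights; weights that are neither
// constant nor omitted may instead be the output of a DEQUANTIZE op, which
// DequantizeIfRequired() below handles.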

ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
                                              const Model& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings,
                                              const armnn::TensorShape* overrideTensorShape,
                                              bool optional,
                                              const armnn::DataType* overrideDataType)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor: " << operand.type;
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant(operand))
    {
        VLOG(DRIVER) << __func__ << ": lifetime for input tensor: " << operand.lifetime;
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    if (overrideTensorShape)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }

    if (overrideDataType)
    {
        tensorInfo.SetDataType(*overrideDataType);
    }

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
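
// The dimensionMappings parameter allows 4D constant data to be swizzled (e.g. NHWC to
// NCHW) while it is pinned; passing g_DontPermute, an empty PermutationVector, leaves
// the data untouched.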

LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
                                           uint32_t inputIndex,
                                           const Model& model,
                                           ConversionData& data,
                                           const armnn::PermutationVector& dimensionMappings,
                                           const LayerInputHandle* inputHandle)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor: " << operand->type;
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensors have been inferred by one of the previous layers
            // If not we can't support them
            if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case OperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           armnn::BackendId(),
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case OperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case OperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case OperandLifeTime::POINTER:
            case OperandLifeTime::CONSTANT_REFERENCE:
            {
                auto constantTensorDataType = operandTensorInfo.GetDataType();
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand,
                                                                          model,
                                                                          data,
                                                                          dimensionMappings,
                                                                          nullptr,
                                                                          false,
                                                                          &constantTensorDataType);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    armnn::BackendId setBackend;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               setBackend,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    constantLayer->SetBackendId(setBackend);
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                VLOG(DRIVER) << __func__ << ": unsupported lifetime for input tensor: " << operand->lifetime;
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<OperandType>& e)
    {
        VLOG(DRIVER) << __func__ << ": Operand type: " << e.m_type << " not supported in ArmnnDriver";
        return LayerInputHandle();
    }
}
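
// In summary: SUBGRAPH_INPUT, TEMPORARY_VARIABLE and SUBGRAPH_OUTPUT operands resolve to
// the output slot of an existing layer, while CONSTANT_COPY, POINTER and
// CONSTANT_REFERENCE operands are materialised as a new Constant layer whose output slot
// is returned.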

bool ConvertPaddings(const Operation& operation,
                     const Model& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    const Operand* paddingsOperand = GetInputOperand(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // Add padding for each dimension of input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}
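
// Example: for a rank-2 input, a paddings operand holding [[1, 2], [3, 4]] produces
// padDescriptor.m_PadList = { {1, 2}, {3, 4} }, one (before, after) pair per input
// dimension.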


bool ConvertPooling2d(const Operation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const Model& model,
                      ConversionData& data)
{
    VLOG(DRIVER) << "ConversionUtils::ConvertPooling2d()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        ::android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme(operation, 1, scheme, model, data) ||
            !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
        }

        // Implicit padding: derive the pad values from the Android padding scheme,
        // input size, kernel size and stride
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }
    pooling2dLayer->SetBackendId(setBackend);

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *pooling2dLayer, model,
                                        data, nullptr, validateFunc, activation);
}

bool ConvertReduce(const Operation& operation,
                   const Model& model,
                   ConversionData& data,
                   armnn::ReduceOperation reduceOperation)
{
    armnn::ReduceDescriptor descriptor;
    descriptor.m_ReduceOperation = reduceOperation;

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const Operand* axisOperand = GetInputOperand(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }
    std::vector<int32_t> axis;
    if (!GetTensorInt32Values(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    // Convert the axis to unsigned int and remove duplicates;
    // (i + rank) % rank maps negative axes to their positive equivalents.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
    descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());

    // Get the "keep dims" flag.
    if (!GetInputScalar(operation, 2, OperandType::BOOL, descriptor.m_KeepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReduceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
    if (layer == nullptr)
    {
        throw armnn::NullPointerException("failed to add reduce layer to network");
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}


bool ConvertToActivation(const Operation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const Model& model,
                         ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    if (layer == nullptr)
    {
        throw armnn::NullPointerException("failed to add activation layer to network");
    }
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

DequantizeResult DequantizeIfRequired(size_t operand_index,
                                      const Operation& operation,
                                      const Model& model,
                                      const ConversionData& data)
{
    const Operand* weightsOperand = GetInputOperand(operation, operand_index, model);
    if (!weightsOperand)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }

    if (IsOperandConstant(*weightsOperand))
    {
        // Weights are already constant
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
    }

    const size_t weightsInputIndex = operation.inputs[operand_index];

    // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
    // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
    {
        // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
        const auto& operationIt = getMainModel(model).operations[operationIdx];
        if (operationIt.type != OperationType::DEQUANTIZE)
        {
            continue;
        }

        size_t outOpIndex = weightsInputIndex + 1;
        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
        {
            outOpIndex = operationIt.outputs[i];
        }

        if (outOpIndex != weightsInputIndex)
        {
            continue;
        }

        const Operand* operand = GetInputOperand(operationIt, 0, model);
        if (operand == nullptr)
        {
            throw armnn::Exception("failed to get input operand 0");
        }

        if (!IsQSymm8(*operand))
        {
            // Only supporting dequantize from QSYMM8 to FLOAT
            break;
        }

        // Allocate a new buffer for the dequantized data and manually dequantize
        const void* startValue = GetOperandValueReadOnlyAddress(*operand, model, data);
        if (!startValue)
        {
            // Failed to get the operand address
            break;
        }

        const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
        size_t dequantizedBufferLength = operand->location.length;
        const float quantizationScale = operand->scale;

        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        float* dstPtr = dequantizedBuffer.get();
        if (dstPtr == nullptr)
        {
            throw armnn::NullPointerException("dequantizedBuffer unique pointer is null");
        }
        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            // QSYMM8 has a zero point of 0, so dequantization is a plain scale
            dstPtr[i] = quantizedBuffer[i] * quantizationScale;
        }

        // Construct tensor info for dequantized ConstTensor
        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
                                     operand->dimensions.data(),
                                     armnn::DataType::Float32);

        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
                 std::move(tensorInfo),
                 DequantizeStatus::SUCCESS };
    }

    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
}
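
// Example: QSYMM8 is symmetric (zero point 0), so a stored value of -64 with scale 0.02f
// dequantizes to -64 * 0.02f = -1.28f.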

ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation& operation,
                                               const Model& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional)
{
    DequantizeResult dequantized = DequantizeIfRequired(operandIndex, operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // return invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            return ConvertOperationInputToConstTensorPin(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}

bool GetInputPaddingScheme(const Operation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const Model& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<::android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

const void* GetOperandValueReadOnlyAddress(const Operand& operand,
                                           const Model& model,
                                           const ConversionData& data,
                                           bool optional)
{
    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = model.operandValues.data() + operand.location.offset;
            break;
        }
        case OperandLifeTime::POINTER:
        {
            // Pointer specified in the model
            valueStart = std::get<const void*>(operand.location.pointer);
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case OperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            VLOG(DRIVER) << __func__ << ": unsupported/invalid operand lifetime: " << operand.lifetime;
            valueStart = nullptr;
        }
    }

    return valueStart;
}
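
// The three constant lifetimes map to three storage locations: CONSTANT_COPY data lives
// inline in model.operandValues, POINTER is a raw pointer supplied by the client, and
// CONSTANT_REFERENCE lives in one of the runtime memory pools in data.m_MemPools.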

bool GetTensorInt32Values(const Operand& operand,
                          std::vector<int32_t>& outValues,
                          const Model& model,
                          const ConversionData& data)
{
    if (operand.type != OperandType::TENSOR_INT32)
    {
        VLOG(DRIVER) << __func__ << ": invalid operand type: " << operand.type;
        return false;
    }

    const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
    if (!startAddress)
    {
        VLOG(DRIVER) << __func__ << ": failed to get operand address " << operand.type;
        return false;
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %zu",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

armnn::DataLayout OptionalDataLayout(const Operation& operation,
                                     uint32_t inputIndex,
                                     const Model& model,
                                     ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    // A true value selects NCHW; the default layout is NHWC
    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}

armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data)
{
    if (prevLayer->GetNumOutputSlots() != 1)
    {
        throw armnn::Exception("ProcessActivation: previous layer does not have a single output slot");
    }

    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    armnn::IConnectableLayer* activationLayer = prevLayer;

    if (activation != ActivationFn::kActivationNone)
    {
        armnn::ActivationDescriptor activationDesc;
        switch (activation)
        {
            case ActivationFn::kActivationRelu:
            {
                activationDesc.m_Function = armnn::ActivationFunction::ReLu;
                break;
            }
            case ActivationFn::kActivationRelu1:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = -1.0f;
                break;
            }
            case ActivationFn::kActivationRelu6:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 6.0f;
                break;
            }
            case ActivationFn::kActivationSigmoid:
            {
                activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
                break;
            }
            case ActivationFn::kActivationTanh:
            {
                activationDesc.m_Function = armnn::ActivationFunction::TanH;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = 1.0f;
                break;
            }
            default:
            {
                Fail("%s: Invalid activation enum value %i", __func__, activation);
                return nullptr;
            }
        }

        bool isSupported = false;
        armnn::BackendId setBackend;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   prevLayer->GetOutputSlot(0).GetTensorInfo(),
                                   tensorInfo,
                                   activationDesc);
        if (!isSupported)
        {
            return nullptr;
        }

        activationLayer = data.m_Network->AddActivationLayer(activationDesc);
        activationLayer->SetBackendId(setBackend);

        prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
        activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    }

    return activationLayer;
}
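
// Fused NNAPI activations are appended as a standalone ArmNN Activation layer:
// kActivationRelu1 and kActivationRelu6 both map onto BoundedReLu, with bounds
// (m_B, m_A) of (-1, 1) and (0, 6) respectively.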

bool SetupAndTrackLayerOutputSlot(const Operation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const Model& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo,
                                  const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc,
                                  const ActivationFn& activationFunction,
                                  bool inferOutputShapes)
{
    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
    if (overrideOutputInfo == nullptr)
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }
    else
    {
        outputSlot.SetTensorInfo(*overrideOutputInfo);
    }

    bool isSupported = false;
    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
    {
        // Type one dynamic tensors require the previous layer's output shape for inference
        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
        {
            if (!layer.GetInputSlot(inputSlotIndex).GetConnection())
            {
                return false;
            }
        }
        // IsTensorInfoSet will infer the dynamic output shape
        outputSlot.IsTensorInfoSet();
        // Once the shape is inferred we can validate it
        validateFunc(outputSlot.GetTensorInfo(), isSupported);

        if (!isSupported)
        {
            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
            {
                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
            }
            return false;
        }
    }

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];

    if (activationFunction != ActivationFn::kActivationNone)
    {
        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
                                                                     &layer, data);

        if (!endLayer)
        {
            return Fail("%s: ProcessActivation failed", __func__);
        }

        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
    }
    else
    {
        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
    }

    return true;
}
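
// For dynamic outputs the shape is first inferred via IsTensorInfoSet() and then
// re-validated; if the backend rejects the inferred shape, the layer is disconnected
// from the graph again and conversion fails.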

bool IsConnectedToDequantize(armnn::IOutputSlot* ioutputSlot)
{
    VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize()";
    if (!ioutputSlot)
    {
        return false;
    }
    VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() ioutputSlot is valid.";
    // Find the connections and layers...
    armnn::IConnectableLayer& owningLayer = ioutputSlot->GetOwningIConnectableLayer();
    if (owningLayer.GetType() == armnn::LayerType::Dequantize)
    {
        VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() connected to Dequantize Layer.";
        armnn::IInputSlot& inputSlot = owningLayer.GetInputSlot(0);
        armnn::IOutputSlot* connection = inputSlot.GetConnection();
        if (connection)
        {
            VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer has a connection.";
            armnn::IConnectableLayer& connectedLayer =
                connection->GetOwningIConnectableLayer();
            if (connectedLayer.GetType() == armnn::LayerType::Constant)
            {
                VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer connected to Constant";
                return true;
            }
        }
    }
    return false;
}

} // namespace armnn_driver