ConversionUtils.hpp (ArmNN 23.11)
//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "CanonicalUtils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <armnnUtils/FloatingPointComparison.hpp>

#include <log/log.h>
#include <sstream>
#include <vector>

inline const android::nn::Model::Subgraph& getMainModel(const android::nn::Model& model) { return model.main; }

namespace armnn_driver
{

///
/// Helper classes
///

#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>

using Model           = ::android::nn::Model;
using Operand         = ::android::nn::Operand;
using OperandLifeTime = ::android::nn::Operand::LifeTime;
using OperandType     = ::android::nn::OperandType;
using Operation       = ::android::nn::Operation;
using OperationType   = ::android::nn::OperationType;
using ErrorStatus     = ::android::nn::ErrorStatus;

struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    , m_DynamicInputsEncountered(false)
    {}

    const std::vector<armnn::BackendId>         m_Backends;
    armnn::INetworkPtr                          m_Network;
    std::vector<armnn::IOutputSlot*>            m_OutputSlotForOperand;
    std::vector<::android::nn::RunTimePoolInfo> m_MemPools;
    bool                                        m_DynamicInputsEncountered;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    void Disconnect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

    void SanitizeQuantizationScale(LayerInputHandle& weight, LayerInputHandle& input);

    armnn::IOutputSlot* GetOutputSlot() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool                m_Valid;
    armnn::TensorInfo   m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // Optional flag to indicate that an invalid tensor pin is not an error, but that the
    // optional values were simply not given.
    bool m_Optional;
};

enum class ConversionResult
{
    Success,
    ErrorMappingPools,
    UnsupportedFeature
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    VLOG(DRIVER) << funcName << ": not supported by armnn: " << reasonIfUnsupported.c_str(); \
                } \
                else \
                { \
                    VLOG(DRIVER) << funcName << ": not supported by armnn"; \
                } \
            } \
        } \
        else \
        { \
            VLOG(DRIVER) << funcName << ": backend not registered: " << backendId.Get().c_str(); \
        } \
    } \
    if (!supported) \
    { \
        VLOG(DRIVER) << funcName << ": not supported by any specified backend"; \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
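
// Illustrative usage (not part of the original header): a converter typically
// declares the `supported` and `setBackend` outputs, forwards the Is*Supported
// query together with its arguments, and bails out if no backend accepts the
// layer. A minimal sketch, assuming inputInfo, outputInfo and desc were already
// populated by the surrounding conversion code:
//
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsActivationSupported,
//                                data.m_Backends,
//                                isSupported,
//                                setBackend,
//                                inputInfo,
//                                outputInfo,
//                                desc);
//     if (!isSupported)
//     {
//         return false;
//     }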

inline armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

// Support within the 1.3 driver for specific tensor data types
inline bool IsOperandTypeSupportedForTensors(OperandType type)
{
    return type == OperandType::BOOL ||
           type == OperandType::TENSOR_BOOL8 ||
           type == OperandType::TENSOR_FLOAT16 ||
           type == OperandType::TENSOR_FLOAT32 ||
           type == OperandType::TENSOR_QUANT8_ASYMM ||
           type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED ||
           type == OperandType::TENSOR_QUANT8_SYMM ||
           type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           type == OperandType::TENSOR_QUANT16_SYMM ||
           type == OperandType::TENSOR_INT32;
}

inline bool IsBool(Operand operand)
{
    return operand.type == OperandType::BOOL;
}

inline bool Is12OrLaterOperand(Operand)
{
    return true;
}

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
                                          LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    if (reshapeLayer == nullptr)
    {
        throw armnn::Exception("failed to add reshape layer to network");
    }

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

armnn::TensorShape FlattenFullyConnectedInput(const armnn::TensorShape& inputShape,
                                              const armnn::TensorShape& weightsShape)
{
    if (inputShape.GetNumDimensions() > 2U)
    {
        unsigned int totalInputElements = inputShape.GetNumElements();
        unsigned int inputSize          = weightsShape[1];

        unsigned int batchSize = totalInputElements / inputSize;

        // The flattened shape is only valid if the input splits evenly into
        // batches of inputSize elements.
        if (totalInputElements % inputSize != 0)
        {
            throw std::runtime_error("Failed to deduce tensor shape");
        }

        return armnn::TensorShape({batchSize, inputSize});
    }
    else
    {
        return inputShape;
    }
}
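
// Worked example (illustrative, not part of the original header): a fully
// connected layer fed with a [1, 2, 2, 8] input and a [16, 8] weights tensor
// has totalInputElements = 32 and inputSize = weightsShape[1] = 8, so the
// input is flattened to [4, 8] before the matrix multiply; 32 % 8 == 0, so no
// exception is thrown.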

inline bool VerifyFullyConnectedShapes(const armnn::TensorShape& inputShape,
                                       const armnn::TensorShape& weightsShape,
                                       const armnn::TensorShape& outputShape,
                                       bool transposeWeightMatrix)
{
    unsigned int dimIdx = transposeWeightMatrix ? 0 : 1;
    return (inputShape[0] == outputShape[0] && weightsShape[dimIdx] == outputShape[1]);
}
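
// Illustrative check (not part of the original header): with a [4, 8] input,
// [16, 8] weights and transposeWeightMatrix == true, the expected output shape
// is [4, 16]: inputShape[0] == outputShape[0] and weightsShape[0] == outputShape[1].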

bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    if (startLayer == nullptr)
    {
        throw armnn::InvalidArgumentException("BroadcastTensor: startLayer pointer handed in is null");
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference     = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                               armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller               = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    if (data.m_Network == nullptr)
    {
        throw armnn::InvalidArgumentException(
            "BroadcastTensor: the conversion data handed in has a null network pointer");
    }
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
    reshapeLayer.SetBackendId(setBackend);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //  Reshape   |
        //      \    /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //     |   Reshape
        //      \    /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
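
// Worked example (illustrative, not part of the original header): adding a
// [4] bias to a [2, 3, 5, 4] input. The bias is the "smaller" tensor, so it is
// reshaped to [1, 1, 1, 4] (degenerate leading dimensions) and routed through
// a Reshape layer into input slot 1 of the start layer, while the 4-D input
// connects directly to slot 0.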

void CalcPadding(uint32_t input,
                 uint32_t kernel,
                 uint32_t stride,
                 uint32_t& outPadHead,
                 uint32_t& outPadTail,
                 PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, ::android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
}
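
// Worked example (illustrative, not part of the original header; assumes the
// usual NNAPI SAME-padding arithmetic): with input = 224, kernel = 3 and
// stride = 2, the output extent is ceil(224 / 2) = 112, so the total padding
// needed is (112 - 1) * 2 + 3 - 224 = 1, split as padHead = 0 and padTail = 1.
// With the VALID scheme both come back as 0.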

inline void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
                                     int32_t& outPadTail, ::android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

Shape GetOperandShape(const Operand& operand)
{
    Shape shape;
    shape.type       = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale      = operand.scale;
    shape.offset     = operand.zeroPoint;
    return shape;
}

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // Bias is expected to be a 1-D tensor, set qdim = 0
        biasInfo.SetQuantizationDim(0);

        VLOG(DRIVER) << "Bias quantization params have been updated for per-axis quantization";
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
            {
                VLOG(DRIVER) << "Bias quantization scale has been modified to match input * weights";
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
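
// Worked example (illustrative, not part of the original header): with an
// input scale of 0.5 and a per-tensor weight scale of 0.25, the bias scale
// must be 0.5 * 0.25 = 0.125. A bias declared with a scale of 0.1251 is within
// the 1% tolerance and is corrected to 0.125; a scale of 0.2 is left untouched
// and will later fail ArmNN's own validation.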

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector SwapDim2And3({ 0U, 1U, 3U, 2U });

// 3D Permutation Vectors
const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });

template<typename OSlot>
armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
                                            const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);

    if (layer == nullptr)
    {
        throw armnn::Exception("failed to add transpose layer to network");
    }

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
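
// Worked example (illustrative, not part of the original header): concatenating
// inputs of shape [2, 3] and [2, 5] along concatDim = 1 must produce an output
// of shape [2, 8]: dimension 1 is the sum 3 + 5, and every other dimension has
// to match the first input exactly.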

inline bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

inline void SwizzleInputs(armnn::INetwork& network,
                          std::vector<LayerInputHandle>& inputs,
                          std::vector<armnn::TensorShape>& inputShapes,
                          const armnn::PermutationVector& mapping,
                          std::vector<armnn::BackendId>& setBackends)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // Add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
            swizzleLayer.SetBackendId(setBackends[i]);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // Replace the inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool TransposeInputTensors(ConversionData& data,
                           std::vector<LayerInputHandle>& inputs,
                           std::vector<armnn::TensorShape>& inputShapes,
                           const armnn::PermutationVector& mapping)
{
    // If we have an IdentityPermutation4D or IdentityPermutation3D then we are not permuting
    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
    {
        std::vector<armnn::BackendId> setBackendsVec;
        armnn::TensorInfo outputTransposeInfo;
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // Check that the permute layer is supported
            armnn::TransposeDescriptor transposeDesc;
            transposeDesc.m_DimMappings = mapping;
            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       IsTransposeSupported,
                                       data.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputs[i].GetTensorInfo(),
                                       outputTransposeInfo,
                                       transposeDesc);
            setBackendsVec.push_back(setBackend);
            if (!isSupported)
            {
                return false;
            }
        }
        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping, setBackendsVec);
    }
    return true;
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;
    if (numberOfDimensions < 3)
    {
        throw armnn::InvalidArgumentException(
            "CreateConcatPermutationParameters: numberOfDimensions handed in cannot be less than three");
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 3;
        permutationPair = std::make_pair(SwapDim2And3, SwapDim2And3);
        needPermute     = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute     = true;
    }
    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
    // permutation identity to only have 3 dimensions
    else if (numberOfDimensions == 3 && concatDimension == 2)
    {
        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
    }
    return needPermute;
}
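
// Worked example (illustrative, not part of the original header): concatenating
// 4-D tensors along dimension 2 is rewritten as a concatenation along dimension
// 3: each input is first transposed with SwapDim2And3, the concat then runs
// along dimension 3, and the result is transposed back with the second element
// of the returned pair (SwapDim2And3 is its own inverse).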

} // anonymous namespace

namespace armnn_driver
{

using namespace android::nn;

//// Creates an ArmNN activation layer and connects it to the given layer, if the
//// passed-in AndroidNN activation function requires it.
//// @return The end layer of the sequence of layers built for the given AndroidNN
//// activation function, or nullptr if an error occurred (e.g. unsupported activation).
//// Note that the end layer matches the input layer if no activation is required
//// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

inline const Operand* GetInputOperand(const Operation& operation,
                                      uint32_t inputIndex,
                                      const Model& model,
                                      bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    if (!(operation.inputs[inputIndex] < getMainModel(model).operands.size()))
    {
        std::ostringstream os;
        os << "GetInputOperand: inputIndex [" << inputIndex << "]";
        os << " is too large. The number of main model operands is [";
        os << getMainModel(model).operands.size() << "]";
        throw armnn::InvalidArgumentException(os.str());
    }
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}
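
// Illustrative usage (not part of the original header): fetching the first
// input of an operation and failing gracefully if it is absent:
//
//     const Operand* input = GetInputOperand(operation, 0, model);
//     if (!input)
//     {
//         return Fail("%s: could not read input 0", __func__);
//     }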

inline const Operand* GetOutputOperand(const Operation& operation,
                                       uint32_t outputIndex,
                                       const Model& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    if (!(operation.outputs[outputIndex] < getMainModel(model).operands.size()))
    {
        std::ostringstream os;
        os << "GetOutputOperand: outputIndex [" << outputIndex << "]";
        os << " is too large. The number of main model operands is [";
        os << getMainModel(model).operands.size() << "]";
        throw armnn::InvalidArgumentException(os.str());
    }

    return &getMainModel(model).operands[operation.outputs[outputIndex]];
}

const void* GetOperandValueReadOnlyAddress(const Operand& operand,
                                           const Model& model,
                                           const ConversionData& data,
                                           bool optional = false);

inline bool GetOperandType(const Operation& operation,
                           uint32_t inputIndex,
                           const Model& model,
                           OperandType& type)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

inline bool IsOperandConstant(const Operand& operand)
{
    OperandLifeTime lifetime = operand.lifetime;

    return lifetime == OperandLifeTime::CONSTANT_COPY ||
           lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == OperandLifeTime::POINTER ||
           lifetime == OperandLifeTime::NO_VALUE;
}

bool IsWeightsValid(const Operation& operation, uint32_t inputIndex, const Model& model, const bool isOptional = true);

ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
                                              const Model& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false,
                                              const armnn::DataType* overrideDataType = nullptr);

inline ConstTensorPin ConvertOperationInputToConstTensorPin(
        const Operation& operation,
        uint32_t inputIndex,
        const Model& model,
        const ConversionData& data,
        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
        const armnn::TensorShape* overrideTensorShape = nullptr,
        bool optional = false)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand,
                                          model,
                                          data,
                                          dimensionMappings,
                                          overrideTensorShape,
                                          optional);
}

template <typename OutputType>
bool GetInputScalar(const Operation& operation,
                    uint32_t inputIndex,
                    OperandType type,
                    OutputType& outValue,
                    const Model& model,
                    const ConversionData& data,
                    bool optional = false)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        VLOG(DRIVER) << __func__ << ": unexpected operand type: " << operand->type << " should be: " << type;
        return false;
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}
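
// Illustrative usage (not part of the original header): reading the stride
// scalars of a convolution, assuming they live at hypothetical input indices
// 3 and 4 of the operation:
//
//     int32_t strideX = 0;
//     int32_t strideY = 0;
//     if (!GetInputScalar(operation, 3, OperandType::INT32, strideX, model, data) ||
//         !GetInputScalar(operation, 4, OperandType::INT32, strideY, model, data))
//     {
//         return Fail("%s: Operation has invalid stride inputs", __func__);
//     }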

inline bool GetInputInt32(const Operation& operation,
                          uint32_t inputIndex,
                          int32_t& outValue,
                          const Model& model,
                          const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
}

inline bool GetInputFloat32(const Operation& operation,
                            uint32_t inputIndex,
                            float& outValue,
                            const Model& model,
                            const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
}

inline bool GetInputActivationFunctionImpl(const Operation& operation,
                                           uint32_t inputIndex,
                                           OperandType type,
                                           ActivationFn& outActivationFunction,
                                           const Model& model,
                                           const ConversionData& data)
{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        VLOG(DRIVER) << __func__ << ": unexpected operand type: " << type
                     << " should be OperandType::INT32 or OperandType::TENSOR_INT32";
        return false;
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

inline bool GetInputActivationFunction(const Operation& operation,
                                       uint32_t inputIndex,
                                       ActivationFn& outActivationFunction,
                                       const Model& model,
                                       const ConversionData& data)
{
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}

inline bool GetInputActivationFunctionFromTensor(const Operation& operation,
                                                 uint32_t inputIndex,
                                                 ActivationFn& outActivationFunction,
                                                 const Model& model,
                                                 const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}

inline bool GetOptionalInputActivation(const Operation& operation,
                                       uint32_t inputIndex,
                                       ActivationFn& activationFunction,
                                       const Model& model,
                                       const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename ConvolutionDescriptor>
bool GetOptionalConvolutionDilationParams(const Operation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const Model& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar(operation,
                                  dilationXIndex,
                                  OperandType::INT32,
                                  descriptor.m_DilationX,
                                  model,
                                  data);
        success &= GetInputScalar(operation,
                                  dilationXIndex + 1,
                                  OperandType::INT32,
                                  descriptor.m_DilationY,
                                  model,
                                  data);
    }

    return success;
}

inline bool GetOptionalBool(const Operation& operation,
                            uint32_t inputIndex,
                            const Model& model,
                            const ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    return *(static_cast<const bool*>(valueAddress));
}

bool GetTensorInt32Values(const Operand& operand,
                          std::vector<int32_t>& outValues,
                          const Model& model,
                          const ConversionData& data);

bool GetInputPaddingScheme(const Operation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const Model& model,
                           const ConversionData& data);

LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
                                           uint32_t inputIndex,
                                           const Model& model,
                                           ConversionData& data,
                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                           const LayerInputHandle* inputHandle = nullptr);

bool SetupAndTrackLayerOutputSlot(const Operation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const Model& model,
                                  ConversionData& data,
                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
                                  bool inferOutputShapes = false);

armnn::DataLayout OptionalDataLayout(const Operation& operation,
                                     uint32_t inputIndex,
                                     const Model& model,
                                     ConversionData& data);

inline bool SetupAndTrackLayerOutputSlot(
        const Operation& operation,
        uint32_t outputIndex,
        armnn::IConnectableLayer& layer,
        const Model& model,
        ConversionData& data,
        const armnn::TensorInfo* overrideOutputInfo = nullptr,
        const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
        const ActivationFn& activationFunction = ActivationFn::kActivationNone)
{
    return SetupAndTrackLayerOutputSlot(operation,
                                        outputIndex,
                                        layer,
                                        outputIndex,
                                        model,
                                        data,
                                        overrideOutputInfo,
                                        validateFunc,
                                        activationFunction);
}
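
// Illustrative end-of-conversion step (not part of the original header): after
// a converter has created and connected a layer, it registers the layer's
// output slot against the operation's output operand so that later operations
// can find it. A sketch for a single-output layer:
//
//     armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
//     input.Connect(layer->GetInputSlot(0));
//     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);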

bool ConvertToActivation(const Operation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const Model& model,
                         ConversionData& data);

bool ConvertPaddings(const Operation& operation,
                     const Model& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor);

bool ConvertReduce(const Operation& operation,
                   const Model& model,
                   ConversionData& data,
                   armnn::ReduceOperation reduceOperation);

bool ConvertPooling2d(const Operation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const Model& model,
                      ConversionData& data);

inline bool IsQSymm8(const Operand& operand)
{
    return operand.type == OperandType::TENSOR_QUANT8_SYMM;
}

enum class DequantizeStatus
{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;

DequantizeResult DequantizeIfRequired(size_t operand_index,
                                      const Operation& operation,
                                      const Model& model,
                                      const ConversionData& data);
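
// Illustrative usage (not part of the original header): unpacking the
// DequantizeResult tuple with a structured binding, assuming weightsIndex is
// the index of a (possibly quantized) weights operand:
//
//     auto [dequantizedData, byteCount, tensorInfo, status] =
//         DequantizeIfRequired(weightsIndex, operation, model, data);
//     if (status == DequantizeStatus::INVALID_OPERAND)
//     {
//         return Fail("%s: operand could not be dequantized", __func__);
//     }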

ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation& operation,
                                               const Model& model,
                                               const ConversionData& data,
                                               size_t operandIndex,
                                               bool optional = false);

bool IsConnectedToDequantize(armnn::IOutputSlot* ioutputSlot);

} // namespace armnn_driver