ArmNN 24.02
armnn_driver Namespace Reference

Helper classes.

Classes

class  ArmnnDevice
 
class  ArmnnDriver
 
class  ArmnnDriverImpl
 
class  ArmnnPreparedModel
 
struct  CanonicalExecutionContext
 
class  ConstTensorPin
 
struct  ConversionData
 
class  Converter
 
class  DriverOptions
 
class  LayerInputHandle
 
class  ModelToINetworkTransformer
 
class  UnsupportedOperand
 

Typedefs

template<typename TensorType >
using DumpElementFunction = void(*)(const TensorType &tensor, unsigned int elementIndex, std::ofstream &fileStream)
 
using Model = ::android::nn::Model
 
using Operand = ::android::nn::Operand
 
using OperandLifeTime = ::android::nn::Operand::LifeTime
 
using OperandType = ::android::nn::OperandType
 
using Operation = ::android::nn::Operation
 
using OperationType = ::android::nn::OperationType
 
using ErrorStatus = ::android::nn::ErrorStatus
 
using DequantizeResult = std::tuple< std::unique_ptr< float[]>, size_t, armnn::TensorInfo, DequantizeStatus >
 
using Half = half_float::half
 

Enumerations

enum  ConversionResult { Success, ErrorMappingPools, UnsupportedFeature }
 
enum  DequantizeStatus { SUCCESS, NOT_REQUIRED, INVALID_OPERAND }
 

Functions

void SwizzleAndroidNn4dTensorToArmNn (armnn::TensorInfo &tensor, const void *input, void *output, const armnn::PermutationVector &mappings)
 Swizzles tensor data in input according to the dimension mappings.
 
void * GetMemoryFromPool (DataLocation location, const std::vector< android::nn::RunTimePoolInfo > &memPools)
 Returns a pointer to a specific location in a pool.
 
void * GetMemoryFromPointer (const Request::Argument &requestArg)
 
armnn::TensorInfo GetTensorInfoForOperand (const Operand &operand)
 
std::string GetOperandSummary (const Operand &operand)
 
template<typename TensorType >
void DumpTensor (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const TensorType &tensor)
 
template void DumpTensor< armnn::ConstTensor > (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const armnn::ConstTensor &tensor)
 
template void DumpTensor< armnn::Tensor > (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const armnn::Tensor &tensor)
 
void DumpJsonProfilingIfRequired (bool gpuProfilingEnabled, const std::string &dumpDir, armnn::NetworkId networkId, const armnn::IProfiler *profiler)
 
std::string ExportNetworkGraphToDotFile (const armnn::IOptimizedNetwork &optimizedNetwork, const std::string &dumpDir)
 
std::string SerializeNetwork (const armnn::INetwork &network, const std::string &dumpDir, std::vector< uint8_t > &dataCacheData, bool dataCachingActive)
 
bool IsDynamicTensor (const armnn::TensorInfo &outputInfo)
 Checks if a tensor info represents a dynamic tensor.
 
bool AreDynamicTensorsSupported (void)
 Checks for ArmNN support of dynamic tensors.
 
bool isQuantizedOperand (const OperandType &operandType)
 
std::string GetModelSummary (const Model &model)
 
std::string GetFileTimestamp ()
 
void RenameExportedFiles (const std::string &existingSerializedFileName, const std::string &existingDotFileName, const std::string &dumpDir, const armnn::NetworkId networkId)
 
void RenameFile (const std::string &existingName, const std::string &extension, const std::string &dumpDir, const armnn::NetworkId networkId)
 
void CommitPools (std::vector<::android::nn::RunTimePoolInfo > &memPools)
 
OutputShape ComputeShape (const armnn::TensorInfo &info)
 
bool IsWeightsValid (const Operation &operation, uint32_t inputIndex, const Model &model, const bool isOptional=true)
 Checks whether an operation's weights input operand is valid.
 
ConstTensorPin ConvertOperandToConstTensorPin (const Operand &operand, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings, const armnn::TensorShape *overrideTensorShape, bool optional, const armnn::DataType *overrideDataType)
 
LayerInputHandle ConvertToLayerInputHandle (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle)
 
bool ConvertPaddings (const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor)
 
bool ConvertPooling2d (const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data)
 
bool ConvertReduce (const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
 
bool ConvertToActivation (const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data)
 
DequantizeResult DequantizeIfRequired (size_t operand_index, const Operation &operation, const Model &model, const ConversionData &data)
 
ConstTensorPin DequantizeAndMakeConstTensorPin (const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
 
bool GetInputPaddingScheme (const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data)
 
const void * GetOperandValueReadOnlyAddress (const Operand &operand, const Model &model, const ConversionData &data, bool optional)
 
bool GetTensorInt32Values (const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data)
 
armnn::DataLayout OptionalDataLayout (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data)
 
armnn::IConnectableLayer * ProcessActivation (const armnn::TensorInfo &tensorInfo, ActivationFn activation, armnn::IConnectableLayer *prevLayer, ConversionData &data)
 
bool SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes)
 
bool IsConnectedToDequantize (armnn::IOutputSlot *ioutputSlot)
 
const Operand * GetInputOperand (const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true)
 
const Operand * GetOutputOperand (const Operation &operation, uint32_t outputIndex, const Model &model)
 
bool GetOperandType (const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type)
 
bool IsOperandConstant (const Operand &operand)
 
ConstTensorPin ConvertOperationInputToConstTensorPin (const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings=g_DontPermute, const armnn::TensorShape *overrideTensorShape=nullptr, bool optional=false)
 
template<typename OutputType >
bool GetInputScalar (const Operation &operation, uint32_t inputIndex, OperandType type, OutputType &outValue, const Model &model, const ConversionData &data, bool optional=false)
 
bool GetInputInt32 (const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data)
 
bool GetInputFloat32 (const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunctionImpl (const Operation &operation, uint32_t inputIndex, OperandType type, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunction (const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunctionFromTensor (const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetOptionalInputActivation (const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data)
 
template<typename ConvolutionDescriptor >
bool GetOptionalConvolutionDilationParams (const Operation &operation, uint32_t dilationXIndex, ConvolutionDescriptor &descriptor, const Model &model, const ConversionData &data)
 
bool GetOptionalBool (const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data)
 
bool SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t outputIndex, armnn::IConnectableLayer &layer, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo=nullptr, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc=nullptr, const ActivationFn &activationFunction=ActivationFn::kActivationNone)
 
bool IsQSymm8 (const Operand &operand)
 

Variables

const armnn::PermutationVector g_DontPermute {}
 

Detailed Description

Helper classes.

Typedef Documentation

◆ DequantizeResult

using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>

Definition at line 1047 of file ConversionUtils.hpp.
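
A minimal usage sketch (not part of the ArmNN sources): the tuple packs the dequantized buffer, its size in bytes, the tensor metadata, and a status flag, and is typically unpacked with std::get, as DequantizeAndMakeConstTensorPin() does below.

 DequantizeResult dequantized = DequantizeIfRequired(operandIndex, operation, model, data);
 if (std::get<3>(dequantized) == DequantizeStatus::SUCCESS)
 {
     std::unique_ptr<float[]>& buffer = std::get<0>(dequantized); // dequantized values
     size_t bufferSizeBytes           = std::get<1>(dequantized); // buffer size in bytes
     armnn::TensorInfo& tensorInfo    = std::get<2>(dequantized); // shape and data type
 }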

◆ DumpElementFunction

using DumpElementFunction = void (*)(const TensorType& tensor, unsigned int elementIndex, std::ofstream& fileStream)

Definition at line 206 of file CanonicalUtils.cpp.

◆ ErrorStatus

using ErrorStatus = ::android::nn::ErrorStatus

Definition at line 51 of file ConversionUtils.hpp.

◆ Half

using Half = half_float::half

Definition at line 15 of file Converter.cpp.
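
A minimal sketch, for illustration only: Half is the 16-bit floating-point type from the bundled half_float library, used when handling OperandType::TENSOR_FLOAT16 data.

 Half h = Half(1.5f);             // construct from a 32-bit float
 float f = static_cast<float>(h); // convert back; f == 1.5f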

◆ Model

using Model = ::android::nn::Model

Definition at line 45 of file ConversionUtils.hpp.

◆ Operand

using Operand = ::android::nn::Operand

Definition at line 46 of file ConversionUtils.hpp.

◆ OperandLifeTime

using OperandLifeTime = ::android::nn::Operand::LifeTime

Definition at line 47 of file ConversionUtils.hpp.

◆ OperandType

using OperandType = ::android::nn::OperandType

Definition at line 48 of file ConversionUtils.hpp.

◆ Operation

using Operation = ::android::nn::Operation

Definition at line 49 of file ConversionUtils.hpp.

◆ OperationType

using OperationType = ::android::nn::OperationType

Definition at line 50 of file ConversionUtils.hpp.

Enumeration Type Documentation

◆ ConversionResult

enum class ConversionResult
Enumerator
Success 
ErrorMappingPools 
UnsupportedFeature 

Definition at line 127 of file ConversionUtils.hpp.

128 {
129  Success,
130  ErrorMappingPools,
131  UnsupportedFeature
132 };

◆ DequantizeStatus

enum class DequantizeStatus
Enumerator
SUCCESS 
NOT_REQUIRED 
INVALID_OPERAND 

Definition at line 1040 of file ConversionUtils.hpp.

1041 {
1042  SUCCESS,
1043  NOT_REQUIRED,
1044  INVALID_OPERAND
1045 };

Function Documentation

◆ AreDynamicTensorsSupported()

bool AreDynamicTensorsSupported ( )

Checks for ArmNN support of dynamic tensors.

Definition at line 505 of file CanonicalUtils.cpp.

506 {
507  return true;
508 }

Referenced by ConvertPooling2d(), ConvertReduce(), and ConvertToActivation().
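
The callers listed above all use it in the same validation pattern (visible in ConvertReduce() and ConvertToActivation() below): when the output tensor is dynamic, backend validation is deferred and the conversion is accepted only if dynamic tensors are supported at all.

 if (IsDynamicTensor(outputInfo))
 {
     isSupported = AreDynamicTensorsSupported();
 }
 else
 {
     validateFunc(outputInfo, isSupported);
 }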

◆ CommitPools()

void CommitPools ( std::vector<::android::nn::RunTimePoolInfo > &  memPools)

Definition at line 615 of file CanonicalUtils.cpp.

616 {
617  // Commit output buffers.
618  // Note that we update *all* pools, even if they aren't actually used as outputs -
619  // this is simpler and is what the CpuExecutor does.
620  for (auto& pool : memPools)
621  {
622  // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
623  // update() has been removed and flush() added.
624  pool.flush();
625  }
626 }

Referenced by ArmnnPreparedModel::ExecuteGraph().

◆ ComputeShape()

OutputShape armnn_driver::ComputeShape ( const armnn::TensorInfo &  info )
inline

Definition at line 95 of file CanonicalUtils.hpp.

96 {
97  OutputShape shape;
98 
99  armnn::TensorShape tensorShape = info.GetShape();
100  // Android will expect scalars as a zero dimensional tensor
101  if (tensorShape.GetDimensionality() == armnn::Dimensionality::Scalar)
102  {
103  shape.dimensions = std::vector<uint32_t>{};
104  }
105  else
106  {
107  std::vector<uint32_t> dimensions;
108  const unsigned int numDims = tensorShape.GetNumDimensions();
109  dimensions.resize(numDims);
110  for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
111  {
112  dimensions[outputIdx] = tensorShape[outputIdx];
113  }
114  shape.dimensions = dimensions;
115  }
116 
117  shape.isSufficient = true;
118 
119  return shape;
120 }

References TensorShape::GetDimensionality(), TensorShape::GetNumDimensions(), and armnn::Scalar.

Referenced by ArmnnPreparedModel::execute().
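
A minimal usage sketch (values are illustrative): converting an ArmNN tensor info into the OutputShape reported back to the NNAPI runtime.

 armnn::TensorInfo info({ 2, 3 }, armnn::DataType::Float32);
 OutputShape shape = ComputeShape(info);
 // shape.dimensions == { 2, 3 }, shape.isSufficient == true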

◆ ConvertOperandToConstTensorPin()

ConstTensorPin ConvertOperandToConstTensorPin ( const Operand &  operand,
const Model &  model,
const ConversionData &  data,
const armnn::PermutationVector &  dimensionMappings,
const armnn::TensorShape *  overrideTensorShape,
bool  optional,
const armnn::DataType *  overrideDataType 
)

Definition at line 166 of file ConversionUtils.cpp.

173 {
174  if (!IsOperandTypeSupportedForTensors(operand.type))
175  {
176  VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor" << operand.type;
177  return ConstTensorPin();
178  }
179 
180  if (!optional && !IsOperandConstant(operand))
181  {
182  VLOG(DRIVER) << __func__ << ": lifetime for input tensor: r" << operand.lifetime;
183  return ConstTensorPin();
184  }
185 
186  const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data, optional);
187  if (!valueStart)
188  {
189  if (optional)
190  {
191  // optional tensor with no values is not really an error; return it as invalid, but marked as optional
192  return ConstTensorPin(true);
193  }
194  // mandatory tensor with no values
195  Fail("%s: failed to get operand address", __func__);
196  return ConstTensorPin();
197  }
198 
199  armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
200 
201  if (overrideTensorShape)
202  {
203  tensorInfo.SetShape(*overrideTensorShape);
204  }
205 
206  if (overrideDataType)
207  {
208  tensorInfo.SetDataType(*overrideDataType);
209  }
210 
211  // Make sure isConstant flag is set.
212  tensorInfo.SetConstant();
213  return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
214 }

References GetOperandValueReadOnlyAddress(), GetTensorInfoForOperand(), IsOperandConstant(), TensorInfo::SetConstant(), TensorInfo::SetDataType(), and TensorInfo::SetShape().

Referenced by ConvertOperationInputToConstTensorPin(), and ConvertToLayerInputHandle().

◆ ConvertOperationInputToConstTensorPin()

ConstTensorPin armnn_driver::ConvertOperationInputToConstTensorPin ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model,
const ConversionData &  data,
const armnn::PermutationVector &  dimensionMappings = g_DontPermute,
const armnn::TensorShape *  overrideTensorShape = nullptr,
bool  optional = false 
)
inline

Definition at line 751 of file ConversionUtils.hpp.

759 {
760  const Operand* operand = GetInputOperand(operation, inputIndex, model);
761  if (!operand)
762  {
763  Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
764  return ConstTensorPin();
765  }
766  return ConvertOperandToConstTensorPin(*operand,
767  model,
768  data,
769  dimensionMappings,
770  overrideTensorShape,
771  optional);
772 }

References ConvertOperandToConstTensorPin(), and GetInputOperand().

Referenced by DequantizeAndMakeConstTensorPin().

◆ ConvertPaddings()

bool ConvertPaddings ( const Operation &  operation,
const Model &  model,
ConversionData &  data,
unsigned int  rank,
armnn::PadDescriptor &  padDescriptor 
)

Definition at line 350 of file ConversionUtils.cpp.

355 {
356  const Operand* paddingsOperand = GetInputOperand(operation, 1, model);
357  if (!paddingsOperand)
358  {
359  return Fail("%s: Could not read paddings operand", __func__);
360  }
361 
362  armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
363  if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
364  {
365  return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
366  }
367 
368  std::vector<int32_t> paddings;
369  if (!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
370  {
371  return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
372  }
373 
374  // add padding for each dimension of input tensor.
375  for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
376  {
377  int paddingBeforeInput = paddings[i];
378  int paddingAfterInput = paddings[i + 1];
379 
380  if (paddingBeforeInput < 0 || paddingAfterInput < 0)
381  {
382  return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
383  }
384 
385  padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
386  }
387 
388  return true;
389 }

References GetInputOperand(), TensorShape::GetNumDimensions(), TensorShape::GetNumElements(), GetTensorInt32Values(), and PadDescriptor::m_PadList.
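
A minimal sketch of a hypothetical call site: populating a PadDescriptor for a rank-4 input before adding a Pad layer.

 armnn::PadDescriptor padDescriptor;
 if (!ConvertPaddings(operation, model, data, 4u, padDescriptor))
 {
     return Fail("%s: Could not convert paddings", __func__);
 }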

◆ ConvertPooling2d()

bool ConvertPooling2d ( const Operation &  operation,
const char *  operationName,
armnn::PoolingAlgorithm  poolType,
const Model &  model,
ConversionData &  data 
)

Definition at line 392 of file ConversionUtils.cpp.

397 {
398 
399  VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
400 
401  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
402  if (!input.IsValid())
403  {
404  return Fail("%s: Operation Could not read input 0", operationName);
405  }
406 
407  const Operand* output = GetOutputOperand(operation, 0, model);
408  if (!output)
409  {
410  return Fail("%s: Could not read output 0", __func__);
411  }
412 
413  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
414  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
415 
416  armnn::Pooling2dDescriptor desc;
417  desc.m_PoolType = poolType;
418  desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
419  desc.m_DataLayout = armnn::DataLayout::NHWC;
420 
421  ActivationFn activation;
422 
423  auto inputSize = operation.inputs.size();
424 
425  if (inputSize >= 10)
426  {
427  // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
428  if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data) ||
429  !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data) ||
430  !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data) ||
431  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data) ||
432  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data) ||
433  !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data) ||
434  !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data) ||
435  !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data) ||
436  !GetInputActivationFunction(operation, 9, activation, model, data))
437  {
438  return Fail("%s: Operation has invalid inputs", operationName);
439  }
440 
441  if (Is12OrLaterOperand(*output))
442  {
443  desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
444  }
445  }
446  else
447  {
448  // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
449  ::android::nn::PaddingScheme scheme;
450  if (!GetInputPaddingScheme(operation, 1, scheme, model, data) ||
451  !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data) ||
452  !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data) ||
453  !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data) ||
454  !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data) ||
455  !GetInputActivationFunction(operation, 6, activation, model, data))
456  {
457  return Fail("%s: Operation has invalid inputs", operationName);
458  }
459 
460  if (Is12OrLaterOperand(*output))
461  {
462  desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
463  }
464 
465  const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
466  const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
467  const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
468 
469  CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
470  CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
471  }
472 
473  bool isSupported = false;
474  armnn::BackendId setBackend;
475  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
476  {
477  FORWARD_LAYER_SUPPORT_FUNC(__func__,
478  IsPooling2dSupported,
479  data.m_Backends,
480  isSupported,
481  setBackend,
482  inputInfo,
483  outputInfo,
484  desc);
485 
486  };
487 
488  if(IsDynamicTensor(outputInfo))
489  {
490  isSupported = AreDynamicTensorsSupported();
491  }
492  else
493  {
494  validateFunc(outputInfo, isSupported);
495  }
496 
497  if (!isSupported)
498  {
499  return false;
500  }
501 
502  armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
503  if (!pooling2dLayer)
504  {
505  return Fail("%s: AddPooling2dLayer failed", __func__);
506  }
507  pooling2dLayer->SetBackendId(setBackend);
508 
509  input.Connect(pooling2dLayer->GetInputSlot(0));
510 
511  if (!isSupported)
512  {
513  return false;
514  }
515 
516  return SetupAndTrackLayerOutputSlot(operation, 0, *pooling2dLayer, model,
517  data, nullptr, validateFunc, activation);
518 }

References AreDynamicTensorsSupported(), LayerInputHandle::Connect(), ConvertToLayerInputHandle(), armnn::Floor, FORWARD_LAYER_SUPPORT_FUNC, DataLayoutIndexed::GetHeightIndex(), GetInputActivationFunction(), GetInputPaddingScheme(), GetInputScalar(), IConnectableLayer::GetInputSlot(), GetOutputOperand(), TensorInfo::GetShape(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), DataLayoutIndexed::GetWidthIndex(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, Pooling2dDescriptor::m_DataLayout, ConversionData::m_Network, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::NHWC, OptionalDataLayout(), IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().

◆ ConvertReduce()

bool ConvertReduce ( const Operation &  operation,
const Model &  model,
ConversionData &  data,
armnn::ReduceOperation  reduceOperation 
)

Definition at line 520 of file ConversionUtils.cpp.

524 {
525  armnn::ReduceDescriptor descriptor;
526  descriptor.m_ReduceOperation = reduceOperation;
527 
528  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
529  if (!input.IsValid())
530  {
531  return Fail("%s: Operation has invalid inputs", __func__);
532  }
533  const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
534 
535  const Operand* output = GetOutputOperand(operation, 0, model);
536  if (!output)
537  {
538  return Fail("%s: Could not read output 0", __func__);
539  }
540  const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
541 
542  const Operand* axisOperand = GetInputOperand(operation, 1, model);
543  if (!axisOperand)
544  {
545  return Fail("%s: Could not read input 1", __func__);
546  }
547  std::vector<int32_t> axis;
548  if (!GetTensorInt32Values(*axisOperand, axis, model, data))
549  {
550  return Fail("%s: Input 1 has invalid values", __func__);
551  }
552 
553  // Convert the axis to unsigned int and remove duplicates.
554  unsigned int rank = inputInfo.GetNumDimensions();
555  std::set<unsigned int> uniqueAxis;
556  std::transform(axis.begin(), axis.end(),
557  std::inserter(uniqueAxis, uniqueAxis.begin()),
558  [rank](int i) -> unsigned int { return (i + rank) % rank; });
559  descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
560 
561  // Get the "keep dims" flag.
562  if (!GetInputScalar(operation, 2, OperandType::BOOL, descriptor.m_KeepDims, model, data))
563  {
564  return Fail("%s: Could not read input 2", __func__);
565  }
566 
567  bool isSupported = false;
568  armnn::BackendId setBackend;
569  auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
570  {
571  FORWARD_LAYER_SUPPORT_FUNC(__func__,
572  IsReduceSupported,
573  data.m_Backends,
574  isSupported,
575  setBackend,
576  inputInfo,
577  outputInfo,
578  descriptor);
579  };
580 
581  if(!IsDynamicTensor(outputInfo))
582  {
583  validateFunc(outputInfo, isSupported);
584  }
585  else
586  {
587  isSupported = AreDynamicTensorsSupported();
588  }
589 
590  if (!isSupported)
591  {
592  return false;
593  }
594 
595  armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
596  assert(layer != nullptr);
597  layer->SetBackendId(setBackend);
598  input.Connect(layer->GetInputSlot(0));
599 
600  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
601 }

References AreDynamicTensorsSupported(), LayerInputHandle::Connect(), ConvertToLayerInputHandle(), FORWARD_LAYER_SUPPORT_FUNC, GetInputOperand(), GetInputScalar(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), GetOutputOperand(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), GetTensorInt32Values(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, ReduceDescriptor::m_KeepDims, ConversionData::m_Network, ReduceDescriptor::m_ReduceOperation, ReduceDescriptor::m_vAxis, IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().

Referenced by Converter::ConvertOperation().

◆ ConvertToActivation()

bool ConvertToActivation ( const Operation &  operation,
const char *  operationName,
const armnn::ActivationDescriptor &  activationDesc,
const Model &  model,
ConversionData &  data 
)

Definition at line 604 of file ConversionUtils.cpp.

609 {
610  LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
611  if (!input.IsValid())
612  {
613  return Fail("%s: Input 0 is invalid", operationName);
614  }
615 
616  const Operand* outputOperand = GetOutputOperand(operation, 0, model);
617  if (!outputOperand)
618  {
619  return false;
620  }
621 
622  const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
623 
624  bool isSupported = false;
625  armnn::BackendId setBackend;
626  auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
627  {
628  FORWARD_LAYER_SUPPORT_FUNC(__func__,
629  IsActivationSupported,
630  data.m_Backends,
631  isSupported,
632  setBackend,
633  input.GetTensorInfo(),
634  outInfo,
635  activationDesc);
636  };
637 
638  if(IsDynamicTensor(outInfo))
639  {
640  isSupported = AreDynamicTensorsSupported();
641  }
642  else
643  {
644  validateFunc(outInfo, isSupported);
645  }
646 
647  if (!isSupported)
648  {
649  return false;
650  }
651 
652  armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
653  if (layer == nullptr)
654  {
655  throw armnn::NullPointerException("failed to add activation layer to network");
656  }
657  layer->SetBackendId(setBackend);
658  input.Connect(layer->GetInputSlot(0));
659 
660  return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
661 }

References AreDynamicTensorsSupported(), LayerInputHandle::Connect(), ConvertToLayerInputHandle(), FORWARD_LAYER_SUPPORT_FUNC, IConnectableLayer::GetInputSlot(), GetOutputOperand(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, ConversionData::m_Network, IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().

◆ ConvertToLayerInputHandle()

LayerInputHandle ConvertToLayerInputHandle ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model,
ConversionData &  data,
const armnn::PermutationVector &  dimensionMappings,
const LayerInputHandle *  inputHandle 
)

Definition at line 216 of file ConversionUtils.cpp.

222 {
223 
224  const Operand* operand = GetInputOperand(operation, inputIndex, model);
225  if (!operand)
226  {
227  Fail("%s: failed to get input operand %i", __func__, inputIndex);
228  return LayerInputHandle();
229  }
230 
231  if (!IsOperandTypeSupportedForTensors(operand->type))
232  {
233  VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor: " << operand->type;
234  return LayerInputHandle();
235  }
236 
237  try
238  {
239  armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
240 
241  if (IsDynamicTensor(operandTensorInfo))
242  {
243  data.m_DynamicInputsEncountered = true;
244 
245  const uint32_t operandIndex = operation.inputs[inputIndex];
246 
247  // Check if the dynamic input tensors have been inferred by one of the previous layers
248  // If not we can't support them
249  if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
250  {
251  operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
252  }
253  else
254  {
255  Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
256  return LayerInputHandle();
257  }
258  }
259 
260  switch (operand->lifetime)
261  {
262  case OperandLifeTime::SUBGRAPH_INPUT:
263  {
264  // NOTE: We must check whether we can support the input tensor on at least one
265  // of the provided backends; otherwise we cannot convert the operation
266  bool isInputSupported = false;
267  FORWARD_LAYER_SUPPORT_FUNC(__func__,
268  IsInputSupported,
269  data.m_Backends,
270  isInputSupported,
271  armnn::BackendId(),
272  operandTensorInfo);
273 
274  if (!isInputSupported)
275  {
276  Fail("%s: unsupported input tensor", __func__);
277  return LayerInputHandle();
278  }
279 
280  [[clang::fallthrough]]; // intentional fallthrough
281  }
282  case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
283  case OperandLifeTime::SUBGRAPH_OUTPUT:
284  {
285  // The tensor is either an operand internal to the model, or a model input.
286  // It can be associated with an ArmNN output slot for an existing layer.
287 
288  // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
289  const uint32_t operandIndex = operation.inputs[inputIndex];
290  return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
291  }
292  case OperandLifeTime::CONSTANT_COPY: // intentional fallthrough
293  case OperandLifeTime::POINTER:
294  case OperandLifeTime::CONSTANT_REFERENCE:
295  {
296  auto constantTensorDataType = operandTensorInfo.GetDataType();
297  // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
298  ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand,
299  model,
300  data,
301  dimensionMappings,
302  nullptr,
303  false,
304  &constantTensorDataType);
305  if (tensorPin.IsValid())
306  {
307  bool isSupported = false;
308  armnn::BackendId setBackend;
309  FORWARD_LAYER_SUPPORT_FUNC(__func__,
310  IsConstantSupported,
311  data.m_Backends,
312  isSupported,
313  setBackend,
314  tensorPin.GetConstTensor().GetInfo());
315  if (!isSupported)
316  {
317  return LayerInputHandle();
318  }
319 
320  armnn::IConnectableLayer* constantLayer =
321  data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
322  constantLayer->SetBackendId(setBackend);
323  armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
324  armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
325  outputSlot.SetTensorInfo(constantTensorInfo);
326 
327  return LayerInputHandle(true, &outputSlot, constantTensorInfo);
328  }
329  else
330  {
331  Fail("%s: invalid operand tensor", __func__);
332  return LayerInputHandle();
333  }
334  break;
335  }
336  default:
337  {
338  VLOG(DRIVER) << __func__ << ": unsupported lifetime for input tensor: " << operand->lifetime;
339  return LayerInputHandle();
340  }
341  }
342  }
343  catch (UnsupportedOperand<OperandType>& e)
344  {
345  VLOG(DRIVER) << __func__ << ": Operand type: " << e.m_type << " not supported in ArmnnDriver";
346  return LayerInputHandle();
347  }
348 }

References ConvertOperandToConstTensorPin(), FORWARD_LAYER_SUPPORT_FUNC, ConstTensorPin::GetConstTensor(), TensorInfo::GetDataType(), BaseTensor< MemoryType >::GetInfo(), GetInputOperand(), IConnectableLayer::GetOutputSlot(), GetTensorInfoForOperand(), IsDynamicTensor(), ConstTensorPin::IsValid(), ConversionData::m_Backends, ConversionData::m_DynamicInputsEncountered, ConversionData::m_Network, ConversionData::m_OutputSlotForOperand, UnsupportedOperand< OperandType >::m_type, IConnectableLayer::SetBackendId(), and IOutputSlot::SetTensorInfo().

Referenced by ConvertPooling2d(), ConvertReduce(), and ConvertToActivation().

◆ DequantizeAndMakeConstTensorPin()

ConstTensorPin DequantizeAndMakeConstTensorPin ( const Operation &  operation,
const Model &  model,
const ConversionData &  data,
size_t  operandIndex,
bool  optional 
)

Definition at line 752 of file ConversionUtils.cpp.

757 {
758  DequantizeResult dequantized = DequantizeIfRequired(operandIndex,operation, model, data);
759 
760  DequantizeStatus status = std::get<3>(dequantized);
761  switch (status)
762  {
763  case DequantizeStatus::INVALID_OPERAND:
764  {
765  // return invalid const tensor pin
766  return ConstTensorPin();
767  }
768  case DequantizeStatus::NOT_REQUIRED:
769  {
770  return ConvertOperationInputToConstTensorPin(
771  operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
772  }
773  case DequantizeStatus::SUCCESS:
774  default:
775  {
776  return ConstTensorPin(
777  std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
778  }
779  }
780 }

References ConvertOperationInputToConstTensorPin(), DequantizeIfRequired(), g_DontPermute, INVALID_OPERAND, NOT_REQUIRED, and SUCCESS.

◆ DequantizeIfRequired()

DequantizeResult DequantizeIfRequired ( size_t  operand_index,
const Operation &  operation,
const Model &  model,
const ConversionData &  data 
)

Definition at line 663 of file ConversionUtils.cpp.

667 {
668  const Operand* weightsOperand = GetInputOperand(operation, operand_index, model);
669  if (!weightsOperand)
670  {
671  return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
672  }
673 
674  if (IsOperandConstant(*weightsOperand))
675  {
676  // Weights are already constant
677  return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
678  }
679 
680  const size_t weightsInputIndex = operation.inputs[operand_index];
681 
682  // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
683  // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
684  for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
685  {
686  // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
687  const auto& operationIt = getMainModel(model).operations[operationIdx];
688  if (operationIt.type != OperationType::DEQUANTIZE)
689  {
690  continue;
691  }
692 
693  size_t outOpIndex = weightsInputIndex + 1;
694  for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
695  {
696  outOpIndex = operationIt.outputs[i];
697  }
698 
699  if (outOpIndex != weightsInputIndex)
700  {
701  continue;
702  }
703 
704  const Operand* operand = GetInputOperand(operationIt, 0, model);
705  if (operand == nullptr)
706  {
707  throw armnn::Exception("failed to get input operand 0");
708  }
709 
710  if (!IsQSymm8(*operand))
711  {
712  // Only supporting dequantize from QSYMM8 to FLOAT
713  break;
714  }
715 
716  // Allocate a new buffer for the dequantized data and manually dequantize
717  const void* startValue = GetOperandValueReadOnlyAddress(*operand, model, data);
718  if (!startValue)
719  {
720  // Failed to get the operand address
721  break;
722  }
723 
724  const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
725  size_t dequantizedBufferLength = operand->location.length;
726  const float quantizationScale = operand->scale;
727 
728  auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
729  for (size_t i = 0; i < dequantizedBufferLength; ++i)
730  {
731  float* dstPtr = dequantizedBuffer.get();
732  if (dstPtr == nullptr)
733  {
734  throw armnn::NullPointerException("dequantizedBuffer unique pointer is null");
735  }
736  *dstPtr++ = quantizedBuffer[i] * quantizationScale;
737  }
738 
739  // Construct tensor info for dequantized ConstTensor
740  armnn::TensorInfo tensorInfo(operand->dimensions.size(),
741  operand->dimensions.data(),
742  armnn::DataType::Float32);
743 
744  return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
745  std::move(tensorInfo),
746  DequantizeStatus::SUCCESS };
747  }
748 
749  return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
750 }

References armnn::Float32, GetInputOperand(), getMainModel(), GetOperandValueReadOnlyAddress(), INVALID_OPERAND, IsOperandConstant(), IsQSymm8(), NOT_REQUIRED, and SUCCESS.

Referenced by DequantizeAndMakeConstTensorPin().

◆ DumpJsonProfilingIfRequired()

void DumpJsonProfilingIfRequired ( bool  gpuProfilingEnabled,
const std::string &  dumpDir,
armnn::NetworkId  networkId,
const armnn::IProfiler *  profiler 
)

Definition at line 352 of file CanonicalUtils.cpp.

356 {
357  // Check if profiling is required.
358  if (!gpuProfilingEnabled)
359  {
360  return;
361  }
362 
363  // The dump directory must exist in advance.
364  if (dumpDir.empty())
365  {
366  return;
367  }
368 
369  if (profiler == nullptr)
370  {
371  throw armnn::InvalidArgumentException("DumpJsonProfilingIfRequired: pointer to profiler handed in is null");
372  }
373 
374  // Set the name of the output profiling file.
375  fs::path dumpPath = dumpDir;
376  const fs::path fileName = dumpPath / (std::to_string(networkId) + "_profiling.json");
377 
378  // Open the output file for writing.
379  std::ofstream fileStream;
380  fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);
381 
382  if (!fileStream.good())
383  {
384  VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
385  return;
386  }
387 
388  // Write the profiling info to a JSON file.
389  profiler->Print(fileStream);
390 }

References IProfiler::Print().

Referenced by ArmnnPreparedModel::~ArmnnPreparedModel().

◆ DumpTensor()

void DumpTensor ( const std::string &  dumpDir,
const std::string &  requestName,
const std::string &  tensorName,
const TensorType &  tensor 
)

Definition at line 219 of file CanonicalUtils.cpp.

223 {
224  // The dump directory must exist in advance.
225  fs::path dumpPath = dumpDir;
226  const fs::path fileName = dumpPath / (requestName + "_" + tensorName + ".dump");
227 
228  std::ofstream fileStream;
229  fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);
230 
231  if (!fileStream.good())
232  {
233  VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
234  return;
235  }
236 
237  DumpElementFunction<TensorType> dumpElementFunction = nullptr;
238 
239  switch (tensor.GetDataType())
240  {
241  case armnn::DataType::Float32:
242  {
243  dumpElementFunction = &DumpTensorElement<TensorType, float>;
244  break;
245  }
246  case armnn::DataType::QAsymmU8:
247  {
248  dumpElementFunction = &DumpTensorElement<TensorType, uint8_t, uint32_t>;
249  break;
250  }
251  case armnn::DataType::Signed32:
252  {
253  dumpElementFunction = &DumpTensorElement<TensorType, int32_t>;
254  break;
255  }
256  case armnn::DataType::Float16:
257  {
258  dumpElementFunction = &DumpTensorElement<TensorType, armnn::Half>;
259  break;
260  }
261  case armnn::DataType::QAsymmS8:
262  {
263  dumpElementFunction = &DumpTensorElement<TensorType, int8_t, int32_t>;
264  break;
265  }
266  case armnn::DataType::Boolean:
267  {
268  dumpElementFunction = &DumpTensorElement<TensorType, bool>;
269  break;
270  }
271  default:
272  {
273  dumpElementFunction = nullptr;
274  }
275  }
276 
277  if (dumpElementFunction != nullptr)
278  {
279  const unsigned int numDimensions = tensor.GetNumDimensions();
280  const armnn::TensorShape shape = tensor.GetShape();
281 
282  if (!shape.AreAllDimensionsSpecified())
283  {
284  fileStream << "Cannot dump tensor elements: not all dimensions are specified" << std::endl;
285  return;
286  }
287  fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
288 
289  if (numDimensions == 0)
290  {
291  fileStream << "# Shape []" << std::endl;
292  return;
293  }
294  fileStream << "# Shape [" << shape[0];
295  for (unsigned int d = 1; d < numDimensions; ++d)
296  {
297  fileStream << "," << shape[d];
298  }
299  fileStream << "]" << std::endl;
300  fileStream << "Each line contains the data of each of the elements of dimension0. In NCHW and NHWC, each line"
301  " will be a batch" << std::endl << std::endl;
302 
303  // Split will create a new line after all elements of the first dimension
304  // (in a 4, 3, 2, 3 tensor, there will be 4 lines of 18 elements)
305  unsigned int split = 1;
306  if (numDimensions == 1)
307  {
308  split = shape[0];
309  }
310  else
311  {
312  for (unsigned int i = 1; i < numDimensions; ++i)
313  {
314  split *= shape[i];
315  }
316  }
317 
318  // Print all elements in the tensor
319  for (unsigned int elementIndex = 0; elementIndex < tensor.GetNumElements(); ++elementIndex)
320  {
321  (*dumpElementFunction)(tensor, elementIndex, fileStream);
322 
323  if ( (elementIndex + 1) % split == 0 )
324  {
325  fileStream << std::endl;
326  }
327  }
328  fileStream << std::endl;
329  }
330  else
331  {
332  fileStream << "Cannot dump tensor elements: Unsupported data type "
333  << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
334  }
335 
336  if (!fileStream.good())
337  {
338  VLOG(DRIVER) << "An error occurred when writing to file %s" << fileName.c_str();
339  }
340 }

References TensorShape::AreAllDimensionsSpecified(), armnn::Boolean, armnn::Float16, armnn::Float32, TensorShape::GetNumElements(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::Signed32.
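
A minimal usage sketch: thanks to the explicit instantiations below, DumpTensor can be called directly with armnn::ConstTensor or armnn::Tensor (the directory and names here are illustrative).

 DumpTensor<armnn::ConstTensor>("/data/armnn-dumps", "request0", "conv1_weights", constTensor);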

◆ DumpTensor< armnn::ConstTensor >()

template void armnn_driver::DumpTensor< armnn::ConstTensor > ( const std::string &  dumpDir,
const std::string &  requestName,
const std::string &  tensorName,
const armnn::ConstTensor &  tensor 
)

◆ DumpTensor< armnn::Tensor >()

template void armnn_driver::DumpTensor< armnn::Tensor > ( const std::string &  dumpDir,
const std::string &  requestName,
const std::string &  tensorName,
const armnn::Tensor &  tensor 
)

◆ ExportNetworkGraphToDotFile()

std::string ExportNetworkGraphToDotFile ( const armnn::IOptimizedNetwork &  optimizedNetwork,
const std::string &  dumpDir 
)

Definition at line 392 of file CanonicalUtils.cpp.

394 {
395  std::string fileName;
396  // The dump directory must exist in advance.
397  if (dumpDir.empty())
398  {
399  return fileName;
400  }
401 
402  std::string timestamp = GetFileTimestamp();
403  if (timestamp.empty())
404  {
405  return fileName;
406  }
407 
408  // Set the name of the output .dot file.
409  fs::path dumpPath = dumpDir;
410  fs::path tempFilePath = dumpPath / (timestamp + "_networkgraph.dot");
411  fileName = tempFilePath.string();
412 
413  VLOG(DRIVER) << "Exporting the optimized network graph to file: %s" << fileName.c_str();
414 
415  // Write the network graph to a dot file.
416  std::ofstream fileStream;
417  fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
418 
419  if (!fileStream.good())
420  {
421  VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
422  return fileName;
423  }
424 
425  if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
426  {
427  VLOG(DRIVER) << "An error occurred when writing to file %s" << fileName.c_str();
428  }
429  return fileName;
430 }

References GetFileTimestamp(), IOptimizedNetwork::SerializeToDot(), and armnn::Success.

Referenced by ArmnnDriverImpl::PrepareArmnnModel(), and ArmnnDriverImpl::PrepareArmnnModelFromCache().

◆ GetFileTimestamp()

std::string GetFileTimestamp ( )

Definition at line 560 of file CanonicalUtils.cpp.

561 {
562  // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
563  // and getSupportedOperations.txt files)
564  timespec ts;
565  int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
566  std::stringstream ss;
567  if (iRet == 0)
568  {
569  ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
570  }
571  else
572  {
573  VLOG(DRIVER) << "clock_gettime failed with errno " <<
574  std::to_string(errno).c_str() << " : " <<
575  std::strerror(errno);
576  }
577  return ss.str();
578 }

Referenced by ExportNetworkGraphToDotFile(), and SerializeNetwork().

◆ GetInputActivationFunction()

bool armnn_driver::GetInputActivationFunction ( const Operation &  operation,
uint32_t  inputIndex,
ActivationFn &  outActivationFunction,
const Model &  model,
const ConversionData &  data 
)
inline

Definition at line 856 of file ConversionUtils.hpp.

861 {
862  return GetInputActivationFunctionImpl(operation,
863  inputIndex,
864  OperandType::INT32,
865  outActivationFunction,
866  model,
867  data);
868 }

References GetInputActivationFunctionImpl().

Referenced by ConvertPooling2d(), and GetOptionalInputActivation().

◆ GetInputActivationFunctionFromTensor()

bool armnn_driver::GetInputActivationFunctionFromTensor ( const Operation &  operation,
uint32_t  inputIndex,
ActivationFn &  outActivationFunction,
const Model &  model,
const ConversionData &  data 
)
inline

Definition at line 870 of file ConversionUtils.hpp.

875 {
876  // This only accepts a 1-D tensor of size 1
877  return GetInputActivationFunctionImpl(operation,
878  inputIndex,
879  OperandType::INT32,
880  outActivationFunction,
881  model,
882  data);
883 }

References GetInputActivationFunctionImpl().

◆ GetInputActivationFunctionImpl()

bool armnn_driver::GetInputActivationFunctionImpl ( const Operation &  operation,
uint32_t  inputIndex,
OperandType  type,
ActivationFn &  outActivationFunction,
const Model &  model,
const ConversionData &  data 
)
inline

Definition at line 833 of file ConversionUtils.hpp.

839 {
840  if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
841  {
842  VLOG(DRIVER) << __func__ << ": unexpected operand type: " << type
843  << " should be OperandType::INT32 or OperandType::TENSOR_INT32";
844  return false;
845  }
846 
847  int32_t activationFunctionAsInt;
848  if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
849  {
850  return Fail("%s: failed to get activation input value", __func__);
851  }
852  outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
853  return true;
854 }

References GetInputScalar().

Referenced by GetInputActivationFunction(), and GetInputActivationFunctionFromTensor().

◆ GetInputFloat32()

bool armnn_driver::GetInputFloat32 ( const Operation &  operation,
uint32_t  inputIndex,
float &  outValue,
const Model &  model,
const ConversionData &  data 
)
inline

Definition at line 824 of file ConversionUtils.hpp.

829 {
830  return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
831 }

References GetInputScalar().

◆ GetInputInt32()

bool armnn_driver::GetInputInt32 ( const Operation &  operation,
uint32_t  inputIndex,
int32_t &  outValue,
const Model &  model,
const ConversionData &  data 
)
inline

Definition at line 815 of file ConversionUtils.hpp.

820 {
821  return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
822 }

References GetInputScalar().

Referenced by GetInputPaddingScheme().

◆ GetInputOperand()

const Operand* armnn_driver::GetInputOperand ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model,
bool  failOnIndexOutOfBounds = true 
)
inline

Definition at line 662 of file ConversionUtils.hpp.

666 {
667  if (inputIndex >= operation.inputs.size())
668  {
669  if (failOnIndexOutOfBounds)
670  {
671  Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
672  }
673  return nullptr;
674  }
675 
676  // Model should have been validated beforehand
677  if (!(operation.inputs[inputIndex] < getMainModel(model).operands.size()))
678  {
679  std::ostringstream os;
680  os << "GetInputOperand: inputIndex [" << inputIndex << "]";
681  os << " is too large. The number of main model operands is [";
682  os << getMainModel(model).operands.size() << "]";
683  throw armnn::InvalidArgumentException(os.str());
684  }
685  return &getMainModel(model).operands[operation.inputs[inputIndex]];
686 }

References getMainModel().

Referenced by ConvertOperationInputToConstTensorPin(), ConvertPaddings(), ConvertReduce(), ConvertToLayerInputHandle(), DequantizeIfRequired(), GetInputScalar(), GetOperandType(), GetOptionalBool(), IsWeightsValid(), and OptionalDataLayout().

◆ GetInputPaddingScheme()

bool GetInputPaddingScheme ( const Operation &  operation,
uint32_t  inputIndex,
PaddingScheme &  outPaddingScheme,
const Model &  model,
const ConversionData &  data 
)

Definition at line 782 of file ConversionUtils.cpp.

787 {
788  int32_t paddingSchemeAsInt;
789  if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
790  {
791  return Fail("%s: failed to get padding scheme input value", __func__);
792  }
793 
794  outPaddingScheme = static_cast<::android::nn::PaddingScheme>(paddingSchemeAsInt);
795  return true;
796 }

References GetInputInt32().

Referenced by ConvertPooling2d().

◆ GetInputScalar()

bool armnn_driver::GetInputScalar ( const Operation &  operation,
uint32_t  inputIndex,
OperandType  type,
OutputType &  outValue,
const Model &  model,
const ConversionData &  data,
bool  optional = false 
)

Definition at line 775 of file ConversionUtils.hpp.

782 {
783  const Operand* operand = GetInputOperand(operation, inputIndex, model);
784  if (!optional && !operand)
785  {
786  return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
787  }
788 
789  if (!optional && operand->type != type)
790  {
791  VLOG(DRIVER) << __func__ << ": unexpected operand type: " << operand->type << " should be: " << type;
792  return false;
793  }
794 
795  if (!optional && operand->location.length != sizeof(OutputType))
796  {
797  return Fail("%s: incorrect operand location length: %i (should be %i)",
798  __func__, operand->location.length, sizeof(OutputType));
799  }
800 
801  const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
802  if (!optional && !valueAddress)
803  {
804  return Fail("%s: failed to get address for operand", __func__);
805  }
806 
807  if(!optional)
808  {
809  outValue = *(static_cast<const OutputType*>(valueAddress));
810  }
811 
812  return true;
813 }

References GetInputOperand(), and GetOperandValueReadOnlyAddress().

Referenced by ConvertPooling2d(), ConvertReduce(), GetInputActivationFunctionImpl(), GetInputFloat32(), GetInputInt32(), and GetOptionalConvolutionDilationParams().
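
A minimal usage sketch, mirroring the call made in ConvertReduce() above: reading the BOOL "keep dims" input of a reduction operation.

 bool keepDims = false;
 if (!GetInputScalar(operation, 2, OperandType::BOOL, keepDims, model, data))
 {
     return Fail("%s: Could not read input 2", __func__);
 }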

◆ GetMemoryFromPointer()

void * GetMemoryFromPointer ( const Request::Argument &  requestArg)

Definition at line 77 of file CanonicalUtils.cpp.

78 {
79  // get the pointer memory
80  auto ptrMemory = std::visit([](std::variant<const void*, void*>&& memory)
81  {
82  if (std::holds_alternative<const void*>(memory))
83  {
84  auto ptr = std::get<const void*>(memory);
85  auto ptrMemory = static_cast<const uint8_t*>(ptr);
86  return const_cast<uint8_t*>(ptrMemory);
87  }
88  else
89  {
90  auto ptr = std::get<void*>(memory);
91  return static_cast<uint8_t*>(ptr);
92  }
93  }, requestArg.location.pointer);
94  return ptrMemory;
95 }

◆ GetMemoryFromPool()

void * GetMemoryFromPool ( DataLocation  location,
const std::vector< android::nn::RunTimePoolInfo > &  memPools 
)

Returns a pointer to a specific location in a pool.

Definition at line 66 of file CanonicalUtils.cpp.

67 {
68  // find the location within the pool
69  assert(location.poolIndex < memPools.size());
70 
71  const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
72  uint8_t* memPoolBuffer = memPool.getBuffer();
73  uint8_t* memory = memPoolBuffer + location.offset;
74  return memory;
75 }

Referenced by GetOperandValueReadOnlyAddress().
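
A minimal sketch, assuming a populated memPools vector: resolving a request argument's DataLocation to a raw pointer, as the prepared model does when wiring input and output buffers.

 const android::nn::Request::Argument& requestArg = request.inputs[0];
 void* bufferPtr = GetMemoryFromPool(requestArg.location, memPools);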

◆ GetModelSummary()

std::string GetModelSummary ( const Model &  model )

Definition at line 526 of file CanonicalUtils.cpp.

527 {
528  std::stringstream result;
529 
530  result << model.main.inputIndexes.size() << " input(s), "
531  << model.main.operations.size() << " operation(s), "
532  << model.main.outputIndexes.size() << " output(s), "
533  << model.main.operands.size() << " operand(s) "
534  << std::endl;
535 
536  result << "Inputs: ";
537  for (uint32_t i = 0; i < model.main.inputIndexes.size(); i++)
538  {
539  result << GetOperandSummary(model.main.operands[model.main.inputIndexes[i]]) << ", ";
540  }
541  result << std::endl;
542 
543  result << "Operations: ";
544  for (uint32_t i = 0; i < model.main.operations.size(); i++)
545  {
546  result << model.main.operations[i].type << ", ";
547  }
548  result << std::endl;
549 
550  result << "Outputs: ";
551  for (uint32_t i = 0; i < model.main.outputIndexes.size(); i++)
552  {
553  result << GetOperandSummary(model.main.operands[model.main.outputIndexes[i]]) << ", ";
554  }
555  result << std::endl;
556 
557  return result.str();
558 }

References GetOperandSummary().

Referenced by ArmnnPreparedModel::execute(), and ArmnnPreparedModel::executeFenced().

◆ GetOperandSummary()

std::string GetOperandSummary ( const Operand &  operand )

Definition at line 191 of file CanonicalUtils.cpp.

192 {
193  std::stringstream ss;
194  ss << "operand dimensions: [ ";
195  for (unsigned int i = 0; i < operand.dimensions.size(); ++i)
196  {
197  ss << operand.dimensions[i] << " ";
198  }
199  ss << "] operand type: " << operand.type;
200  return ss.str();
201 }

Referenced by GetModelSummary().

◆ GetOperandType()

bool armnn_driver::GetOperandType ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model,
OperandType &  type 
)
inline

Definition at line 716 of file ConversionUtils.hpp.

720 {
721  const Operand* operand = GetInputOperand(operation, inputIndex, model);
722  if (!operand)
723  {
724  return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
725  }
726 
727  type = operand->type;
728  return true;
729 }

References GetInputOperand().

◆ GetOperandValueReadOnlyAddress()

const void * GetOperandValueReadOnlyAddress ( const Operand &  operand,
const Model &  model,
const ConversionData &  data,
bool  optional 
)

Definition at line 798 of file ConversionUtils.cpp.

802 {
803  const void* valueStart = nullptr;
804  switch (operand.lifetime)
805  {
806  case OperandLifeTime::CONSTANT_COPY:
807  {
808  valueStart = model.operandValues.data() + operand.location.offset;
809  break;
810  }
811  case OperandLifeTime::POINTER:
812  {
813  // Pointer specified in the model
814  valueStart = std::get<const void*>(operand.location.pointer);
815  break;
816  }
817  case OperandLifeTime::CONSTANT_REFERENCE:
818  {
819  // Constant specified via a Memory object
820  valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
821  break;
822  }
823  case OperandLifeTime::NO_VALUE:
824  {
825  // An optional input tensor with no values is not an error so should not register as a fail
826  if (optional)
827  {
828  valueStart = nullptr;
829  break;
830  }
831  [[fallthrough]];
832  }
833  default:
834  {
835  VLOG(DRIVER) << __func__ << ": unsupported/invalid operand lifetime:: " << operand.lifetime;
836  valueStart = nullptr;
837  }
838  }
839 
840  return valueStart;
841 }

References GetMemoryFromPool(), and ConversionData::m_MemPools.

Referenced by ConvertOperandToConstTensorPin(), DequantizeIfRequired(), GetInputScalar(), GetOptionalBool(), GetTensorInt32Values(), and OptionalDataLayout().

◆ GetOptionalBool()

bool armnn_driver::GetOptionalBool ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model,
const ConversionData &  data 
)
inline

Definition at line 933 of file ConversionUtils.hpp.

937 {
938  const Operand* operand = GetInputOperand(operation, inputIndex, model);
939  if (!operand)
940  {
941  return false;
942  }
943 
944  if (!IsBool(*operand))
945  {
946  return false;
947  }
948 
949  const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
950  if (!valueAddress)
951  {
952  return false;
953  }
954 
955  return *(static_cast<const bool*>(valueAddress));
956 }

References GetInputOperand(), and GetOperandValueReadOnlyAddress().

◆ GetOptionalConvolutionDilationParams()

bool armnn_driver::GetOptionalConvolutionDilationParams ( const Operation &  operation,
uint32_t  dilationXIndex,
ConvolutionDescriptor &  descriptor,
const Model &  model,
const ConversionData &  data 
)

Definition at line 907 of file ConversionUtils.hpp.

912 {
913  bool success = true;
914  if (operation.inputs.size() >= dilationXIndex + 2)
915  {
916  success &= GetInputScalar(operation,
917  dilationXIndex,
918  OperandType::INT32,
919  descriptor.m_DilationX,
920  model,
921  data);
922  success &= GetInputScalar(operation,
923  dilationXIndex + 1,
924  OperandType::INT32,
925  descriptor.m_DilationY,
926  model,
927  data);
928  }
929 
930  return success;
931 }

References GetInputScalar().
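
For example, a convolution converter might call this after parsing the mandatory inputs; the index 11 used here is only illustrative of where trailing dilation inputs could sit:

    armnn::Convolution2dDescriptor desc;
    // Populates m_DilationX/m_DilationY only if the operation actually
    // carries the two trailing INT32 dilation inputs.
    if (!GetOptionalConvolutionDilationParams(operation, 11, desc, model, data))
    {
        return Fail("%s: invalid dilation parameters", __func__);
    }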

◆ GetOptionalInputActivation()

bool armnn_driver::GetOptionalInputActivation ( const Operation &  operation,
uint32_t  inputIndex,
ActivationFn &  activationFunction,
const Model &  model,
const ConversionData &  data 
)
inline

Definition at line 886 of file ConversionUtils.hpp.

891 {
892  if (operation.inputs.size() <= inputIndex)
893  {
894  activationFunction = ActivationFn::kActivationNone;
895  }
896  else
897  {
898  if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
899  {
900  return Fail("%s: Operation has invalid inputs", __func__);
901  }
902  }
903  return true;
904 }

References GetInputActivationFunction().
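
A sketch of the usual pattern for binary operations such as ADD, where the fused activation is the last input and may be omitted:

    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: invalid fused activation", __func__);
    }
    // activationFunction is kActivationNone when input 2 is absent.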

◆ GetOutputOperand()

const Operand* armnn_driver::GetOutputOperand ( const Operation &  operation,
uint32_t  outputIndex,
const Model &  model 
)
inline

Definition at line 688 of file ConversionUtils.hpp.

691 {
692  if (outputIndex >= operation.outputs.size())
693  {
694  Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
695  return nullptr;
696  }
697 
698  // Model should have been validated beforehand
699  if (!(operation.outputs[outputIndex] < getMainModel(model).operands.size()))
700  {
701  std::ostringstream os;
702  os << "GetOutputOperand: outputIndex [" << outputIndex << "]";
703  os << " is too large. The number of main model operands is [";
704  os << getMainModel(model).operands.size() << "]";
705  throw armnn::InvalidArgumentException(os.str());
706  }
707 
708  return &getMainModel(model).operands[operation.outputs[outputIndex]];
709 }

References getMainModel().

Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), and SetupAndTrackLayerOutputSlot().

◆ GetTensorInfoForOperand()

armnn::TensorInfo GetTensorInfoForOperand ( const Operand &  operand )

Definition at line 97 of file CanonicalUtils.cpp.

98 {
99  using namespace armnn;
100  bool perChannel = false;
101  bool isScalar = false;
102 
103  DataType type;
104  switch (operand.type)
105  {
106  case OperandType::TENSOR_BOOL8:
107  type = armnn::DataType::Boolean;
108  break;
109  case OperandType::TENSOR_FLOAT32:
110  type = armnn::DataType::Float32;
111  break;
112  case OperandType::TENSOR_FLOAT16:
113  type = armnn::DataType::Float16;
114  break;
115  case OperandType::TENSOR_QUANT8_ASYMM:
116  type = armnn::DataType::QAsymmU8;
117  break;
118  case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
119  perChannel=true;
120  ARMNN_FALLTHROUGH;
121  case OperandType::TENSOR_QUANT8_SYMM:
122  type = armnn::DataType::QSymmS8;
123  break;
124  case OperandType::TENSOR_QUANT16_SYMM:
125  type = armnn::DataType::QSymmS16;
126  break;
127  case OperandType::TENSOR_INT32:
128  type = armnn::DataType::Signed32;
129  break;
130  case OperandType::INT32:
131  type = armnn::DataType::Signed32;
132  isScalar = true;
133  break;
134  case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
135  type = armnn::DataType::QAsymmS8;
136  break;
137  default:
138  throw UnsupportedOperand<OperandType>(operand.type);
139  }
140 
141  TensorInfo ret;
142  if (isScalar)
143  {
144  ret = TensorInfo(TensorShape(armnn::Dimensionality::Scalar), type);
145  }
146  else
147  {
148  if (operand.dimensions.size() == 0)
149  {
150  TensorShape tensorShape(Dimensionality::NotSpecified);
151  ret = TensorInfo(tensorShape, type);
152  }
153  else
154  {
155  std::vector<unsigned char> dimensionsSpecificity(operand.dimensions.size(), true);
156  int count = 0;
157  std::for_each(operand.dimensions.data(),
158  operand.dimensions.data() + operand.dimensions.size(),
159  [&](const unsigned int val)
160  {
161  if (val == 0)
162  {
163  dimensionsSpecificity[count] = false;
164  }
165  count++;
166  });
167 
168  TensorShape tensorShape(operand.dimensions.size(),
169  operand.dimensions.data(),
170  reinterpret_cast<const bool *>(dimensionsSpecificity.data()));
171  ret = TensorInfo(tensorShape, type);
172  }
173  }
174 
175  if (perChannel)
176  {
177  // ExtraParams is expected to be of type channelQuant
178  const auto& perAxisQuantParams = std::get<Operand::SymmPerChannelQuantParams>(operand.extraParams);
179 
180  ret.SetQuantizationScales(perAxisQuantParams.scales);
181  ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
182  }
183  else
184  {
185  ret.SetQuantizationScale(operand.scale);
186  ret.SetQuantizationOffset(operand.zeroPoint);
187  }
188  return ret;
189 }

References ARMNN_FALLTHROUGH, armnn::Boolean, armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Scalar, and armnn::Signed32.

Referenced by ConvertOperandToConstTensorPin(), ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), ConvertToLayerInputHandle(), and SetupAndTrackLayerOutputSlot().
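
A sketch combining this helper with the dynamic-tensor checks, as the converters referenced above do (layer creation and validateFunc are omitted):

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (outputOperand == nullptr)
    {
        return false;
    }
    const armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo) && !AreDynamicTensorsSupported())
    {
        return Fail("%s: dynamic output tensors are not supported", __func__);
    }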

◆ GetTensorInt32Values()

bool GetTensorInt32Values ( const Operand &  operand,
std::vector< int32_t > &  outValues,
const Model &  model,
const ConversionData &  data 
)

Definition at line 843 of file ConversionUtils.cpp.

847 {
848  if (operand.type != OperandType::TENSOR_INT32)
849  {
850  VLOG(DRIVER) << __func__ << ": invalid operand type: " << operand.type;
851  return false;
852  }
853 
854  const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
855  if (!startAddress)
856  {
857  VLOG(DRIVER) << __func__ << ": failed to get operand address " << operand.type;
858  return false;
859  }
860 
861  // Check number of bytes is sensible
862  const uint32_t numBytes = operand.location.length;
863  if (numBytes % sizeof(int32_t) != 0)
864  {
865  return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
866  __func__, numBytes, sizeof(int32_t));
867  }
868 
869  outValues.resize(numBytes / sizeof(int32_t));
870  memcpy(outValues.data(), startAddress, numBytes);
871  return true;
872 }

References GetOperandValueReadOnlyAddress().

Referenced by ConvertPaddings(), and ConvertReduce().
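
A sketch of the REDUCE-style usage, where input 1 is assumed to be the TENSOR_INT32 axis operand:

    const Operand* axisOperand = GetInputOperand(operation, 1, model);
    std::vector<int32_t> axes;
    if (axisOperand == nullptr || !GetTensorInt32Values(*axisOperand, axes, model, data))
    {
        return Fail("%s: failed to read the axis tensor", __func__);
    }
    // axes now holds one int32_t per element of the operand.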

◆ IsConnectedToDequantize()

bool IsConnectedToDequantize ( armnn::IOutputSlot *  ioutputSlot )

Definition at line 1064 of file ConversionUtils.cpp.

1065 {
1066  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize()";
1067  if (!ioutputSlot)
1068  {
1069  return false;
1070  }
1071  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() ioutputSlot is valid.";
1072  // Find the connections and layers..
1073  armnn::IConnectableLayer& owningLayer = ioutputSlot->GetOwningIConnectableLayer();
1074  if (owningLayer.GetType() == armnn::LayerType::Dequantize)
1075  {
1076  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() connected to Dequantize Layer.";
1077  armnn::IInputSlot& inputSlot = owningLayer.GetInputSlot(0);
1078  armnn::IOutputSlot* connection = inputSlot.GetConnection();
1079  if (connection)
1080  {
1081  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer has a connection.";
1082  armnn::IConnectableLayer& connectedLayer =
1083  connection->GetOwningIConnectableLayer();
1084  if (connectedLayer.GetType() == armnn::LayerType::Constant)
1085  {
1086  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer connected to Constant";
1087  return true;
1088  }
1089  }
1090  }
1091  return false;
1092 }

References armnn::Constant, armnn::Dequantize, IInputSlot::GetConnection(), IConnectableLayer::GetInputSlot(), IOutputSlot::GetOwningIConnectableLayer(), and IConnectableLayer::GetType().
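
A sketch of how a fully-connected converter might use this to detect weights that arrive via a Constant -> Dequantize chain rather than as a plain constant tensor (the weights index is illustrative, and the GetOutputSlot accessor on LayerInputHandle is an assumption):

    LayerInputHandle weightsInput = ConvertToLayerInputHandle(operation, 1, model, data,
                                                              g_DontPermute, nullptr);
    if (weightsInput.IsValid() && IsConnectedToDequantize(weightsInput.GetOutputSlot()))
    {
        // Weights are produced by a Constant layer feeding a Dequantize layer.
    }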

◆ IsDynamicTensor()

bool IsDynamicTensor ( const armnn::TensorInfo tensorInfo)

Checks if a tensor info represents a dynamic tensor.

Definition at line 491 of file CanonicalUtils.cpp.

492 {
493  if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
494  {
495  return true;
496  }
497  // Account for the usage of the TensorShape empty constructor
498  if (tensorInfo.GetNumDimensions() == 0)
499  {
500  return true;
501  }
502  return !tensorInfo.GetShape().AreAllDimensionsSpecified();
503 }

References TensorShape::AreAllDimensionsSpecified(), TensorShape::GetDimensionality(), TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), and armnn::NotSpecified.

Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), ConvertToLayerInputHandle(), and SetupAndTrackLayerOutputSlot().

◆ IsOperandConstant()

bool armnn_driver::IsOperandConstant ( const Operand &  operand )
inline

Definition at line 731 of file ConversionUtils.hpp.

732 {
733  OperandLifeTime lifetime = operand.lifetime;
734 
735  return lifetime == OperandLifeTime::CONSTANT_COPY ||
736  lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
737  lifetime == OperandLifeTime::POINTER ||
738  lifetime == OperandLifeTime::NO_VALUE;
739 }

Referenced by ConvertOperandToConstTensorPin(), and DequantizeIfRequired().

◆ IsQSymm8()

bool armnn_driver::IsQSymm8 ( const Operand &  operand )
inline

Definition at line 1035 of file ConversionUtils.hpp.

1036 {
1037  return operand.type == OperandType::TENSOR_QUANT8_SYMM;
1038 }

Referenced by DequantizeIfRequired().

◆ isQuantizedOperand()

bool isQuantizedOperand ( const OperandType &  operandType )

Definition at line 510 of file CanonicalUtils.cpp.

511 {
512  if (operandType == OperandType::TENSOR_QUANT8_ASYMM ||
513  operandType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
514  operandType == OperandType::TENSOR_QUANT8_SYMM ||
515  operandType == OperandType::TENSOR_QUANT16_SYMM ||
516  operandType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
517  {
518  return true;
519  }
520  else
521  {
522  return false;
523  }
524 }

◆ IsWeightsValid()

bool IsWeightsValid ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model,
const bool  isOptional = true 
)

Checks that a weights operand is valid: it must be a constant (CONSTANT_COPY or CONSTANT_REFERENCE), or NO_VALUE when the weights are optional.

Definition at line 141 of file ConversionUtils.cpp.

145 {
146  const Operand* operand = GetInputOperand(operation, inputIndex, model);
147  if (!operand)
148  {
149  Fail("%s: failed to get input operand %i", __func__, inputIndex);
150  return false;
151  }
152  // If the operand is not an optional operand it cannot have a NO_VALUE lifetime
153  if (!isOptional && operand->lifetime == OperandLifeTime::NO_VALUE)
154  {
155  return false;
156  }
157  if (operand->lifetime != OperandLifeTime::CONSTANT_COPY
158  && operand->lifetime != OperandLifeTime::CONSTANT_REFERENCE
159  && operand->lifetime != OperandLifeTime::NO_VALUE)
160  {
161  return false;
162  }
163  return true;
164 }

References GetInputOperand().

◆ OptionalDataLayout()

armnn::DataLayout OptionalDataLayout ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model,
ConversionData &  data 
)

Definition at line 874 of file ConversionUtils.cpp.

878 {
879  const Operand* operand = GetInputOperand(operation, inputIndex, model);
880  if (!operand)
881  {
882  return armnn::DataLayout::NHWC;
883  }
884 
885  if (!IsBool(*operand))
886  {
887  return armnn::DataLayout::NHWC;
888  }
889 
890  const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
891  if (!valueAddress)
892  {
893  return armnn::DataLayout::NHWC;
894  }
895 
896  if (*(static_cast<const bool*>(valueAddress)))
897  {
898  return armnn::DataLayout::NCHW;
899  }
900  else
901  {
902  return armnn::DataLayout::NHWC;
903  }
904 }

References GetInputOperand(), GetOperandValueReadOnlyAddress(), armnn::NCHW, and armnn::NHWC.

Referenced by ConvertPooling2d().
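
A sketch of the pooling-style usage; the input index is hypothetical and depends on the operation's signature:

    armnn::Pooling2dDescriptor desc;
    // Absent or false selects NHWC; true selects NCHW.
    desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);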

◆ ProcessActivation()

armnn::IConnectableLayer * ProcessActivation ( const armnn::TensorInfo &  tensorInfo,
ActivationFn  activation,
armnn::IConnectableLayer *  prevLayer,
ConversionData &  data 
)

Definition at line 906 of file ConversionUtils.cpp.

910 {
911  if (prevLayer->GetNumOutputSlots() != 1)
912  {
913  throw armnn::Exception("ProcessActivation: previous layer does not have a single output slot");
914  }
915 
916  prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
917 
918  armnn::IConnectableLayer* activationLayer = prevLayer;
919 
920  if (activation != ActivationFn::kActivationNone)
921  {
922  armnn::ActivationDescriptor activationDesc;
923  switch (activation)
924  {
925  case ActivationFn::kActivationRelu:
926  {
927  activationDesc.m_Function = armnn::ActivationFunction::ReLu;
928  break;
929  }
930  case ActivationFn::kActivationRelu1:
931  {
932  activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
933  activationDesc.m_A = 1.0f;
934  activationDesc.m_B = -1.0f;
935  break;
936  }
937  case ActivationFn::kActivationRelu6:
938  {
939  activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
940  activationDesc.m_A = 6.0f;
941  break;
942  }
943  case ActivationFn::kActivationSigmoid:
944  {
945  activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
946  break;
947  }
948  case ActivationFn::kActivationTanh:
949  {
950  activationDesc.m_Function = armnn::ActivationFunction::TanH;
951  activationDesc.m_A = 1.0f;
952  activationDesc.m_B = 1.0f;
953  break;
954  }
955  default:
956  {
957  Fail("%s: Invalid activation enum value %i", __func__, activation);
958  return nullptr;
959  }
960  }
961 
962  bool isSupported = false;
963  armnn::BackendId setBackend;
964  FORWARD_LAYER_SUPPORT_FUNC(__func__,
965  IsActivationSupported,
966  data.m_Backends,
967  isSupported,
968  setBackend,
969  prevLayer->GetOutputSlot(0).GetTensorInfo(),
970  tensorInfo,
971  activationDesc);
972  if (!isSupported)
973  {
974  return nullptr;
975  }
976 
977  activationLayer = data.m_Network->AddActivationLayer(activationDesc);
978  activationLayer->SetBackendId(setBackend);
979 
980  prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
981  activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
982  }
983 
984  return activationLayer;
985 }

References armnn::BoundedReLu, IOutputSlot::Connect(), FORWARD_LAYER_SUPPORT_FUNC, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ConversionData::m_Backends, ActivationDescriptor::m_Function, ConversionData::m_Network, armnn::ReLu, IConnectableLayer::SetBackendId(), IOutputSlot::SetTensorInfo(), armnn::Sigmoid, and armnn::TanH.

Referenced by SetupAndTrackLayerOutputSlot().

◆ RenameExportedFiles()

void RenameExportedFiles ( const std::string &  existingSerializedFileName,
const std::string &  existingDotFileName,
const std::string &  dumpDir,
const armnn::NetworkId  networkId 
)

Definition at line 580 of file CanonicalUtils.cpp.

584 {
585  if (dumpDir.empty())
586  {
587  return;
588  }
589  RenameFile(existingSerializedFileName, std::string("_network.armnn"), dumpDir, networkId);
590  RenameFile(existingDotFileName, std::string("_networkgraph.dot"), dumpDir, networkId);
591 }

References RenameFile().

Referenced by ArmnnDriverImpl::PrepareArmnnModel().

◆ RenameFile()

void RenameFile ( const std::string &  existingName,
const std::string &  extension,
const std::string &  dumpDir,
const armnn::NetworkId  networkId 
)

Definition at line 593 of file CanonicalUtils.cpp.

597 {
598  if (existingName.empty() || dumpDir.empty())
599  {
600  return;
601  }
602 
603  fs::path dumpPath = dumpDir;
604  const fs::path newFileName = dumpPath / (std::to_string(networkId) + extension);
605  int iRet = rename(existingName.c_str(), newFileName.c_str());
606  if (iRet != 0)
607  {
608  std::stringstream ss;
609  ss << "rename of [" << existingName << "] to [" << newFileName << "] failed with errno "
610  << std::to_string(errno) << " : " << std::strerror(errno);
611  VLOG(DRIVER) << ss.str().c_str();
612  }
613 }

Referenced by RenameExportedFiles().

◆ SerializeNetwork()

std::string SerializeNetwork ( const armnn::INetwork &  network,
const std::string &  dumpDir,
std::vector< uint8_t > &  dataCacheData,
bool  dataCachingActive 
)

Definition at line 432 of file CanonicalUtils.cpp.

436 {
437  std::string fileName;
438  bool bSerializeToFile = true;
439  if (dumpDir.empty())
440  {
441  bSerializeToFile = false;
442  }
443  else
444  {
445  std::string timestamp = GetFileTimestamp();
446  if (timestamp.empty())
447  {
448  bSerializeToFile = false;
449  }
450  }
451  if (!bSerializeToFile && !dataCachingActive)
452  {
453  return fileName;
454  }
455 
456  auto serializer(armnnSerializer::ISerializer::Create());
457  // Serialize the Network
458  serializer->Serialize(network);
459  if (dataCachingActive)
460  {
461  std::stringstream stream;
462  auto serialized = serializer->SaveSerializedToStream(stream);
463  if (serialized)
464  {
465  std::string const serializedString{stream.str()};
466  std::copy(serializedString.begin(),
467  serializedString.end(),
468  std::back_inserter(dataCacheData));
469  }
470  }
471 
472  if (bSerializeToFile)
473  {
474  // Set the name of the output .armnn file.
475  fs::path dumpPath = dumpDir;
476  std::string timestamp = GetFileTimestamp();
477  fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
478  fileName = tempFilePath.string();
479 
480  // Save serialized network to a file
481  std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
482  auto serialized = serializer->SaveSerializedToStream(serializedFile);
483  if (!serialized)
484  {
485  VLOG(DRIVER) << "An error occurred when serializing to file %s" << fileName.c_str();
486  }
487  }
488  return fileName;
489 }

References ISerializer::Create(), and GetFileTimestamp().

Referenced by ArmnnDriverImpl::PrepareArmnnModel().
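
A sketch of how model preparation might invoke this; the dump directory is a placeholder (in the driver it comes from DriverOptions), and network is assumed to be in scope:

    std::vector<uint8_t> dataCacheData;
    const std::string dumpDir = "/data/local/tmp"; // placeholder
    // Writes <timestamp>_network.armnn into dumpDir and, because data
    // caching is active, also appends the serialized bytes to dataCacheData.
    std::string fileName = SerializeNetwork(network, dumpDir, dataCacheData, true);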

◆ SetupAndTrackLayerOutputSlot() [1/2]

bool SetupAndTrackLayerOutputSlot ( const Operation &  operation,
uint32_t  operationOutputIndex,
armnn::IConnectableLayer &  layer,
uint32_t  layerOutputIndex,
const Model &  model,
ConversionData &  data,
const armnn::TensorInfo *  overrideOutputInfo,
const std::function< void(const armnn::TensorInfo &, bool &)> &  validateFunc,
const ActivationFn &  activationFunction,
bool  inferOutputShapes 
)

Definition at line 987 of file ConversionUtils.cpp.

997 {
998  const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
999  if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1000  {
1001  return false;
1002  }
1003 
1004  armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1005  if (overrideOutputInfo == nullptr)
1006  {
1007  outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1008  }
1009  else
1010  {
1011  outputSlot.SetTensorInfo(*overrideOutputInfo);
1012  }
1013 
1014  bool isSupported = false;
1015  if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
1016  {
1017  // Type one dynamic tensors require the previous layer's output shape for inference
1018  for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1019  {
1020  if(!layer.GetInputSlot(inputSlotIndex).GetConnection())
1021  {
1022  return false;
1023  }
1024  }
1025  // IsTensorInfoSet will infer the dynamic output shape
1026  outputSlot.IsTensorInfoSet();
1027  // Once the shape is inferred we can validate it
1028  validateFunc(outputSlot.GetTensorInfo(), isSupported);
1029 
1030  if(!isSupported)
1031  {
1032  for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1033  {
1034  layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
1035  }
1036  return false;
1037  }
1038  }
1039 
1040  const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1041 
1042  if (activationFunction != ActivationFn::kActivationNone)
1043  {
1044  const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
1045  armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
1046  &layer, data);
1047 
1048  if (!endLayer)
1049  {
1050  return Fail("%s: ProcessActivation failed", __func__);
1051  }
1052 
1053  armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
1054  data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
1055  }
1056  else
1057  {
1058  data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1059  }
1060 
1061  return true;
1062 }

References IOutputSlot::Disconnect(), IInputSlot::GetConnection(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), GetOutputOperand(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), GetTensorInfoForOperand(), IsDynamicTensor(), IOutputSlot::IsTensorInfoSet(), ConversionData::m_OutputSlotForOperand, ProcessActivation(), and IOutputSlot::SetTensorInfo().

Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), and SetupAndTrackLayerOutputSlot().

◆ SetupAndTrackLayerOutputSlot() [2/2]

bool armnn_driver::SetupAndTrackLayerOutputSlot ( const Operation &  operation,
uint32_t  outputIndex,
armnn::IConnectableLayer &  layer,
const Model &  model,
ConversionData &  data,
const armnn::TensorInfo *  overrideOutputInfo = nullptr,
const std::function< void(const armnn::TensorInfo &, bool &)> &  validateFunc = nullptr,
const ActivationFn &  activationFunction = ActivationFn::kActivationNone 
)
inline

Definition at line 992 of file ConversionUtils.hpp.

1001 {
1002  return SetupAndTrackLayerOutputSlot(operation,
1003  outputIndex,
1004  layer,
1005  outputIndex,
1006  model,
1007  data,
1008  overrideOutputInfo,
1009  validateFunc,
1010  activationFunction);
1011 }

References SetupAndTrackLayerOutputSlot().
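
A sketch of the typical tail of a converter, once the layer has been created and its inputs connected (layer, validateFunc and activationFunction are assumed to exist in the surrounding conversion code):

    // Registers the layer's output slot 0 as the producer of the
    // operation's output operand 0, validating or inferring its shape.
    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data,
                                        nullptr, validateFunc, activationFunction);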

◆ SwizzleAndroidNn4dTensorToArmNn()

void SwizzleAndroidNn4dTensorToArmNn ( armnn::TensorInfo &  tensorInfo,
const void *  input,
void *  output,
const armnn::PermutationVector &  mappings 
)

Swizzles tensor data in input according to the dimension mappings.

Definition at line 40 of file CanonicalUtils.cpp.

44 {
45  assert(tensorInfo.GetNumDimensions() == 4U);
46 
47  armnn::DataType dataType = tensorInfo.GetDataType();
48  switch (dataType)
49  {
50  case armnn::DataType::Float16:
51  case armnn::DataType::Float32:
52  case armnn::DataType::QAsymmU8:
53  case armnn::DataType::QSymmS8:
54  case armnn::DataType::QAsymmS8:
55  // First swizzle tensor info
56  tensorInfo = armnnUtils::Permuted(tensorInfo, mappings);
57  // Then swizzle tensor data
58  armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
59  break;
60  default:
61  VLOG(DRIVER) << "Unknown armnn::DataType for swizzling";
62  assert(0);
63  }
64 }

References armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::GetDataTypeSize(), TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), armnnUtils::Permute(), armnnUtils::Permuted(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS8.

Referenced by ConstTensorPin::ConstTensorPin().
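
A sketch of permuting 4-D NHWC weight data into NCHW order (the mapping vector follows ArmNN's convention that mappings[i] gives the destination index of source dimension i; weightsInfo and srcData are assumptions):

    const armnn::PermutationVector NHWCToNCHW({ 0, 2, 3, 1 });
    std::vector<uint8_t> permutedData(weightsInfo.GetNumBytes());
    armnn::TensorInfo swizzledInfo = weightsInfo; // shape is permuted in place
    SwizzleAndroidNn4dTensorToArmNn(swizzledInfo, srcData, permutedData.data(), NHWCToNCHW);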

Variable Documentation

◆ g_DontPermute

const armnn::PermutationVector g_DontPermute {}

Definition at line 38 of file CanonicalUtils.cpp.

Referenced by DequantizeAndMakeConstTensorPin().
