ArmNN
 23.08
armnn_driver Namespace Reference

Helper classes.

Classes

class  ArmnnDevice
 
class  ArmnnDriver
 
class  ArmnnDriverImpl
 
class  ArmnnPreparedModel
 
struct  CanonicalExecutionContext
 
class  ConstTensorPin
 
struct  ConversionData
 
class  Converter
 
class  DriverOptions
 
class  LayerInputHandle
 
class  ModelToINetworkTransformer
 
class  UnsupportedOperand
 

Typedefs

template<typename TensorType >
using DumpElementFunction = void(*)(const TensorType &tensor, unsigned int elementIndex, std::ofstream &fileStream)
 
using Model = ::android::nn::Model
 
using Operand = ::android::nn::Operand
 
using OperandLifeTime = ::android::nn::Operand::LifeTime
 
using OperandType = ::android::nn::OperandType
 
using Operation = ::android::nn::Operation
 
using OperationType = ::android::nn::OperationType
 
using ErrorStatus = ::android::nn::ErrorStatus
 
using DequantizeResult = std::tuple< std::unique_ptr< float[]>, size_t, armnn::TensorInfo, DequantizeStatus >
 
using Half = half_float::half
 

Enumerations

enum  ConversionResult { Success, ErrorMappingPools, UnsupportedFeature }
 
enum  DequantizeStatus { SUCCESS, NOT_REQUIRED, INVALID_OPERAND }
 

Functions

void SwizzleAndroidNn4dTensorToArmNn (armnn::TensorInfo &tensor, const void *input, void *output, const armnn::PermutationVector &mappings)
 Swizzles tensor data in input according to the dimension mappings.
 
void * GetMemoryFromPool (DataLocation location, const std::vector< android::nn::RunTimePoolInfo > &memPools)
 Returns a pointer to a specific location in a pool.
 
void * GetMemoryFromPointer (const Request::Argument &requestArg)
 
armnn::TensorInfo GetTensorInfoForOperand (const Operand &operand)
 
std::string GetOperandSummary (const Operand &operand)
 
template<typename TensorType >
void DumpTensor (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const TensorType &tensor)
 
template void DumpTensor< armnn::ConstTensor > (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const armnn::ConstTensor &tensor)
 
template void DumpTensor< armnn::Tensor > (const std::string &dumpDir, const std::string &requestName, const std::string &tensorName, const armnn::Tensor &tensor)
 
void DumpJsonProfilingIfRequired (bool gpuProfilingEnabled, const std::string &dumpDir, armnn::NetworkId networkId, const armnn::IProfiler *profiler)
 
std::string ExportNetworkGraphToDotFile (const armnn::IOptimizedNetwork &optimizedNetwork, const std::string &dumpDir)
 
std::string SerializeNetwork (const armnn::INetwork &network, const std::string &dumpDir, std::vector< uint8_t > &dataCacheData, bool dataCachingActive)
 
bool IsDynamicTensor (const armnn::TensorInfo &outputInfo)
 Checks if a tensor info represents a dynamic tensor.
 
bool AreDynamicTensorsSupported (void)
 Checks for ArmNN support of dynamic tensors.
 
bool isQuantizedOperand (const OperandType &operandType)
 
std::string GetModelSummary (const Model &model)
 
std::string GetFileTimestamp ()
 
void RenameExportedFiles (const std::string &existingSerializedFileName, const std::string &existingDotFileName, const std::string &dumpDir, const armnn::NetworkId networkId)
 
void RenameFile (const std::string &existingName, const std::string &extension, const std::string &dumpDir, const armnn::NetworkId networkId)
 
void CommitPools (std::vector<::android::nn::RunTimePoolInfo > &memPools)
 
OutputShape ComputeShape (const armnn::TensorInfo &info)
 
bool IsWeightsValid (const Operation &operation, uint32_t inputIndex, const Model &model)
 Utility functions.
 
ConstTensorPin ConvertOperandToConstTensorPin (const Operand &operand, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings, const armnn::TensorShape *overrideTensorShape, bool optional, const armnn::DataType *overrideDataType)
 
LayerInputHandle ConvertToLayerInputHandle (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle)
 
bool ConvertPaddings (const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor)
 
bool ConvertPooling2d (const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data)
 
bool ConvertReduce (const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
 
bool ConvertToActivation (const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data)
 
DequantizeResult DequantizeIfRequired (size_t operand_index, const Operation &operation, const Model &model, const ConversionData &data)
 
ConstTensorPin DequantizeAndMakeConstTensorPin (const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
 
bool GetInputPaddingScheme (const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data)
 
const void * GetOperandValueReadOnlyAddress (const Operand &operand, const Model &model, const ConversionData &data, bool optional)
 
bool GetTensorInt32Values (const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data)
 
armnn::DataLayout OptionalDataLayout (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data)
 
armnn::IConnectableLayer * ProcessActivation (const armnn::TensorInfo &tensorInfo, ActivationFn activation, armnn::IConnectableLayer *prevLayer, ConversionData &data)
 
bool SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes)
 
bool IsConnectedToDequantize (armnn::IOutputSlot *ioutputSlot)
 
const Operand * GetInputOperand (const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true)
 
const Operand * GetOutputOperand (const Operation &operation, uint32_t outputIndex, const Model &model)
 
bool GetOperandType (const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type)
 
bool IsOperandConstant (const Operand &operand)
 
ConstTensorPin ConvertOperationInputToConstTensorPin (const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings=g_DontPermute, const armnn::TensorShape *overrideTensorShape=nullptr, bool optional=false)
 
template<typename OutputType >
bool GetInputScalar (const Operation &operation, uint32_t inputIndex, OperandType type, OutputType &outValue, const Model &model, const ConversionData &data, bool optional=false)
 
bool GetInputInt32 (const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data)
 
bool GetInputFloat32 (const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunctionImpl (const Operation &operation, uint32_t inputIndex, OperandType type, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunction (const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunctionFromTensor (const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetOptionalInputActivation (const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data)
 
template<typename ConvolutionDescriptor >
bool GetOptionalConvolutionDilationParams (const Operation &operation, uint32_t dilationXIndex, ConvolutionDescriptor &descriptor, const Model &model, const ConversionData &data)
 
bool GetOptionalBool (const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data)
 
bool SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t outputIndex, armnn::IConnectableLayer &layer, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo=nullptr, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc=nullptr, const ActivationFn &activationFunction=ActivationFn::kActivationNone)
 
bool IsQSymm8 (const Operand &operand)
 

Variables

const armnn::PermutationVector g_DontPermute {}
 

Detailed Description

Helper classes.

Typedef Documentation

◆ DequantizeResult

using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>

Definition at line 1014 of file ConversionUtils.hpp.
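
A minimal sketch of unpacking the tuple, mirroring DequantizeAndMakeConstTensorPin() below; the surrounding conversion context (operation, model, data, operandIndex) is assumed:

DequantizeResult dequantized = DequantizeIfRequired(operandIndex, operation, model, data);

std::unique_ptr<float[]> buffer = std::move(std::get<0>(dequantized)); // dequantized data, may be null
size_t bufferSize               = std::get<1>(dequantized);            // size of the data in bytes
armnn::TensorInfo info          = std::get<2>(dequantized);            // tensor info describing the data
DequantizeStatus status         = std::get<3>(dequantized);            // SUCCESS / NOT_REQUIRED / INVALID_OPERAND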

◆ DumpElementFunction

using DumpElementFunction = void (*)(const TensorType& tensor, unsigned int elementIndex, std::ofstream& fileStream)

Definition at line 206 of file CanonicalUtils.cpp.

◆ ErrorStatus

using ErrorStatus = ::android::nn::ErrorStatus

Definition at line 49 of file ConversionUtils.hpp.

◆ Half

using Half = half_float::half

Definition at line 14 of file Converter.cpp.

◆ Model

using Model = ::android::nn::Model


Definition at line 43 of file ConversionUtils.hpp.

◆ Operand

using Operand = ::android::nn::Operand

Definition at line 44 of file ConversionUtils.hpp.

◆ OperandLifeTime

using OperandLifeTime = ::android::nn::Operand::LifeTime

Definition at line 45 of file ConversionUtils.hpp.

◆ OperandType

using OperandType = ::android::nn::OperandType

Definition at line 46 of file ConversionUtils.hpp.

◆ Operation

using Operation = ::android::nn::Operation

Definition at line 47 of file ConversionUtils.hpp.

◆ OperationType

using OperationType = ::android::nn::OperationType

Definition at line 48 of file ConversionUtils.hpp.

Enumeration Type Documentation

◆ ConversionResult

enum ConversionResult
strong
Enumerator
Success 
ErrorMappingPools 
UnsupportedFeature 

Definition at line 125 of file ConversionUtils.hpp.

{
    Success,
    ErrorMappingPools,
    UnsupportedFeature
};

◆ DequantizeStatus

enum DequantizeStatus
strong
Enumerator
SUCCESS 
NOT_REQUIRED 
INVALID_OPERAND 

Definition at line 1007 of file ConversionUtils.hpp.

{
    SUCCESS,
    NOT_REQUIRED,
    INVALID_OPERAND
};

Function Documentation

◆ AreDynamicTensorsSupported()

bool AreDynamicTensorsSupported ( )

Checks for ArmNN support of dynamic tensors.

Definition at line 502 of file CanonicalUtils.cpp.

{
    return true;
}

Referenced by ConvertPooling2d(), ConvertReduce(), and ConvertToActivation().
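
The Convert* helpers below pair this with IsDynamicTensor(): static outputs are validated immediately, while dynamic outputs defer validation until the shape has been inferred. A sketch of that pattern (ValidateOutput is a hypothetical wrapper, not part of the driver; std::function from <functional> is assumed):

bool ValidateOutput(const armnn::TensorInfo& outputInfo,
                    const std::function<void(const armnn::TensorInfo&, bool&)>& validateFunc)
{
    bool isSupported = false;
    if (IsDynamicTensor(outputInfo))
    {
        // Shape not known yet; validateFunc runs later, once the shape has been inferred.
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }
    return isSupported;
}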

◆ CommitPools()

void CommitPools ( std::vector<::android::nn::RunTimePoolInfo > &  memPools)

Definition at line 612 of file CanonicalUtils.cpp.

{
    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (auto& pool : memPools)
    {
        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
        // update() has been removed and flush() added.
        pool.flush();
    }
}

Referenced by ArmnnPreparedModel::ExecuteGraph().
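
Usage sketch (assuming the pools were mapped from the request earlier): flush them after execution so outputs written through shared memory become visible to the caller.

std::vector<android::nn::RunTimePoolInfo> memPools; // populated when the request was mapped
// ... run the graph ...
CommitPools(memPools);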

◆ ComputeShape()

OutputShape armnn_driver::ComputeShape ( const armnn::TensorInfo & info)
inline

Definition at line 95 of file CanonicalUtils.hpp.

{
    OutputShape shape;

    armnn::TensorShape tensorShape = info.GetShape();
    // Android will expect scalars as a zero dimensional tensor
    if (tensorShape.GetDimensionality() == armnn::Dimensionality::Scalar)
    {
        shape.dimensions = std::vector<uint32_t>{};
    }
    else
    {
        std::vector<uint32_t> dimensions;
        const unsigned int numDims = tensorShape.GetNumDimensions();
        dimensions.resize(numDims);
        for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
        {
            dimensions[outputIdx] = tensorShape[outputIdx];
        }
        shape.dimensions = dimensions;
    }

    shape.isSufficient = true;

    return shape;
}

References TensorShape::GetDimensionality(), TensorShape::GetNumDimensions(), and armnn::Scalar.

Referenced by ArmnnPreparedModel::execute().
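
Usage sketch (CollectOutputShapes is a hypothetical helper): build the OutputShape list that the NNAPI runtime expects for each output tensor.

std::vector<OutputShape> CollectOutputShapes(const std::vector<armnn::TensorInfo>& outputInfos)
{
    std::vector<OutputShape> outputShapes;
    outputShapes.reserve(outputInfos.size());
    for (const armnn::TensorInfo& info : outputInfos)
    {
        outputShapes.push_back(ComputeShape(info)); // scalars become zero-dimensional shapes
    }
    return outputShapes;
}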

◆ ConvertOperandToConstTensorPin()

ConstTensorPin ConvertOperandToConstTensorPin ( const Operand & operand,
const Model & model,
const ConversionData & data,
const armnn::PermutationVector & dimensionMappings,
const armnn::TensorShape * overrideTensorShape,
bool  optional,
const armnn::DataType * overrideDataType 
)

Definition at line 154 of file ConversionUtils.cpp.

{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor " << operand.type;
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant(operand))
    {
        VLOG(DRIVER) << __func__ << ": lifetime for input tensor: " << operand.lifetime;
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);

    if (overrideTensorShape)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }

    if (overrideDataType)
    {
        tensorInfo.SetDataType(*overrideDataType);
    }

    // Make sure isConstant flag is set.
    tensorInfo.SetConstant();
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

References GetOperandValueReadOnlyAddress(), GetTensorInfoForOperand(), IsOperandConstant(), TensorInfo::SetConstant(), TensorInfo::SetDataType(), and TensorInfo::SetShape().

Referenced by ConvertOperationInputToConstTensorPin(), and ConvertToLayerInputHandle().

◆ ConvertOperationInputToConstTensorPin()

ConstTensorPin armnn_driver::ConvertOperationInputToConstTensorPin ( const Operation & operation,
uint32_t  inputIndex,
const Model & model,
const ConversionData & data,
const armnn::PermutationVector & dimensionMappings = g_DontPermute,
const armnn::TensorShape * overrideTensorShape = nullptr,
bool  optional = false 
)
inline

Definition at line 718 of file ConversionUtils.hpp.

{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand,
                                          model,
                                          data,
                                          dimensionMappings,
                                          overrideTensorShape,
                                          optional);
}

References ConvertOperandToConstTensorPin(), and GetInputOperand().

Referenced by DequantizeAndMakeConstTensorPin().

◆ ConvertPaddings()

bool ConvertPaddings ( const Operation & operation,
const Model & model,
ConversionData & data,
unsigned int  rank,
armnn::PadDescriptor & padDescriptor 
)

Definition at line 338 of file ConversionUtils.cpp.

{
    const Operand* paddingsOperand = GetInputOperand(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
    }

    // add padding for each dimension of input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}

References GetInputOperand(), TensorShape::GetNumDimensions(), TensorShape::GetNumElements(), GetTensorInt32Values(), and PadDescriptor::m_PadList.
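
Illustrative sketch with hypothetical values: for a rank-2 input, a paddings operand of shape [2, 2] holding {1, 1, 0, 2} produces padDescriptor.m_PadList == { {1, 1}, {0, 2} }, i.e. one (before, after) pair per input dimension.

armnn::PadDescriptor padDescriptor;
if (!ConvertPaddings(operation, model, data, /*rank=*/2, padDescriptor))
{
    // paddings operand was missing, mis-shaped, or contained negative values
}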

◆ ConvertPooling2d()

bool ConvertPooling2d ( const Operation & operation,
const char *  operationName,
armnn::PoolingAlgorithm  poolType,
const Model & model,
ConversionData & data 
)

Definition at line 380 of file ConversionUtils.cpp.

{
    VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        ::android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme(operation, 1, scheme, model, data) ||
            !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12OrLaterOperand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
        }

        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsPooling2dSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   desc);
    };

    if (IsDynamicTensor(outputInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outputInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    pooling2dLayer->SetBackendId(setBackend);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    if (!isSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *pooling2dLayer, model,
                                        data, nullptr, validateFunc, activation);
}

References AreDynamicTensorsSupported(), LayerInputHandle::Connect(), ConvertToLayerInputHandle(), armnn::Floor, FORWARD_LAYER_SUPPORT_FUNC, DataLayoutIndexed::GetHeightIndex(), GetInputActivationFunction(), GetInputPaddingScheme(), GetInputScalar(), IConnectableLayer::GetInputSlot(), GetOutputOperand(), TensorInfo::GetShape(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), DataLayoutIndexed::GetWidthIndex(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, Pooling2dDescriptor::m_DataLayout, ConversionData::m_Network, Pooling2dDescriptor::m_OutputShapeRounding, Pooling2dDescriptor::m_PadBottom, Pooling2dDescriptor::m_PadLeft, Pooling2dDescriptor::m_PadRight, Pooling2dDescriptor::m_PadTop, Pooling2dDescriptor::m_PoolHeight, Pooling2dDescriptor::m_PoolType, Pooling2dDescriptor::m_PoolWidth, Pooling2dDescriptor::m_StrideX, Pooling2dDescriptor::m_StrideY, armnn::NHWC, OptionalDataLayout(), IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().

◆ ConvertReduce()

bool ConvertReduce ( const Operation & operation,
const Model & model,
ConversionData & data,
armnn::ReduceOperation  reduceOperation 
)

Definition at line 508 of file ConversionUtils.cpp.

{
    armnn::ReduceDescriptor descriptor;
    descriptor.m_ReduceOperation = reduceOperation;

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const Operand* axisOperand = GetInputOperand(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }
    std::vector<int32_t> axis;
    if (!GetTensorInt32Values(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
    descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());

    // Get the "keep dims" flag.
    if (!GetInputScalar(operation, 2, OperandType::BOOL, descriptor.m_KeepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsReduceSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   outputInfo,
                                   descriptor);
    };

    if (!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
    layer->SetBackendId(setBackend);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

References AreDynamicTensorsSupported(), LayerInputHandle::Connect(), ConvertToLayerInputHandle(), FORWARD_LAYER_SUPPORT_FUNC, GetInputOperand(), GetInputScalar(), IConnectableLayer::GetInputSlot(), TensorInfo::GetNumDimensions(), GetOutputOperand(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), GetTensorInt32Values(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, ReduceDescriptor::m_KeepDims, ConversionData::m_Network, ReduceDescriptor::m_ReduceOperation, ReduceDescriptor::m_vAxis, IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().

Referenced by Converter::ConvertOperation().

◆ ConvertToActivation()

bool ConvertToActivation ( const Operation & operation,
const char *  operationName,
const armnn::ActivationDescriptor & activationDesc,
const Model & model,
ConversionData & data 
)

Definition at line 592 of file ConversionUtils.cpp.

{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsActivationSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   input.GetTensorInfo(),
                                   outInfo,
                                   activationDesc);
    };

    if (IsDynamicTensor(outInfo))
    {
        isSupported = AreDynamicTensorsSupported();
    }
    else
    {
        validateFunc(outInfo, isSupported);
    }

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    layer->SetBackendId(setBackend);
    ARMNN_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
}

References AreDynamicTensorsSupported(), ARMNN_ASSERT, LayerInputHandle::Connect(), ConvertToLayerInputHandle(), FORWARD_LAYER_SUPPORT_FUNC, IConnectableLayer::GetInputSlot(), GetOutputOperand(), LayerInputHandle::GetTensorInfo(), GetTensorInfoForOperand(), IsDynamicTensor(), LayerInputHandle::IsValid(), ConversionData::m_Backends, ConversionData::m_Network, IConnectableLayer::SetBackendId(), and SetupAndTrackLayerOutputSlot().

◆ ConvertToLayerInputHandle()

LayerInputHandle ConvertToLayerInputHandle ( const Operation & operation,
uint32_t  inputIndex,
const Model & model,
ConversionData & data,
const armnn::PermutationVector & dimensionMappings,
const LayerInputHandle * inputHandle 
)

Definition at line 204 of file ConversionUtils.cpp.

{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor: " << operand->type;
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        if (IsDynamicTensor(operandTensorInfo))
        {
            data.m_DynamicInputsEncountered = true;

            const uint32_t operandIndex = operation.inputs[inputIndex];

            // Check if the dynamic input tensors have been inferred by one of the previous layers
            // If not we can't support them
            if (data.m_OutputSlotForOperand.size() >= operandIndex && data.m_OutputSlotForOperand[operandIndex])
            {
                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
            }
            else
            {
                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
                return LayerInputHandle();
            }
        }

        switch (operand->lifetime)
        {
            case OperandLifeTime::SUBGRAPH_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           armnn::BackendId(),
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                [[clang::fallthrough]]; // intentional fallthrough
            }
            case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case OperandLifeTime::SUBGRAPH_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case OperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case OperandLifeTime::POINTER:
            case OperandLifeTime::CONSTANT_REFERENCE:
            {
                auto constantTensorDataType = operandTensorInfo.GetDataType();
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand,
                                                                          model,
                                                                          data,
                                                                          dimensionMappings,
                                                                          nullptr,
                                                                          false,
                                                                          &constantTensorDataType);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    armnn::BackendId setBackend;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               setBackend,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    constantLayer->SetBackendId(setBackend);
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    armnn::TensorInfo constantTensorInfo = tensorPin.GetConstTensor().GetInfo();
                    outputSlot.SetTensorInfo(constantTensorInfo);

                    return LayerInputHandle(true, &outputSlot, constantTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                VLOG(DRIVER) << __func__ << ": unsupported lifetime for input tensor: " << operand->lifetime;
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<OperandType>& e)
    {
        VLOG(DRIVER) << __func__ << ": Operand type: " << e.m_type << " not supported in ArmnnDriver";
        return LayerInputHandle();
    }
}

References ConvertOperandToConstTensorPin(), FORWARD_LAYER_SUPPORT_FUNC, ConstTensorPin::GetConstTensor(), TensorInfo::GetDataType(), BaseTensor< MemoryType >::GetInfo(), GetInputOperand(), IConnectableLayer::GetOutputSlot(), GetTensorInfoForOperand(), IsDynamicTensor(), ConstTensorPin::IsValid(), ConversionData::m_Backends, ConversionData::m_DynamicInputsEncountered, ConversionData::m_Network, ConversionData::m_OutputSlotForOperand, UnsupportedOperand< OperandType >::m_type, IConnectableLayer::SetBackendId(), and IOutputSlot::SetTensorInfo().

Referenced by ConvertPooling2d(), ConvertReduce(), and ConvertToActivation().

◆ DequantizeAndMakeConstTensorPin()

ConstTensorPin DequantizeAndMakeConstTensorPin ( const Operation & operation,
const Model & model,
const ConversionData & data,
size_t  operandIndex,
bool  optional 
)

Definition at line 731 of file ConversionUtils.cpp.

{
    DequantizeResult dequantized = DequantizeIfRequired(operandIndex, operation, model, data);

    DequantizeStatus status = std::get<3>(dequantized);
    switch (status)
    {
        case DequantizeStatus::INVALID_OPERAND:
        {
            // return invalid const tensor pin
            return ConstTensorPin();
        }
        case DequantizeStatus::NOT_REQUIRED:
        {
            return ConvertOperationInputToConstTensorPin(
                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
        }
        case DequantizeStatus::SUCCESS:
        default:
        {
            return ConstTensorPin(
                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
        }
    }
}

References ConvertOperationInputToConstTensorPin(), DequantizeIfRequired(), g_DontPermute, INVALID_OPERAND, NOT_REQUIRED, and SUCCESS.

◆ DequantizeIfRequired()

DequantizeResult DequantizeIfRequired ( size_t  operand_index,
const Operation & operation,
const Model & model,
const ConversionData & data 
)

Definition at line 648 of file ConversionUtils.cpp.

{
    const Operand* weightsOperand = GetInputOperand(operation, operand_index, model);
    if (!weightsOperand)
    {
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
    }

    if (IsOperandConstant(*weightsOperand))
    {
        // Weights are already constant
        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
    }

    const size_t weightsInputIndex = operation.inputs[operand_index];

    // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
    // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
    {
        // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
        const auto& operationIt = getMainModel(model).operations[operationIdx];
        if (operationIt.type != OperationType::DEQUANTIZE)
        {
            continue;
        }

        size_t outOpIndex = weightsInputIndex + 1;
        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
        {
            outOpIndex = operationIt.outputs[i];
        }

        if (outOpIndex != weightsInputIndex)
        {
            continue;
        }

        const Operand* operand = GetInputOperand(operationIt, 0, model);
        ARMNN_ASSERT(operand);

        if (!IsQSymm8(*operand))
        {
            // Only supporting dequantize from QSYMM8 to FLOAT
            break;
        }

        // Allocate a new buffer for the dequantized data and manually dequantize
        const void* startValue = GetOperandValueReadOnlyAddress(*operand, model, data);
        if (!startValue)
        {
            // Failed to get the operand address
            break;
        }

        const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
        size_t dequantizedBufferLength = operand->location.length;
        const float quantizationScale = operand->scale;

        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
        for (size_t i = 0; i < dequantizedBufferLength; ++i)
        {
            float* dstPtr = dequantizedBuffer.get();
            ARMNN_ASSERT(dstPtr);
            *dstPtr++ = quantizedBuffer[i] * quantizationScale;
        }

        // Construct tensor info for dequantized ConstTensor
        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
                                     operand->dimensions.data(),
                                     armnn::DataType::Float32);

        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
                 std::move(tensorInfo),
                 DequantizeStatus::SUCCESS };
    }

    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
}

References ARMNN_ASSERT, armnn::Float32, GetInputOperand(), getMainModel(), GetOperandValueReadOnlyAddress(), INVALID_OPERAND, IsOperandConstant(), IsQSymm8(), NOT_REQUIRED, and SUCCESS.

Referenced by DequantizeAndMakeConstTensorPin().

◆ DumpJsonProfilingIfRequired()

void DumpJsonProfilingIfRequired ( bool  gpuProfilingEnabled,
const std::string &  dumpDir,
armnn::NetworkId  networkId,
const armnn::IProfiler * profiler 
)

Definition at line 352 of file CanonicalUtils.cpp.

{
    // Check if profiling is required.
    if (!gpuProfilingEnabled)
    {
        return;
    }

    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return;
    }

    ARMNN_ASSERT(profiler);

    // Set the name of the output profiling file.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (std::to_string(networkId) + "_profiling.json");

    // Open the output file for writing.
    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
        return;
    }

    // Write the profiling info to a JSON file.
    profiler->Print(fileStream);
}

References ARMNN_ASSERT, and IProfiler::Print().

Referenced by ArmnnPreparedModel::~ArmnnPreparedModel().

◆ DumpTensor()

template <typename TensorType>
void DumpTensor ( const std::string &  dumpDir,
const std::string &  requestName,
const std::string &  tensorName,
const TensorType &  tensor 
)

Definition at line 219 of file CanonicalUtils.cpp.

{
    // The dump directory must exist in advance.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (requestName + "_" + tensorName + ".dump");

    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
        return;
    }

    DumpElementFunction<TensorType> dumpElementFunction = nullptr;

    switch (tensor.GetDataType())
    {
        case armnn::DataType::Float32:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, float>;
            break;
        }
        case armnn::DataType::QAsymmU8:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, uint8_t, uint32_t>;
            break;
        }
        case armnn::DataType::Signed32:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, int32_t>;
            break;
        }
        case armnn::DataType::Float16:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, armnn::Half>;
            break;
        }
        case armnn::DataType::QAsymmS8:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, int8_t, int32_t>;
            break;
        }
        case armnn::DataType::Boolean:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, bool>;
            break;
        }
        default:
        {
            dumpElementFunction = nullptr;
        }
    }

    if (dumpElementFunction != nullptr)
    {
        const unsigned int numDimensions = tensor.GetNumDimensions();
        const armnn::TensorShape shape = tensor.GetShape();

        if (!shape.AreAllDimensionsSpecified())
        {
            fileStream << "Cannot dump tensor elements: not all dimensions are specified" << std::endl;
            return;
        }
        fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;

        if (numDimensions == 0)
        {
            fileStream << "# Shape []" << std::endl;
            return;
        }
        fileStream << "# Shape [" << shape[0];
        for (unsigned int d = 1; d < numDimensions; ++d)
        {
            fileStream << "," << shape[d];
        }
        fileStream << "]" << std::endl;
        fileStream << "Each line contains the data of each of the elements of dimension0. In NCHW and NHWC, each line"
                      " will be a batch" << std::endl << std::endl;

        // Split will create a new line after all elements of the first dimension
        // (in a 4, 3, 2, 3 tensor, there will be 4 lines of 18 elements)
        unsigned int split = 1;
        if (numDimensions == 1)
        {
            split = shape[0];
        }
        else
        {
            for (unsigned int i = 1; i < numDimensions; ++i)
            {
                split *= shape[i];
            }
        }

        // Print all elements in the tensor
        for (unsigned int elementIndex = 0; elementIndex < tensor.GetNumElements(); ++elementIndex)
        {
            (*dumpElementFunction)(tensor, elementIndex, fileStream);

            if ((elementIndex + 1) % split == 0)
            {
                fileStream << std::endl;
            }
        }
        fileStream << std::endl;
    }
    else
    {
        fileStream << "Cannot dump tensor elements: Unsupported data type "
                   << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
    }

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "An error occurred when writing to file %s" << fileName.c_str();
    }
}

References TensorShape::AreAllDimensionsSpecified(), armnn::Boolean, armnn::Float16, armnn::Float32, TensorShape::GetNumElements(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::Signed32.
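
Usage sketch: dump a tensor to <dumpDir>/<requestName>_<tensorName>.dump; the path and names here are illustrative only, and the explicit instantiations below cover armnn::ConstTensor and armnn::Tensor.

armnn_driver::DumpTensor<armnn::Tensor>("/data/local/tmp", "request0", "output0", outputTensor);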

◆ DumpTensor< armnn::ConstTensor >()

template void armnn_driver::DumpTensor< armnn::ConstTensor > ( const std::string &  dumpDir,
const std::string &  requestName,
const std::string &  tensorName,
const armnn::ConstTensor & tensor 
)

◆ DumpTensor< armnn::Tensor >()

template void armnn_driver::DumpTensor< armnn::Tensor > ( const std::string &  dumpDir,
const std::string &  requestName,
const std::string &  tensorName,
const armnn::Tensor & tensor 
)

◆ ExportNetworkGraphToDotFile()

std::string ExportNetworkGraphToDotFile ( const armnn::IOptimizedNetwork & optimizedNetwork,
const std::string &  dumpDir 
)

Definition at line 389 of file CanonicalUtils.cpp.

{
    std::string fileName;
    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return fileName;
    }

    std::string timestamp = GetFileTimestamp();
    if (timestamp.empty())
    {
        return fileName;
    }

    // Set the name of the output .dot file.
    fs::path dumpPath = dumpDir;
    fs::path tempFilePath = dumpPath / (timestamp + "_networkgraph.dot");
    fileName = tempFilePath.string();

    VLOG(DRIVER) << "Exporting the optimized network graph to file: %s" << fileName.c_str();

    // Write the network graph to a dot file.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
        return fileName;
    }

    if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
    {
        VLOG(DRIVER) << "An error occurred when writing to file %s" << fileName.c_str();
    }
    return fileName;
}

References GetFileTimestamp(), IOptimizedNetwork::SerializeToDot(), and armnn::Success.

Referenced by ArmnnDriverImpl::PrepareArmnnModel(), and ArmnnDriverImpl::PrepareArmnnModelFromCache().

◆ GetFileTimestamp()

std::string GetFileTimestamp ( )

Definition at line 557 of file CanonicalUtils.cpp.

{
    // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
    // and getSupportedOperations.txt files)
    timespec ts;
    int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
    std::stringstream ss;
    if (iRet == 0)
    {
        ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
    }
    else
    {
        VLOG(DRIVER) << "clock_gettime failed with errno " <<
                        std::to_string(errno).c_str() << " : " <<
                        std::strerror(errno);
    }
    return ss.str();
}

Referenced by ExportNetworkGraphToDotFile(), and SerializeNetwork().
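
Sketch of how the timestamp is used to name diagnostic files, as in ExportNetworkGraphToDotFile() above:

const std::string timestamp = GetFileTimestamp(); // "<sec>_<nsec>" from CLOCK_MONOTONIC_RAW
fs::path dotFile = fs::path(dumpDir) / (timestamp + "_networkgraph.dot");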

◆ GetInputActivationFunction()

bool armnn_driver::GetInputActivationFunction ( const Operation & operation,
uint32_t  inputIndex,
ActivationFn &  outActivationFunction,
const Model & model,
const ConversionData & data 
)
inline

Definition at line 823 of file ConversionUtils.hpp.

{
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}

References GetInputActivationFunctionImpl().

Referenced by ConvertPooling2d(), and GetOptionalInputActivation().

◆ GetInputActivationFunctionFromTensor()

bool armnn_driver::GetInputActivationFunctionFromTensor ( const Operation & operation,
uint32_t  inputIndex,
ActivationFn &  outActivationFunction,
const Model & model,
const ConversionData & data 
)
inline

Definition at line 837 of file ConversionUtils.hpp.

{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}

References GetInputActivationFunctionImpl().

◆ GetInputActivationFunctionImpl()

bool armnn_driver::GetInputActivationFunctionImpl ( const Operation & operation,
uint32_t  inputIndex,
OperandType  type,
ActivationFn &  outActivationFunction,
const Model & model,
const ConversionData & data 
)
inline

Definition at line 800 of file ConversionUtils.hpp.

{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        VLOG(DRIVER) << __func__ << ": unexpected operand type: " << type
                     << " should be OperandType::INT32 or OperandType::TENSOR_INT32";
        return false;
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

References GetInputScalar().

Referenced by GetInputActivationFunction(), and GetInputActivationFunctionFromTensor().

◆ GetInputFloat32()

bool armnn_driver::GetInputFloat32 ( const Operation & operation,
uint32_t  inputIndex,
float &  outValue,
const Model & model,
const ConversionData & data 
)
inline

Definition at line 791 of file ConversionUtils.hpp.

{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
}

References GetInputScalar().

◆ GetInputInt32()

bool armnn_driver::GetInputInt32 ( const Operation & operation,
uint32_t  inputIndex,
int32_t &  outValue,
const Model & model,
const ConversionData & data 
)
inline

Definition at line 782 of file ConversionUtils.hpp.

{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
}

References GetInputScalar().

Referenced by GetInputPaddingScheme().

◆ GetInputOperand()

const Operand* armnn_driver::GetInputOperand ( const Operation & operation,
uint32_t  inputIndex,
const Model & model,
bool  failOnIndexOutOfBounds = true 
)
inline

Definition at line 643 of file ConversionUtils.hpp.

{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
    return &getMainModel(model).operands[operation.inputs[inputIndex]];
}

References ARMNN_ASSERT, and getMainModel().

Referenced by ConvertOperationInputToConstTensorPin(), ConvertPaddings(), ConvertReduce(), ConvertToLayerInputHandle(), DequantizeIfRequired(), GetInputScalar(), GetOperandType(), GetOptionalBool(), IsWeightsValid(), and OptionalDataLayout().

◆ GetInputPaddingScheme()

bool GetInputPaddingScheme ( const Operation & operation,
uint32_t  inputIndex,
PaddingScheme &  outPaddingScheme,
const Model & model,
const ConversionData & data 
)

Definition at line 761 of file ConversionUtils.cpp.

{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<::android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

References GetInputInt32().

Referenced by ConvertPooling2d().

◆ GetInputScalar()

template <typename OutputType>
bool armnn_driver::GetInputScalar ( const Operation & operation,
uint32_t  inputIndex,
OperandType  type,
OutputType & outValue,
const Model & model,
const ConversionData & data,
bool  optional = false 
)

Definition at line 742 of file ConversionUtils.hpp.

{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!optional && !operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (!optional && operand->type != type)
    {
        VLOG(DRIVER) << __func__ << ": unexpected operand type: " << operand->type << " should be: " << type;
        return false;
    }

    if (!optional && operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!optional && !valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    if (!optional)
    {
        outValue = *(static_cast<const OutputType*>(valueAddress));
    }

    return true;
}

References GetInputOperand(), and GetOperandValueReadOnlyAddress().

Referenced by ConvertPooling2d(), ConvertReduce(), GetInputActivationFunctionImpl(), GetInputFloat32(), GetInputInt32(), and GetOptionalConvolutionDilationParams().
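
A sketch mirroring ConvertPooling2d() above: read two INT32 scalar inputs into a descriptor, failing the conversion if either is missing or has the wrong type or size.

armnn::Pooling2dDescriptor desc;
if (!GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data) ||
    !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data))
{
    return Fail("%s: Operation has invalid inputs", __func__);
}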

◆ GetMemoryFromPointer()

void * GetMemoryFromPointer ( const Request::Argument &  requestArg)

Definition at line 77 of file CanonicalUtils.cpp.

{
    // get the pointer memory
    auto ptrMemory = std::visit([](std::variant<const void*, void*>&& memory)
    {
        if (std::holds_alternative<const void*>(memory))
        {
            auto ptr = std::get<const void*>(memory);
            auto ptrMemory = static_cast<const uint8_t*>(ptr);
            return const_cast<uint8_t*>(ptrMemory);
        }
        else
        {
            auto ptr = std::get<void*>(memory);
            return static_cast<uint8_t*>(ptr);
        }
    }, requestArg.location.pointer);
    return ptrMemory;
}

◆ GetMemoryFromPool()

void * GetMemoryFromPool ( DataLocation  location,
const std::vector< android::nn::RunTimePoolInfo > &  memPools 
)

Returns a pointer to a specific location in a pool`.

Definition at line 66 of file CanonicalUtils.cpp.

{
    // find the location within the pool
    assert(location.poolIndex < memPools.size());

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
    uint8_t* memPoolBuffer = memPool.getBuffer();
    uint8_t* memory = memPoolBuffer + location.offset;
    return memory;
}

Referenced by GetOperandValueReadOnlyAddress().
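
Sketch: this is how GetOperandValueReadOnlyAddress() resolves a CONSTANT_REFERENCE operand, locating its bytes inside a mapped request pool.

const void* valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);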

◆ GetModelSummary()

std::string GetModelSummary ( const Model & model)

Definition at line 523 of file CanonicalUtils.cpp.

{
    std::stringstream result;

    result << model.main.inputIndexes.size() << " input(s), "
           << model.main.operations.size() << " operation(s), "
           << model.main.outputIndexes.size() << " output(s), "
           << model.main.operands.size() << " operand(s) "
           << std::endl;

    result << "Inputs: ";
    for (uint32_t i = 0; i < model.main.inputIndexes.size(); i++)
    {
        result << GetOperandSummary(model.main.operands[model.main.inputIndexes[i]]) << ", ";
    }
    result << std::endl;

    result << "Operations: ";
    for (uint32_t i = 0; i < model.main.operations.size(); i++)
    {
        result << model.main.operations[i].type << ", ";
    }
    result << std::endl;

    result << "Outputs: ";
    for (uint32_t i = 0; i < model.main.outputIndexes.size(); i++)
    {
        result << GetOperandSummary(model.main.operands[model.main.outputIndexes[i]]) << ", ";
    }
    result << std::endl;

    return result.str();
}

References GetOperandSummary().

Referenced by ArmnnPreparedModel::execute(), and ArmnnPreparedModel::executeFenced().
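
Usage sketch: the summary is intended for logging, e.g. when execution starts (see ArmnnPreparedModel::execute()).

VLOG(DRIVER) << GetModelSummary(model);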

◆ GetOperandSummary()

std::string GetOperandSummary ( const Operand & operand)

Definition at line 191 of file CanonicalUtils.cpp.

{
    std::stringstream ss;
    ss << "operand dimensions: [ ";
    for (unsigned int i = 0; i < operand.dimensions.size(); ++i)
    {
        ss << operand.dimensions[i] << " ";
    }
    ss << "] operand type: " << operand.type;
    return ss.str();
}

Referenced by GetModelSummary().

◆ GetOperandType()

bool armnn_driver::GetOperandType ( const Operation & operation,
uint32_t  inputIndex,
const Model & model,
OperandType & type 
)
inline

Definition at line 683 of file ConversionUtils.hpp.

{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

References GetInputOperand().

◆ GetOperandValueReadOnlyAddress()

const void * GetOperandValueReadOnlyAddress ( const Operand & operand,
const Model & model,
const ConversionData & data,
bool  optional 
)

Definition at line 777 of file ConversionUtils.cpp.

{
    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            valueStart = model.operandValues.data() + operand.location.offset;
            break;
        }
        case OperandLifeTime::POINTER:
        {
            // Pointer specified in the model
            valueStart = std::get<const void*>(operand.location.pointer);
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case OperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            VLOG(DRIVER) << __func__ << ": unsupported/invalid operand lifetime: " << operand.lifetime;
            valueStart = nullptr;
        }
    }

    return valueStart;
}

References GetMemoryFromPool(), and ConversionData::m_MemPools.

Referenced by ConvertOperandToConstTensorPin(), DequantizeIfRequired(), GetInputScalar(), GetOptionalBool(), GetTensorInt32Values(), and OptionalDataLayout().

◆ GetOptionalBool()

bool armnn_driver::GetOptionalBool ( const Operation & operation,
uint32_t  inputIndex,
const Model & model,
const ConversionData & data 
)
inline

Definition at line 900 of file ConversionUtils.hpp.

{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return false;
    }

    if (!IsBool(*operand))
    {
        return false;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!valueAddress)
    {
        return false;
    }

    return *(static_cast<const bool*>(valueAddress));
}

References GetInputOperand(), and GetOperandValueReadOnlyAddress().

◆ GetOptionalConvolutionDilationParams()

template <typename ConvolutionDescriptor>
bool armnn_driver::GetOptionalConvolutionDilationParams ( const Operation & operation,
uint32_t  dilationXIndex,
ConvolutionDescriptor &  descriptor,
const Model & model,
const ConversionData & data 
)

Definition at line 874 of file ConversionUtils.hpp.

{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar(operation,
                                  dilationXIndex,
                                  OperandType::INT32,
                                  descriptor.m_DilationX,
                                  model,
                                  data);
        success &= GetInputScalar(operation,
                                  dilationXIndex + 1,
                                  OperandType::INT32,
                                  descriptor.m_DilationY,
                                  model,
                                  data);
    }

    return success;
}

References GetInputScalar().

◆ GetOptionalInputActivation()

bool armnn_driver::GetOptionalInputActivation ( const Operation & operation,
uint32_t  inputIndex,
ActivationFn &  activationFunction,
const Model & model,
const ConversionData & data 
)
inline

Definition at line 853 of file ConversionUtils.hpp.

{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

References GetInputActivationFunction().
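
Example — an illustrative sketch; input index 3 is hypothetical. Many NNAPI operations carry the fused activation as their final input.

ActivationFn activationFunction;
if (!armnn_driver::GetOptionalInputActivation(operation, 3u, activationFunction, model, data))
{
    return false; // activation input present but unreadable
}
// activationFunction is kActivationNone if the input was absent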

◆ GetOutputOperand()

const Operand* armnn_driver::GetOutputOperand ( const Operation &  operation,
uint32_t  outputIndex,
const Model &  model 
)
inline

Definition at line 662 of file ConversionUtils.hpp.

665 {
666  if (outputIndex >= operation.outputs.size())
667  {
668  Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
669  return nullptr;
670  }
671 
672  // Model should have been validated beforehand
673  ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
674 
675  return &getMainModel(model).operands[operation.outputs[outputIndex]];
676 }

References ARMNN_ASSERT, and getMainModel().

Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), and SetupAndTrackLayerOutputSlot().

◆ GetTensorInfoForOperand()

armnn::TensorInfo GetTensorInfoForOperand ( const Operand &  operand)

Definition at line 97 of file CanonicalUtils.cpp.

98 {
99  using namespace armnn;
100  bool perChannel = false;
101  bool isScalar = false;
102 
103  DataType type;
104  switch (operand.type)
105  {
106  case OperandType::TENSOR_BOOL8:
107  type = DataType::Boolean;
108  break;
109  case OperandType::TENSOR_FLOAT32:
110  type = DataType::Float32;
111  break;
112  case OperandType::TENSOR_FLOAT16:
113  type = DataType::Float16;
114  break;
115  case OperandType::TENSOR_QUANT8_ASYMM:
116  type = DataType::QAsymmU8;
117  break;
118  case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
119  perChannel=true;
120  ARMNN_FALLTHROUGH;
121  case OperandType::TENSOR_QUANT8_SYMM:
122  type = DataType::QSymmS8;
123  break;
124  case OperandType::TENSOR_QUANT16_SYMM:
125  type = DataType::QSymmS16;
126  break;
127  case OperandType::TENSOR_INT32:
128  type = DataType::Signed32;
129  break;
130  case OperandType::INT32:
131  type = DataType::Signed32;
132  isScalar = true;
133  break;
134  case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
135  type = DataType::QAsymmS8;
136  break;
137  default:
138  throw UnsupportedOperand<OperandType>(operand.type);
139  }
140 
141  TensorInfo ret;
142  if (isScalar)
143  {
144  ret = TensorInfo(TensorShape(Dimensionality::Scalar), type);
145  }
146  else
147  {
148  if (operand.dimensions.size() == 0)
149  {
150  TensorShape tensorShape(Dimensionality::NotSpecified);
151  ret = TensorInfo(tensorShape, type);
152  }
153  else
154  {
155  std::vector<unsigned char> dimensionsSpecificity(operand.dimensions.size(), true);
156  int count = 0;
157  std::for_each(operand.dimensions.data(),
158  operand.dimensions.data() + operand.dimensions.size(),
159  [&](const unsigned int val)
160  {
161  if (val == 0)
162  {
163  dimensionsSpecificity[count] = false;
164  }
165  count++;
166  });
167 
168  TensorShape tensorShape(operand.dimensions.size(),
169  operand.dimensions.data(),
170  reinterpret_cast<const bool *>(dimensionsSpecificity.data()));
171  ret = TensorInfo(tensorShape, type);
172  }
173  }
174 
175  if (perChannel)
176  {
177  // ExtraParams is expected to be of type channelQuant
178  const auto& perAxisQuantParams = std::get<Operand::SymmPerChannelQuantParams>(operand.extraParams);
179 
180  ret.SetQuantizationScales(perAxisQuantParams.scales);
181  ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
182  }
183  else
184  {
185  ret.SetQuantizationScale(operand.scale);
186  ret.SetQuantizationOffset(operand.zeroPoint);
187  }
188  return ret;
189 }

References ARMNN_FALLTHROUGH, armnn::Boolean, armnn::Float16, armnn::Float32, armnn::QAsymmS8, armnn::QAsymmU8, armnn::QSymmS16, armnn::QSymmS8, armnn::Scalar, and armnn::Signed32.

Referenced by ConvertOperandToConstTensorPin(), ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), ConvertToLayerInputHandle(), and SetupAndTrackLayerOutputSlot().
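
Example — a typical pattern from the converters (sketch; outputOperand is assumed to be a valid pointer obtained from GetOutputOperand()):

const armnn::TensorInfo outputInfo = armnn_driver::GetTensorInfoForOperand(*outputOperand);
if (armnn_driver::IsDynamicTensor(outputInfo) && !armnn_driver::AreDynamicTensorsSupported())
{
    return false; // dynamic output shape and no support for inferring it
}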

◆ GetTensorInt32Values()

bool GetTensorInt32Values ( const Operand &  operand,
std::vector< int32_t > &  outValues,
const Model &  model,
const ConversionData &  data 
)

Definition at line 822 of file ConversionUtils.cpp.

826 {
827  if (operand.type != OperandType::TENSOR_INT32)
828  {
829  VLOG(DRIVER) << __func__ << ": invalid operand type: " << operand.type;
830  return false;
831  }
832 
833  const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
834  if (!startAddress)
835  {
836  VLOG(DRIVER) << __func__ << ": failed to get operand address " << operand.type;
837  return false;
838  }
839 
840  // Check number of bytes is sensible
841  const uint32_t numBytes = operand.location.length;
842  if (numBytes % sizeof(int32_t) != 0)
843  {
844  return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
845  __func__, numBytes, sizeof(int32_t));
846  }
847 
848  outValues.resize(numBytes / sizeof(int32_t));
849  memcpy(outValues.data(), startAddress, numBytes);
850  return true;
851 }

References GetOperandValueReadOnlyAddress().

Referenced by ConvertPaddings(), and ConvertReduce().
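
Example — an illustrative sketch reading reduction axes; axisOperand is assumed to be a constant TENSOR_INT32 operand from the current operation:

std::vector<int32_t> axes;
if (!armnn_driver::GetTensorInt32Values(*axisOperand, axes, model, data))
{
    return false; // wrong type, unmapped memory, or length not a multiple of 4 bytes
}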

◆ IsConnectedToDequantize()

bool IsConnectedToDequantize ( armnn::IOutputSlot *  ioutputSlot)

Definition at line 1040 of file ConversionUtils.cpp.

1041 {
1042  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize()";
1043  if (!ioutputSlot)
1044  {
1045  return false;
1046  }
1047  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() ioutputSlot is valid.";
1048  // Find the connections and layers..
1049  armnn::IConnectableLayer& owningLayer = ioutputSlot->GetOwningIConnectableLayer();
1050  if (owningLayer.GetType() == armnn::LayerType::Dequantize)
1051  {
1052  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() connected to Dequantize Layer.";
1053  armnn::IInputSlot& inputSlot = owningLayer.GetInputSlot(0);
1054  armnn::IOutputSlot* connection = inputSlot.GetConnection();
1055  if (connection)
1056  {
1057  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer has a connection.";
1058  armnn::IConnectableLayer& connectedLayer =
1059  connection->GetOwningIConnectableLayer();
1060  if (connectedLayer.GetType() == armnn::LayerType::Constant)
1061  {
1062  VLOG(DRIVER) << "ConversionUtils::IsConnectedToDequantize() Dequantize Layer connected to Constant";
1063  return true;
1064  }
1065  }
1066  }
1067  return false;
1068 }

References armnn::Constant, armnn::Dequantize, IInputSlot::GetConnection(), IConnectableLayer::GetInputSlot(), IOutputSlot::GetOwningIConnectableLayer(), and IConnectableLayer::GetType().

◆ IsDynamicTensor()

bool IsDynamicTensor ( const armnn::TensorInfo &  tensorInfo)

Checks if a tensor info represents a dynamic tensor.

Definition at line 488 of file CanonicalUtils.cpp.

489 {
490  if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
491  {
492  return true;
493  }
494  // Account for the usage of the TensorShape empty constructor
495  if (tensorInfo.GetNumDimensions() == 0)
496  {
497  return true;
498  }
499  return !tensorInfo.GetShape().AreAllDimensionsSpecified();
500 }

References TensorShape::AreAllDimensionsSpecified(), TensorShape::GetDimensionality(), TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), and armnn::NotSpecified.

Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), ConvertToLayerInputHandle(), and SetupAndTrackLayerOutputSlot().

◆ IsOperandConstant()

bool armnn_driver::IsOperandConstant ( const Operand &  operand)
inline

Definition at line 698 of file ConversionUtils.hpp.

699 {
700  OperandLifeTime lifetime = operand.lifetime;
701 
702  return lifetime == OperandLifeTime::CONSTANT_COPY ||
703  lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
704  lifetime == OperandLifeTime::POINTER ||
705  lifetime == OperandLifeTime::NO_VALUE;
706 }

Referenced by ConvertOperandToConstTensorPin(), and DequantizeIfRequired().

◆ IsQSymm8()

bool armnn_driver::IsQSymm8 ( const Operand &  operand)
inline

Definition at line 1002 of file ConversionUtils.hpp.

1003 {
1004  return operand.type == OperandType::TENSOR_QUANT8_SYMM;
1005 }

Referenced by DequantizeIfRequired().

◆ isQuantizedOperand()

bool isQuantizedOperand ( const OperandType &  operandType)

Definition at line 507 of file CanonicalUtils.cpp.

508 {
509  if (operandType == OperandType::TENSOR_QUANT8_ASYMM ||
510  operandType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
511  operandType == OperandType::TENSOR_QUANT8_SYMM ||
512  operandType == OperandType::TENSOR_QUANT16_SYMM ||
513  operandType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
514  {
515  return true;
516  }
517  else
518  {
519  return false;
520  }
521 }
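
Example — an illustrative sketch, using the logging macro seen elsewhere in the driver:

if (armnn_driver::isQuantizedOperand(operand.type))
{
    VLOG(DRIVER) << "quantized operand: scale=" << operand.scale
                 << " zeroPoint=" << operand.zeroPoint;
}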

◆ IsWeightsValid()

bool IsWeightsValid ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model 
)

Utility functions.

Definition at line 134 of file ConversionUtils.cpp.

137 {
138  const Operand* operand = GetInputOperand(operation, inputIndex, model);
139  if (!operand)
140  {
141  Fail("%s: failed to get input operand %i", __func__, inputIndex);
142  return false;
143  }
144 
145  if (operand->lifetime != OperandLifeTime::CONSTANT_COPY
146  && operand->lifetime != OperandLifeTime::CONSTANT_REFERENCE
147  && operand->lifetime != OperandLifeTime::NO_VALUE)
148  {
149  return false;
150  }
151  return true;
152 }

References GetInputOperand().

◆ OptionalDataLayout()

armnn::DataLayout OptionalDataLayout ( const Operation &  operation,
uint32_t  inputIndex,
const Model &  model,
ConversionData &  data 
)

Definition at line 853 of file ConversionUtils.cpp.

857 {
858  const Operand* operand = GetInputOperand(operation, inputIndex, model);
859  if (!operand)
860  {
861  return armnn::DataLayout::NHWC;
862  }
863 
864  if (!IsBool(*operand))
865  {
866  return armnn::DataLayout::NHWC;
867  }
868 
869  const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
870  if (!valueAddress)
871  {
872  return armnn::DataLayout::NHWC;
873  }
874 
875  if (*(static_cast<const bool*>(valueAddress)))
876  {
877  return armnn::DataLayout::NCHW;
878  }
879  else
880  {
881  return armnn::DataLayout::NHWC;
882  }
883 }

References GetInputOperand(), GetOperandValueReadOnlyAddress(), armnn::NCHW, and armnn::NHWC.

Referenced by ConvertPooling2d().
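
Example — an illustrative sketch; input index 10 is hypothetical and depends on the operation layout:

armnn::Pooling2dDescriptor desc;
// Defaults to NHWC when the optional layout input is absent or unreadable.
desc.m_DataLayout = armnn_driver::OptionalDataLayout(operation, 10u, model, data);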

◆ ProcessActivation()

armnn::IConnectableLayer * ProcessActivation ( const armnn::TensorInfo &  tensorInfo,
ActivationFn  activation,
armnn::IConnectableLayer *  prevLayer,
ConversionData &  data 
)

Definition at line 885 of file ConversionUtils.cpp.

889 {
890  ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);
891 
892  prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
893 
894  armnn::IConnectableLayer* activationLayer = prevLayer;
895 
896  if (activation != ActivationFn::kActivationNone)
897  {
898  armnn::ActivationDescriptor activationDesc;
899  switch (activation)
900  {
901  case ActivationFn::kActivationRelu:
902  {
903  activationDesc.m_Function = armnn::ActivationFunction::ReLu;
904  break;
905  }
906  case ActivationFn::kActivationRelu1:
907  {
908  activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
909  activationDesc.m_A = 1.0f;
910  activationDesc.m_B = -1.0f;
911  break;
912  }
913  case ActivationFn::kActivationRelu6:
914  {
915  activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
916  activationDesc.m_A = 6.0f;
917  break;
918  }
919  case ActivationFn::kActivationSigmoid:
920  {
921  activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
922  break;
923  }
924  case ActivationFn::kActivationTanh:
925  {
926  activationDesc.m_Function = armnn::ActivationFunction::TanH;
927  activationDesc.m_A = 1.0f;
928  activationDesc.m_B = 1.0f;
929  break;
930  }
931  default:
932  {
933  Fail("%s: Invalid activation enum value %i", __func__, activation);
934  return nullptr;
935  }
936  }
937 
938  bool isSupported = false;
939  armnn::BackendId setBackend;
940  FORWARD_LAYER_SUPPORT_FUNC(__func__,
941  IsActivationSupported,
942  data.m_Backends,
943  isSupported,
944  setBackend,
945  prevLayer->GetOutputSlot(0).GetTensorInfo(),
946  tensorInfo,
947  activationDesc);
948  if (!isSupported)
949  {
950  return nullptr;
951  }
952 
953  activationLayer = data.m_Network->AddActivationLayer(activationDesc);
954  activationLayer->SetBackendId(setBackend);
955 
956  prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
957  activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
958  }
959 
960  return activationLayer;
961 }

References ARMNN_ASSERT, armnn::BoundedReLu, IOutputSlot::Connect(), FORWARD_LAYER_SUPPORT_FUNC, IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumOutputSlots(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), ActivationDescriptor::m_A, ActivationDescriptor::m_B, ConversionData::m_Backends, ActivationDescriptor::m_Function, ConversionData::m_Network, armnn::ReLu, IConnectableLayer::SetBackendId(), IOutputSlot::SetTensorInfo(), armnn::Sigmoid, and armnn::TanH.

Referenced by SetupAndTrackLayerOutputSlot().
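
Example — an illustrative sketch; startLayer, outputInfo, activationFunction and data are assumed to come from the surrounding converter:

armnn::IConnectableLayer* endLayer =
    armnn_driver::ProcessActivation(outputInfo, activationFunction, startLayer, data);
if (endLayer == nullptr)
{
    return false; // requested activation is invalid or unsupported by the backends
}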

◆ RenameExportedFiles()

void RenameExportedFiles ( const std::string &  existingSerializedFileName,
const std::string &  existingDotFileName,
const std::string &  dumpDir,
const armnn::NetworkId  networkId 
)

Definition at line 577 of file CanonicalUtils.cpp.

581 {
582  if (dumpDir.empty())
583  {
584  return;
585  }
586  RenameFile(existingSerializedFileName, std::string("_network.armnn"), dumpDir, networkId);
587  RenameFile(existingDotFileName, std::string("_networkgraph.dot"), dumpDir, networkId);
588 }

References RenameFile().

Referenced by ArmnnDriverImpl::PrepareArmnnModel().

◆ RenameFile()

void RenameFile ( const std::string &  existingName,
const std::string &  extension,
const std::string &  dumpDir,
const armnn::NetworkId  networkId 
)

Definition at line 590 of file CanonicalUtils.cpp.

594 {
595  if (existingName.empty() || dumpDir.empty())
596  {
597  return;
598  }
599 
600  fs::path dumpPath = dumpDir;
601  const fs::path newFileName = dumpPath / (std::to_string(networkId) + extension);
602  int iRet = rename(existingName.c_str(), newFileName.c_str());
603  if (iRet != 0)
604  {
605  std::stringstream ss;
606  ss << "rename of [" << existingName << "] to [" << newFileName << "] failed with errno "
607  << std::to_string(errno) << " : " << std::strerror(errno);
608  VLOG(DRIVER) << ss.str().c_str();
609  }
610 }

Referenced by RenameExportedFiles().

◆ SerializeNetwork()

std::string SerializeNetwork ( const armnn::INetwork &  network,
const std::string &  dumpDir,
std::vector< uint8_t > &  dataCacheData,
bool  dataCachingActive 
)

Definition at line 429 of file CanonicalUtils.cpp.

433 {
434  std::string fileName;
435  bool bSerializeToFile = true;
436  if (dumpDir.empty())
437  {
438  bSerializeToFile = false;
439  }
440  else
441  {
442  std::string timestamp = GetFileTimestamp();
443  if (timestamp.empty())
444  {
445  bSerializeToFile = false;
446  }
447  }
448  if (!bSerializeToFile && !dataCachingActive)
449  {
450  return fileName;
451  }
452 
453  auto serializer(armnnSerializer::ISerializer::Create());
454  // Serialize the Network
455  serializer->Serialize(network);
456  if (dataCachingActive)
457  {
458  std::stringstream stream;
459  auto serialized = serializer->SaveSerializedToStream(stream);
460  if (serialized)
461  {
462  std::string const serializedString{stream.str()};
463  std::copy(serializedString.begin(),
464  serializedString.end(),
465  std::back_inserter(dataCacheData));
466  }
467  }
468 
469  if (bSerializeToFile)
470  {
471  // Set the name of the output .armnn file.
472  fs::path dumpPath = dumpDir;
473  std::string timestamp = GetFileTimestamp();
474  fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
475  fileName = tempFilePath.string();
476 
477  // Save serialized network to a file
478  std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
479  auto serialized = serializer->SaveSerializedToStream(serializedFile);
480  if (!serialized)
481  {
482  VLOG(DRIVER) << "An error occurred when serializing to file %s" << fileName.c_str();
483  }
484  }
485  return fileName;
486 }

References ISerializer::Create(), and GetFileTimestamp().

Referenced by ArmnnDriverImpl::PrepareArmnnModel().
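
Example — an illustrative sketch serializing for the data cache only; with an empty dump directory nothing is written to disk. The network pointer is assumed to come from the caller:

std::vector<uint8_t> dataCacheData;
const std::string fileName =
    armnn_driver::SerializeNetwork(*network, "", dataCacheData, true);
// fileName is empty here; dataCacheData holds the serialized network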

◆ SetupAndTrackLayerOutputSlot() [1/2]

bool SetupAndTrackLayerOutputSlot ( const Operation &  operation,
uint32_t  operationOutputIndex,
armnn::IConnectableLayer &  layer,
uint32_t  layerOutputIndex,
const Model &  model,
ConversionData &  data,
const armnn::TensorInfo *  overrideOutputInfo,
const std::function< void(const armnn::TensorInfo &, bool &)> &  validateFunc,
const ActivationFn &  activationFunction,
bool  inferOutputShapes 
)

Definition at line 963 of file ConversionUtils.cpp.

973 {
974  const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
975  if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
976  {
977  return false;
978  }
979 
980  armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
981  if (overrideOutputInfo == nullptr)
982  {
983  outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
984  }
985  else
986  {
987  outputSlot.SetTensorInfo(*overrideOutputInfo);
988  }
989 
990  bool isSupported = false;
991  if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
992  {
993  // Type one dynamic tensors require the previous layer's output shape for inference
994  for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
995  {
996  if(!layer.GetInputSlot(inputSlotIndex).GetConnection())
997  {
998  return false;
999  }
1000  }
1001  // IsTensorInfoSet will infer the dynamic output shape
1002  outputSlot.IsTensorInfoSet();
1003  // Once the shape is inferred we can validate it
1004  validateFunc(outputSlot.GetTensorInfo(), isSupported);
1005 
1006  if(!isSupported)
1007  {
1008  for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
1009  {
1010  layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
1011  }
1012  return false;
1013  }
1014  }
1015 
1016  const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1017 
1018  if (activationFunction != ActivationFn::kActivationNone)
1019  {
1020  const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
1021  armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
1022  &layer, data);
1023 
1024  if (!endLayer)
1025  {
1026  return Fail("%s: ProcessActivation failed", __func__);
1027  }
1028 
1029  armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
1030  data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
1031  }
1032  else
1033  {
1034  data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1035  }
1036 
1037  return true;
1038 }

References IOutputSlot::Disconnect(), IInputSlot::GetConnection(), IConnectableLayer::GetInputSlot(), IConnectableLayer::GetNumInputSlots(), IConnectableLayer::GetNumOutputSlots(), GetOutputOperand(), IConnectableLayer::GetOutputSlot(), IOutputSlot::GetTensorInfo(), GetTensorInfoForOperand(), IsDynamicTensor(), IOutputSlot::IsTensorInfoSet(), ConversionData::m_OutputSlotForOperand, ProcessActivation(), and IOutputSlot::SetTensorInfo().

Referenced by ConvertPooling2d(), ConvertReduce(), ConvertToActivation(), and SetupAndTrackLayerOutputSlot().
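
Example — the usual epilogue of a converter (sketch; layer, validateFunc and activationFunction are assumed to come from the surrounding ConvertXxx() function):

return armnn_driver::SetupAndTrackLayerOutputSlot(operation,
                                                  0, // operationOutputIndex
                                                  *layer,
                                                  0, // layerOutputIndex
                                                  model,
                                                  data,
                                                  nullptr, // overrideOutputInfo
                                                  validateFunc,
                                                  activationFunction,
                                                  false); // inferOutputShapes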

◆ SetupAndTrackLayerOutputSlot() [2/2]

bool armnn_driver::SetupAndTrackLayerOutputSlot ( const Operation &  operation,
uint32_t  outputIndex,
armnn::IConnectableLayer &  layer,
const Model &  model,
ConversionData &  data,
const armnn::TensorInfo *  overrideOutputInfo = nullptr,
const std::function< void(const armnn::TensorInfo &, bool &)> &  validateFunc = nullptr,
const ActivationFn &  activationFunction = ActivationFn::kActivationNone 
)
inline

Definition at line 959 of file ConversionUtils.hpp.

968 {
969  return SetupAndTrackLayerOutputSlot(operation,
970  outputIndex,
971  layer,
972  outputIndex,
973  model,
974  data,
975  overrideOutputInfo,
976  validateFunc,
977  activationFunction);
978 }

References SetupAndTrackLayerOutputSlot().

◆ SwizzleAndroidNn4dTensorToArmNn()

void SwizzleAndroidNn4dTensorToArmNn ( armnn::TensorInfo &  tensorInfo,
const void *  input,
void *  output,
const armnn::PermutationVector &  mappings 
)

Swizzles tensor data in input according to the dimension mappings.

Definition at line 40 of file CanonicalUtils.cpp.

44 {
45  assert(tensorInfo.GetNumDimensions() == 4U);
46 
47  armnn::DataType dataType = tensorInfo.GetDataType();
48  switch (dataType)
49  {
50  case armnn::DataType::Float16:
51  case armnn::DataType::Float32:
52  case armnn::DataType::QAsymmU8:
53  case armnn::DataType::QSymmS8:
54  case armnn::DataType::QAsymmS8:
55  // First swizzle tensor info
56  tensorInfo = armnnUtils::Permuted(tensorInfo, mappings);
57  // Then swizzle tensor data
58  armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
59  break;
60  default:
61  VLOG(DRIVER) << "Unknown armnn::DataType for swizzling";
62  assert(0);
63  }
64 }

References armnn::Float16, armnn::Float32, TensorInfo::GetDataType(), armnn::GetDataTypeSize(), TensorInfo::GetNumDimensions(), TensorInfo::GetShape(), armnnUtils::Permute(), armnnUtils::Permuted(), armnn::QAsymmS8, armnn::QAsymmU8, and armnn::QSymmS8.

Referenced by ConstTensorPin::ConstTensorPin().
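
Example — an illustrative sketch; weightsInfo and srcData are assumed, and the mapping { 0, 2, 3, 1 } is only an example (see armnn::PermutationVector for its exact semantics):

std::vector<uint8_t> permutedData(weightsInfo.GetNumBytes());
// weightsInfo is updated in place to describe the permuted shape
armnn_driver::SwizzleAndroidNn4dTensorToArmNn(weightsInfo,
                                              srcData,
                                              permutedData.data(),
                                              armnn::PermutationVector({ 0, 2, 3, 1 }));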

Variable Documentation

◆ g_DontPermute

const armnn::PermutationVector g_DontPermute {}

Definition at line 38 of file CanonicalUtils.cpp.

Referenced by DequantizeAndMakeConstTensorPin().
