ArmNN
 24.02
ConversionUtils.hpp File Reference
#include "CanonicalUtils.hpp"
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>
#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>
#include <armnnUtils/FloatingPointComparison.hpp>
#include <log/log.h>
#include <sstream>
#include <vector>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
Include dependency graph for ConversionUtils.hpp:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Classes

struct  ConversionData
 
class  LayerInputHandle
 
class  ConstTensorPin
 

Namespaces

 armnn_driver
 Helper classes.
 

Macros

#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, setBackend, ...)
 

Typedefs

using Model = ::android::nn::Model
 Helper classes. More...
 
using Operand = ::android::nn::Operand
 
using OperandLifeTime = ::android::nn::Operand::LifeTime
 
using OperandType = ::android::nn::OperandType
 
using Operation = ::android::nn::Operation
 
using OperationType = ::android::nn::OperationType
 
using ErrorStatus = ::android::nn::ErrorStatus
 
using DequantizeResult = std::tuple< std::unique_ptr< float[]>, size_t, armnn::TensorInfo, DequantizeStatus >
 

Enumerations

enum  ConversionResult { Success, ErrorMappingPools, UnsupportedFeature }
 
enum  DequantizeStatus { SUCCESS, NOT_REQUIRED, INVALID_OPERAND }
 

Functions

const android::nn::Model::Subgraph & getMainModel (const android::nn::Model &model)
 
armnn::IConnectableLayer * ProcessActivation (const armnn::TensorInfo &tensorInfo, ActivationFn activation, armnn::IConnectableLayer *prevLayer, ConversionData &data)
 
const Operand * GetInputOperand (const Operation &operation, uint32_t inputIndex, const Model &model, bool failOnIndexOutOfBounds=true)
 
const Operand * GetOutputOperand (const Operation &operation, uint32_t outputIndex, const Model &model)
 
const void * GetOperandValueReadOnlyAddress (const Operand &operand, const Model &model, const ConversionData &data, bool optional)
 
bool GetOperandType (const Operation &operation, uint32_t inputIndex, const Model &model, OperandType &type)
 
bool IsOperandConstant (const Operand &operand)
 
bool IsWeightsValid (const Operation &operation, uint32_t inputIndex, const Model &model, const bool isOptional=true)
 Utility functions. More...
 
ConstTensorPin ConvertOperandToConstTensorPin (const Operand &operand, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings, const armnn::TensorShape *overrideTensorShape, bool optional, const armnn::DataType *overrideDataType)
 
ConstTensorPin ConvertOperationInputToConstTensorPin (const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings=g_DontPermute, const armnn::TensorShape *overrideTensorShape=nullptr, bool optional=false)
 
template<typename OutputType >
bool GetInputScalar (const Operation &operation, uint32_t inputIndex, OperandType type, OutputType &outValue, const Model &model, const ConversionData &data, bool optional=false)
 
bool GetInputInt32 (const Operation &operation, uint32_t inputIndex, int32_t &outValue, const Model &model, const ConversionData &data)
 
bool GetInputFloat32 (const Operation &operation, uint32_t inputIndex, float &outValue, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunctionImpl (const Operation &operation, uint32_t inputIndex, OperandType type, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunction (const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetInputActivationFunctionFromTensor (const Operation &operation, uint32_t inputIndex, ActivationFn &outActivationFunction, const Model &model, const ConversionData &data)
 
bool GetOptionalInputActivation (const Operation &operation, uint32_t inputIndex, ActivationFn &activationFunction, const Model &model, const ConversionData &data)
 
template<typename ConvolutionDescriptor >
bool GetOptionalConvolutionDilationParams (const Operation &operation, uint32_t dilationXIndex, ConvolutionDescriptor &descriptor, const Model &model, const ConversionData &data)
 
bool GetOptionalBool (const Operation &operation, uint32_t inputIndex, const Model &model, const ConversionData &data)
 
bool GetTensorInt32Values (const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data)
 
bool GetInputPaddingScheme (const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data)
 
LayerInputHandle ConvertToLayerInputHandle (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle)
 
bool SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes)
 
armnn::DataLayout OptionalDataLayout (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data)
 
bool SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t outputIndex, armnn::IConnectableLayer &layer, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo=nullptr, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc=nullptr, const ActivationFn &activationFunction=ActivationFn::kActivationNone)
 
bool ConvertToActivation (const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data)
 
bool ConvertPaddings (const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor)
 
bool ConvertReduce (const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
 
bool ConvertPooling2d (const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data)
 
bool IsQSymm8 (const Operand &operand)
 
DequantizeResult DequantizeIfRequired (size_t operand_index, const Operation &operation, const Model &model, const ConversionData &data)
 
ConstTensorPin DequantizeAndMakeConstTensorPin (const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
 
bool IsConnectedToDequantize (armnn::IOutputSlot *ioutputSlot)
 

Macro Definition Documentation

◆ FORWARD_LAYER_SUPPORT_FUNC

#define FORWARD_LAYER_SUPPORT_FUNC (   funcName,
  func,
  backends,
  supported,
  setBackend,
  ... 
)

Definition at line 155 of file ConversionUtils.hpp.

Function Documentation

◆ getMainModel()

const android::nn::Model::Subgraph& getMainModel ( const android::nn::Model &  model)
inline