ArmNN 23.08
ConversionUtils.cpp File Reference
Include dependency graph for ConversionUtils.cpp


Namespaces

 armnn_driver
 Helper classes.
 

Functions

bool IsWeightsValid (const Operation &operation, uint32_t inputIndex, const Model &model)
 Utility functions.
 
ConstTensorPin ConvertOperandToConstTensorPin (const Operand &operand, const Model &model, const ConversionData &data, const armnn::PermutationVector &dimensionMappings, const armnn::TensorShape *overrideTensorShape, bool optional, const armnn::DataType *overrideDataType)
 
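The two helpers above are typically used together when loading constant weights for a layer. The following is a minimal sketch, not code from this file: it assumes `operation`, `model`, `data` and a previously looked-up `weightsOperand` are in scope (as they are inside the driver's per-operation converters), and that `ConstTensorPin` exposes an `IsValid()` check as declared in the driver headers.

    // Hypothetical converter fragment: reject non-constant weights, then pin them.
    if (!IsWeightsValid(operation, 1u, model))
    {
        return false; // weights must be constant (or absent) on this code path
    }

    const ConstTensorPin weightsPin =
        ConvertOperandToConstTensorPin(*weightsOperand,
                                       model,
                                       data,
                                       armnn::PermutationVector{}, // no permutation
                                       nullptr,                    // keep the operand's shape
                                       false,                      // weights are not optional
                                       nullptr);                   // keep the operand's data type
    if (!weightsPin.IsValid())
    {
        return false;
    }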
LayerInputHandle ConvertToLayerInputHandle (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data, const armnn::PermutationVector &dimensionMappings, const LayerInputHandle *inputHandle)
 
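A minimal usage sketch for ConvertToLayerInputHandle, assuming `operation`, `model` and `data` are in scope and that `LayerInputHandle` exposes `IsValid()` and `GetTensorInfo()` as in the driver's ConversionUtils header.

    // Hypothetical fragment: resolve input 0 of the operation to a layer input handle.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0u, model, data,
                                                       armnn::PermutationVector{}, nullptr);
    if (!input.IsValid())
    {
        return false;
    }
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();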
bool ConvertPaddings (const Operation &operation, const Model &model, ConversionData &data, unsigned int rank, armnn::PadDescriptor &padDescriptor)
 
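A sketch of how a PAD-style converter might fill an armnn::PadDescriptor with this helper; it assumes an `input` handle obtained via ConvertToLayerInputHandle is already in scope.

    // Hypothetical fragment: derive the rank from the input tensor and read the paddings operand.
    armnn::PadDescriptor padDescriptor;
    const unsigned int rank = input.GetTensorInfo().GetNumDimensions();
    if (!ConvertPaddings(operation, model, data, rank, padDescriptor))
    {
        return false;
    }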
bool ConvertPooling2d (const Operation &operation, const char *operationName, armnn::PoolingAlgorithm poolType, const Model &model, ConversionData &data)
 
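ConvertPooling2d performs the full conversion of a pooling operation, so a per-operation converter can simply forward to it. The wrapper below is hypothetical (not declared in this file) and mirrors the usual ConvertOperation signature used by the driver.

    // Hypothetical converter for an NNAPI MAX_POOL_2D operation.
    bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
    {
        return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
    }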
bool ConvertReduce (const Operation &operation, const Model &model, ConversionData &data, armnn::ReduceOperation reduceOperation)
 
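Similarly, reduce-style operations can delegate to ConvertReduce with the matching armnn::ReduceOperation. The wrapper name below is hypothetical.

    // Hypothetical converter for an NNAPI REDUCE_MAX operation.
    bool ConvertReduceMax(const Operation& operation, const Model& model, ConversionData& data)
    {
        return ConvertReduce(operation, model, data, armnn::ReduceOperation::Max);
    }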
bool ConvertToActivation (const Operation &operation, const char *operationName, const armnn::ActivationDescriptor &activationDesc, const Model &model, ConversionData &data)
 
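A sketch of an activation converter built on ConvertToActivation; only the descriptor changes between RELU/RELU1/RELU6/TANH-style operations. The wrapper is hypothetical.

    // Hypothetical converter for an NNAPI RELU operation.
    bool ConvertRelu(const Operation& operation, const Model& model, ConversionData& data)
    {
        armnn::ActivationDescriptor desc;
        desc.m_Function = armnn::ActivationFunction::ReLu;
        return ConvertToActivation(operation, __func__, desc, model, data);
    }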
DequantizeResult DequantizeIfRequired (size_t operand_index, const Operation &operation, const Model &model, const ConversionData &data)
 
ConstTensorPin DequantizeAndMakeConstTensorPin (const Operation &operation, const Model &model, const ConversionData &data, size_t operandIndex, bool optional)
 
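A sketch of loading weights that may be stored quantized (e.g. for FULLY_CONNECTED-style operations). It assumes DequantizeAndMakeConstTensorPin dequantizes the operand when needed (pairing with DequantizeIfRequired) and returns a ConstTensorPin with an `IsValid()` check; `operation`, `model` and `data` are assumed in scope.

    // Hypothetical fragment: pin operand index 1, dequantizing it first if required.
    const ConstTensorPin weightsPin =
        DequantizeAndMakeConstTensorPin(operation, model, data, 1u, false /* not optional */);
    if (!weightsPin.IsValid())
    {
        return false;
    }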
bool GetInputPaddingScheme (const Operation &operation, uint32_t inputIndex, PaddingScheme &outPaddingScheme, const Model &model, const ConversionData &data)
 
const void * GetOperandValueReadOnlyAddress (const Operand &operand, const Model &model, const ConversionData &data, bool optional)
 
bool GetTensorInt32Values (const Operand &operand, std::vector< int32_t > &outValues, const Model &model, const ConversionData &data)
 
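The three helpers above read scalar and tensor parameters of an operation (GetOperandValueReadOnlyAddress returns the raw data pointer that the higher-level readers build on). A minimal sketch, assuming `operation`, `model`, `data` and a previously looked-up `axisOperand` are in scope; the input indices are illustrative only.

    // Hypothetical fragment: read the implicit-padding scheme of a Conv/Pool operation.
    PaddingScheme paddingScheme;
    if (!GetInputPaddingScheme(operation, 3u, paddingScheme, model, data))
    {
        return false;
    }

    // Hypothetical fragment: read an int32 axis tensor into a vector.
    std::vector<int32_t> axes;
    if (!GetTensorInt32Values(*axisOperand, axes, model, data))
    {
        return false;
    }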
armnn::DataLayout OptionalDataLayout (const Operation &operation, uint32_t inputIndex, const Model &model, ConversionData &data)
 
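Many NNAPI operations carry an optional boolean "NCHW" input; OptionalDataLayout is assumed to map it to an armnn::DataLayout (falling back to NHWC when the operand is absent). A sketch, with a purely illustrative input index:

    // Hypothetical fragment: pick the data layout for a convolution descriptor.
    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = OptionalDataLayout(operation, 7u, model, data);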
armnn::IConnectableLayer * ProcessActivation (const armnn::TensorInfo &tensorInfo, ActivationFn activation, armnn::IConnectableLayer *prevLayer, ConversionData &data)
 
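A sketch of appending a fused activation after a freshly added layer. It assumes `outputInfo` (armnn::TensorInfo), `activation` (ActivationFn) and `layer` (armnn::IConnectableLayer*) are in scope, and that ProcessActivation returns the new end-of-chain layer (or the previous layer when no activation is requested), with nullptr signalling failure.

    // Hypothetical fragment: fuse the requested activation onto the layer chain.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, layer, data);
    if (endLayer == nullptr)
    {
        return false;
    }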
bool SetupAndTrackLayerOutputSlot (const Operation &operation, uint32_t operationOutputIndex, armnn::IConnectableLayer &layer, uint32_t layerOutputIndex, const Model &model, ConversionData &data, const armnn::TensorInfo *overrideOutputInfo, const std::function< void(const armnn::TensorInfo &, bool &)> &validateFunc, const ActivationFn &activationFunction, bool inferOutputShapes)
 
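A sketch of the usual tail of a converter: registering output slot 0 of the new layer against output 0 of the operation. It assumes `layer` is the armnn::IConnectableLayer just added to the network and that `ActivationFn::kActivationNone` is the "no fused activation" value from the driver headers; a real converter would run the backend's IsLayerSupported check inside the validation lambda.

    // Hypothetical fragment: track the layer output and finish the conversion.
    auto validateFunc = [](const armnn::TensorInfo& /*outputInfo*/, bool& isSupported)
    {
        isSupported = true; // placeholder for an IsLayerSupported check
    };

    return SetupAndTrackLayerOutputSlot(operation, 0u, *layer, 0u, model, data,
                                        nullptr,        // no output-info override
                                        validateFunc,
                                        ActivationFn::kActivationNone,
                                        false);         // do not infer output shapes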
bool IsConnectedToDequantize (armnn::IOutputSlot *ioutputSlot)
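A sketch of using IsConnectedToDequantize when deciding whether a non-constant weights input actually originates from a Dequantize of a constant tensor; `weightsOutputSlot` is assumed to be the armnn::IOutputSlot feeding the weights input, obtained elsewhere in the converter.

    // Hypothetical fragment: detect weights supplied through a Dequantize layer.
    if (weightsOutputSlot != nullptr && IsConnectedToDequantize(weightsOutputSlot))
    {
        // handle weights that arrive via a Dequantize of a constant tensor
    }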