ArmNN 24.02
armnn::optimizations Namespace Reference

Namespaces

 pad_fold
 

Classes

class  AddBroadcastReshapeLayerImpl
 
class  AddDebugImpl
 
class  AddDebugToFileImpl
 
class  ConvertConstants
 
class  ConvertConstDequantisationLayersToConstLayersImpl
 
class  ConvertConstPermuteLayersToConstLayers
 
class  ConvertFp32NetworkToFp16Impl
 
class  DeleteBroadcastToImpl
 
struct  Float16ToFloat32
 
struct  Float32ToFloat16
 
class  FuseBatchNorm
 
struct  IsFloat16Layer
 
struct  IsFloat32Layer
 
class  MovePermuteUpImpl
 
class  MoveTransposeUpImpl
 
class  OptimizeConsecutiveReshapesImpl
 
class  OptimizeInverseConversionsImpl
 
class  OptimizeInversePermutesImpl
 
class  PermuteAndBatchToSpaceAsDepthToSpaceImpl
 Replaces a Permute leading into BatchToSpace with a DepthToSpace, in the case where the Permute swaps the batch and channels dimensions such that the replacement is valid.
 
class  PermuteAsReshapeImpl
 
class  PermuteDepthwiseConv2dWeightsImpl
 
class  SquashEqualSiblingsImpl
 
class  TransposeAsReshapeImpl
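
The classes above are the optimization bodies: small functors that the OptimizeForType, OptimizeForConnection and OptimizeForExclusiveConnection wrappers (see the typedefs below) invoke on each matching layer or connection while walking the graph. As a minimal sketch only, assuming the internal headers from the ArmNN source tree (src/armnn/optimizations/Optimization.hpp and src/armnn/Graph.hpp, which are not installed public headers), a hypothetical no-op functor has the following shape; the name ExampleNoopImpl is illustrative and not part of ArmNN:

#include "optimizations/Optimization.hpp" // internal ArmNN source header (assumed path)
#include "Graph.hpp"                      // internal ArmNN source header (assumed path)

namespace armnn
{
namespace optimizations
{

// Hypothetical optimization body: for OptimizeForType<Layer, ...> the wrapper
// forwards (graph, layer) here once per layer of the requested base type.
class ExampleNoopImpl
{
public:
    void Run(Graph& /*graph*/, Layer& /*layer*/) const
    {
        // A real optimization would inspect the layer and rewrite the graph here.
    }

protected:
    ExampleNoopImpl()  = default;
    ~ExampleNoopImpl() = default;
};

// Wrapping the functor exactly as the typedefs in the next section do.
using ExampleNoop = OptimizeForType<Layer, ExampleNoopImpl>;

} // namespace optimizations
} // namespace armnn

For connection-based optimizations the wrapped Run takes (Graph&, InputSlot&) instead, with the InputSlot identifying the connection between the two matched layer types.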
 

Typedefs

using AddBroadcastReshapeLayer = OptimizeForType< Layer, AddBroadcastReshapeLayerImpl >
 
using InsertDebugLayer = OptimizeForType< Layer, AddDebugImpl >
 
using InsertDebugToFileLayer = OptimizeForType< Layer, AddDebugToFileImpl >
 
using ConvertConstantsHalfToFloat = ConvertConstants< Float16ToFloat32, IsFloat32Layer >
 
using ConvertConstantsFloatToHalf = ConvertConstants< Float32ToFloat16, IsFloat16Layer >
 
using ConvertConstDequantisationLayersToConstLayers = OptimizeForConnection< ConstantLayer, DequantizeLayer, ConvertConstDequantisationLayersToConstLayersImpl >
 
using FusePermuteIntoConstLayer = OptimizeForConnection< ConstantLayer, PermuteLayer, ConvertConstPermuteLayersToConstLayers >
 
using Fp32NetworkToFp16Converter = OptimizeForType< Layer, ConvertFp32NetworkToFp16Impl >
 
using BroadcastToOptimizationLayer = OptimizeForType< BroadcastToLayer, DeleteBroadcastToImpl >
 
using FoldPadIntoConvolution2d = OptimizeForExclusiveConnection< PadLayer, Convolution2dLayer, pad_fold::FoldPadIntoConvolution2dImpl >
 
using FoldPadIntoDepthwiseConvolution2d = OptimizeForExclusiveConnection< PadLayer, DepthwiseConvolution2dLayer, pad_fold::FoldPadIntoDepthwiseConvolution2dImpl >
 
using FoldPadIntoPooling2d = OptimizeForExclusiveConnection< PadLayer, Pooling2dLayer, pad_fold::FoldPadIntoPooling2dImpl >
 
using FuseBatchNormIntoConvolution2DFloat32 = OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float32 > >
 
using FuseBatchNormIntoConvolution2DFloat16 = OptimizeForExclusiveConnection< Convolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< Convolution2dLayer, armnn::DataType::Float16 > >
 
using FuseBatchNormIntoDepthwiseConvolution2DFloat32 = OptimizeForExclusiveConnection< DepthwiseConvolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< DepthwiseConvolution2dLayer, armnn::DataType::Float32 > >
 
using FuseBatchNormIntoDepthwiseConvolution2DFloat16 = OptimizeForExclusiveConnection< DepthwiseConvolution2dLayer, BatchNormalizationLayer, FuseBatchNorm< DepthwiseConvolution2dLayer, armnn::DataType::Float16 > >
 
using MovePermuteUp = OptimizeForConnection< Layer, PermuteLayer, MovePermuteUpImpl >
 
using MoveTransposeUp = OptimizeForConnection< Layer, TransposeLayer, MoveTransposeUpImpl >
 
using OptimizeConsecutiveReshapes = OptimizeForConnection< ReshapeLayer, ReshapeLayer, OptimizeConsecutiveReshapesImpl >
 
using OptimizeInverseConversionsFp16 = OptimizeForConnection< ConvertFp16ToFp32Layer, ConvertFp32ToFp16Layer, OptimizeInverseConversionsImpl >
 
using OptimizeInverseConversionsFp32 = OptimizeForConnection< ConvertFp32ToFp16Layer, ConvertFp16ToFp32Layer, OptimizeInverseConversionsImpl >
 
using OptimizeInversePermutes = OptimizeForConnection< PermuteLayer, PermuteLayer, OptimizeInversePermutesImpl< PermuteLayer > >
 
using OptimizeInverseTransposes = OptimizeForConnection< TransposeLayer, TransposeLayer, OptimizeInversePermutesImpl< TransposeLayer > >
 
using PermuteAndBatchToSpaceAsDepthToSpace = OptimizeForConnection< PermuteLayer, BatchToSpaceNdLayer, PermuteAndBatchToSpaceAsDepthToSpaceImpl< PermuteLayer > >
 
using TransposeAndBatchToSpaceAsDepthToSpace = OptimizeForConnection< TransposeLayer, BatchToSpaceNdLayer, PermuteAndBatchToSpaceAsDepthToSpaceImpl< TransposeLayer > >
 
using PermuteAsReshape = OptimizeForType< PermuteLayer, PermuteAsReshapeImpl >
 
using PermuteDepthwiseConv2dWeights = OptimizeForType< Layer, PermuteDepthwiseConv2dWeightsImpl >
 
using SquashEqualPermuteSiblings = OptimizeForConnection< Layer, PermuteLayer, SquashEqualSiblingsImpl< PermuteLayer > >
 
using SquashEqualTransposeSiblings = OptimizeForConnection< Layer, TransposeLayer, SquashEqualSiblingsImpl< TransposeLayer > >
 
using SquashEqualReshapeSiblings = OptimizeForConnection< Layer, ReshapeLayer, SquashEqualSiblingsImpl< ReshapeLayer > >
 
using TransposeAsReshape = OptimizeForType< TransposeLayer, TransposeAsReshapeImpl >
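
Each alias above pairs one of the Impl classes with a wrapper so it can be applied as a composable graph pass. A minimal sketch of how such passes are combined, assuming the internal Optimizer.hpp and Graph.hpp headers from the ArmNN source tree and a hypothetical helper name RunCleanupPasses; this mirrors the pattern ArmNN itself uses inside armnn::Optimize():

#include "Optimizer.hpp" // internal ArmNN source header (assumed path)
#include "Graph.hpp"     // internal ArmNN source header (assumed path)

// Hypothetical helper, not part of ArmNN: bundle a few of the aliases above
// and run them over a graph.
void RunCleanupPasses(armnn::Graph& graph)
{
    using namespace armnn::optimizations;

    // MakeOptimizations collects any number of optimization objects into one
    // list; Optimizer::Pass then applies each of them to the graph.
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(
        SquashEqualPermuteSiblings(),
        SquashEqualReshapeSiblings(),
        OptimizeInversePermutes(),
        MovePermuteUp(),
        OptimizeConsecutiveReshapes(),
        PermuteAsReshape(),
        TransposeAsReshape(),
        FoldPadIntoConvolution2d()));
}

In normal use these passes are not invoked directly; the public armnn::Optimize() entry point applies a fixed selection of them while preparing a network for the chosen backends.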
 

Typedef Documentation

◆ InsertDebugLayer

Definition at line 53 of file AddDebug.hpp.