From 541880fcf4572887e57658a508623fb5f95ac554 Mon Sep 17 00:00:00 2001
From: Cathal Corbett
Date: Mon, 16 May 2022 15:20:56 +0100
Subject: IVGCVSW-6147 ConstTensorsAsInput: Optimizer - FusePermuteIntoConstLayer

 * No trailing permute layer after a constant layer
 * Unit test for optimization

Signed-off-by: Cathal Corbett
Change-Id: I0d098f5af41d2c55df7cef1ccfb848093320ddc1
---
 src/armnn/Network.cpp | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'src/armnn/Network.cpp')

diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 9da28ceeea..fecc766836 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1158,6 +1158,7 @@ OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
         if(selectedBackend == armnn::Compute::GpuAcc || selectedBackend == armnn::Compute::CpuAcc)
         {
             Optimizer::Pass(optGraph, MakeOptimizations(optimizations::PermuteDepthwiseConv2dWeights()));
+            Optimizer::Pass(optGraph, MakeOptimizations(optimizations::FusePermuteIntoConstLayer()));
         }
 
         // Select sub-graphs based on backend
@@ -1719,6 +1720,10 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
         optGraph.InferTensorInfos();
     }
 
+    // Need to FusePermuteIntoConstantLayer before FoldPadIntoDepthwiseConvolution2d or
+    // FuseBatchNormIntoDepthwiseConvolution2D optimizations are called.
+    Optimizer::Pass(optGraph, MakeOptimizations(FusePermuteIntoConstLayer()));
+
     // Perform optimisation passes
     Optimizer::Pass(optGraph, MakeOptimizations(SquashEqualPermuteSiblings(),
                                                 SquashEqualTransposeSiblings(),
@@ -1739,8 +1744,7 @@ IOptimizedNetworkPtr Optimize(const Graph& inGraph,
                                                 FuseBatchNormIntoConvolution2DFloat16(),
                                                 FuseBatchNormIntoDepthwiseConvolution2DFloat32(),
                                                 FuseBatchNormIntoDepthwiseConvolution2DFloat16(),
-                                                ConvertConstDequantisationLayersToConstLayers(),
-                                                RedirectMembersToConstantInputs()));
+                                                ConvertConstDequantisationLayersToConstLayers()));
 
     // If Fp32 to Fp16 optimization is set convert Fp32 network to Fp16
     if (options.m_ReduceFp32ToFp16)
--
cgit v1.2.1
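
Note on the optimization being wired in: per the commit message, FusePermuteIntoConstLayer removes a Permute layer that directly follows a Constant layer, and the added comment in Optimize() says it must run before the FoldPadIntoDepthwiseConvolution2d and FuseBatchNormIntoDepthwiseConvolution2D passes. The sketch below only illustrates the underlying idea (pre-applying the permutation to the constant's data so the Permute layer can be dropped); it is not ArmNN code. Tensor and PermuteConstant are hypothetical names, and the mapping convention used (output dim i reads from input dim perm[i]) is an assumption that may differ from ArmNN's PermutationVector semantics.

// Conceptual sketch only (not ArmNN code): what "fusing a Permute into a
// Constant layer" amounts to - re-laying-out the constant's data once at
// optimization time so the trailing Permute layer can be removed.
// Tensor and PermuteConstant are hypothetical names for illustration.
#include <cstdio>
#include <vector>

struct Tensor
{
    std::vector<unsigned int> shape; // e.g. NHWC dims
    std::vector<float>        data;  // row-major, last dim fastest
};

// Convention assumed here: out.shape[i] = in.shape[perm[i]], i.e. output
// dimension i reads from input dimension perm[i].
Tensor PermuteConstant(const Tensor& in, const std::vector<unsigned int>& perm)
{
    const size_t rank = in.shape.size();
    Tensor out;
    out.shape.resize(rank);
    for (size_t i = 0; i < rank; ++i)
    {
        out.shape[i] = in.shape[perm[i]];
    }
    out.data.resize(in.data.size());

    // Row-major strides for the input and output shapes.
    std::vector<size_t> inStride(rank, 1);
    std::vector<size_t> outStride(rank, 1);
    for (size_t i = rank - 1; i-- > 0;)
    {
        inStride[i]  = inStride[i + 1]  * in.shape[i + 1];
        outStride[i] = outStride[i + 1] * out.shape[i + 1];
    }

    // Scatter every element to its permuted position.
    for (size_t flat = 0; flat < in.data.size(); ++flat)
    {
        size_t rem = flat;
        size_t outIndex = 0;
        for (size_t i = 0; i < rank; ++i)
        {
            const size_t coord = rem / inStride[i]; // coordinate along input dim i
            rem %= inStride[i];
            for (size_t o = 0; o < rank; ++o)       // which output dim reads dim i?
            {
                if (perm[o] == i) { outIndex += coord * outStride[o]; break; }
            }
        }
        out.data[outIndex] = in.data[flat];
    }
    return out;
}

int main()
{
    // A 1x2x2x3 NHWC constant folded with an NHWC->NCHW permute ({0, 3, 1, 2}).
    Tensor weights{{1, 2, 2, 3}, std::vector<float>(12)};
    for (size_t i = 0; i < weights.data.size(); ++i)
    {
        weights.data[i] = static_cast<float>(i);
    }

    const Tensor folded = PermuteConstant(weights, {0, 3, 1, 2});
    std::printf("folded shape: %ux%ux%ux%u\n",
                folded.shape[0], folded.shape[1], folded.shape[2], folded.shape[3]);
    return 0;
}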