From 07810fc2fcdd34db74222d90cc73ef12a88e7b78 Mon Sep 17 00:00:00 2001
From: Mike Kelly
Date: Thu, 12 Nov 2020 10:58:48 +0000
Subject: IVGCVSW-5328-5329 Fuse Activation

 * Added Fused Activation Optimization to both CL and Neon backends.
 * Added Fused Activation support to all the CL and Neon workloads that support it.
 * Changed ProfilingTest network to be a Convolution layer followed by an Abs layer rather than an Activation layer.
 * Added IBackendInternal::OptimizeSubgraphView function that can accept a ModelOptions.
 * Network will now call OptimizeSubgraphView passing in the ModelOptions.

Signed-off-by: Keith Davis
Signed-off-by: Mike Kelly
Signed-off-by: Teresa Charlin
Change-Id: Ib536ac3cbafc7d9b35c139ad9a65b7735262cd9d
---
 src/backends/neon/workloads/NeonConvolution2dWorkload.cpp | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

(limited to 'src/backends/neon/workloads/NeonConvolution2dWorkload.cpp')

diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index af6f1aee78..fd8be17dfd 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -6,6 +6,7 @@
 #include "NeonConvolution2dWorkload.hpp"
 
 #include
+#include
 #include
 #include
 #include
@@ -25,7 +26,8 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
                                                       const Convolution2dDescriptor& descriptor,
                                                       const TensorInfo& weights,
                                                       const Optional<TensorInfo>& biases,
-                                                      bool isFastMathEnabled)
+                                                      bool isFastMathEnabled,
+                                                      const ActivationDescriptor* activationDescriptor)
 {
     const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
@@ -47,6 +49,9 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
 
     arm_compute::PadStrideInfo layerInfo = BuildArmComputePadStrideInfo(descriptor);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
+            activationDescriptor);
+
     return arm_compute::NEConvolutionLayer::validate(&aclInputInfo,
                                                      &aclWeightsInfo,
                                                      optionalAclBiasesInfo,
@@ -54,7 +59,7 @@ arm_compute::Status NeonConvolution2dWorkloadValidate(const TensorInfo& input,
                                                      layerInfo,
                                                      arm_compute::WeightsInfo(),
                                                      aclDilationInfo,
-                                                     arm_compute::ActivationLayerInfo(),
+                                                     activationInfo,
                                                      isFastMathEnabled);
 }
 
@@ -92,6 +97,8 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
     const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX,
                                                                       m_Data.m_Parameters.m_DilationY);
 
+    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
+
     auto convolutionLayer = std::make_unique<arm_compute::NEConvolutionLayer>(memoryManager);
     convolutionLayer->configure(&input,
                                 m_KernelTensor.get(),
@@ -100,7 +107,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
                                 padStrideInfo,
                                 arm_compute::WeightsInfo(),
                                 aclDilationInfo,
-                                arm_compute::ActivationLayerInfo(),
+                                activationInfo,
                                 isFastMathEnabled);
 
     m_ConvolutionMethod =
@@ -110,7 +117,7 @@ NeonConvolution2dWorkload::NeonConvolution2dWorkload(
                           padStrideInfo,
                           arm_compute::WeightsInfo(),
                           aclDilationInfo,
-                          arm_compute::ActivationLayerInfo(),
+                          activationInfo,
                           isFastMathEnabled);
 
     m_ConvolutionLayer.reset(convolutionLayer.release());
-- 
cgit v1.2.1
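
Note: the effect of this patch is that the workload no longer passes an empty arm_compute::ActivationLayerInfo() to NEConvolutionLayer; instead it builds one from the optional ActivationDescriptor (validate) or from the fused activation attached to the layer (configure), so Arm Compute Library can run the activation inside the convolution kernel rather than as a separate Activation workload. The sketch below illustrates the kind of descriptor-to-ACL mapping the new Convert* helpers perform. It is only an illustration, not the ArmNN implementation: the function name ConvertToAclActivationInfo and the subset of handled activation functions are assumptions made here for clarity.

    // Illustrative sketch only -- not the ArmNN helper itself.
    #include <arm_compute/core/Types.h>   // arm_compute::ActivationLayerInfo
    #include <armnn/Descriptors.hpp>      // armnn::ActivationDescriptor
    #include <stdexcept>

    namespace sketch
    {

    arm_compute::ActivationLayerInfo
    ConvertToAclActivationInfo(const armnn::ActivationDescriptor* desc)
    {
        using AclFunction = arm_compute::ActivationLayerInfo::ActivationFunction;

        // No descriptor means no fused activation: a default-constructed
        // ActivationLayerInfo leaves the convolution output untouched.
        if (desc == nullptr)
        {
            return arm_compute::ActivationLayerInfo();
        }

        switch (desc->m_Function)
        {
            case armnn::ActivationFunction::ReLu:
                return arm_compute::ActivationLayerInfo(AclFunction::RELU);
            case armnn::ActivationFunction::BoundedReLu:
                // m_A carries the upper bound, m_B the lower bound.
                return arm_compute::ActivationLayerInfo(AclFunction::LU_BOUNDED_RELU,
                                                        desc->m_A, desc->m_B);
            case armnn::ActivationFunction::TanH:
                return arm_compute::ActivationLayerInfo(AclFunction::TANH,
                                                        desc->m_A, desc->m_B);
            case armnn::ActivationFunction::Sigmoid:
                return arm_compute::ActivationLayerInfo(AclFunction::LOGISTIC);
            default:
                throw std::runtime_error("Activation function not handled in this sketch");
        }
    }

    } // namespace sketch

Passing the resulting ActivationLayerInfo into both NEConvolutionLayer::validate and configure keeps the validation path and the runtime path consistent, which is why the patch threads the same activationInfo value through both calls.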