diff options
author | Georgios Pinitas <georgios.pinitas@arm.com> | 2018-10-16 19:10:46 +0100 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:55:45 +0000 |
commit | 08346e9b9a7dadd2f0765aea64e656902d843e8a (patch) | |
tree | 6caaebd05a3b78a4a1e8bfb077aa02d75bd4cca0 /arm_compute/graph/backends/FunctionHelpers.h | |
parent | 8aaf93e8c12ce93d3d0082d4f4b70376f15536da (diff) | |
download | ComputeLibrary-08346e9b9a7dadd2f0765aea64e656902d843e8a.tar.gz |
COMPMID-1451: Fuse RELU, LU_BOUNDED_RELU with requantization in NEGEMMConvolutionLayer.
Change-Id: Iea5f2c5bcac8051c4c7655a6eabb2c43772eb31f
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/154104
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Diffstat (limited to 'arm_compute/graph/backends/FunctionHelpers.h')
-rw-r--r-- | arm_compute/graph/backends/FunctionHelpers.h | 18 |
1 file changed, 10 insertions, 8 deletions
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h index 56f4173fa0..a1cadcbf4c 100644 --- a/arm_compute/graph/backends/FunctionHelpers.h +++ b/arm_compute/graph/backends/FunctionHelpers.h @@ -266,10 +266,11 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, biases->info()->set_data_type(DataType::S32); } - const PadStrideInfo conv_info = node.convolution_info(); - const unsigned int num_groups = node.num_groups(); - const ConvolutionMethod conv_algorithm = node.convolution_method(); - const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled; + const PadStrideInfo conv_info = node.convolution_info(); + const unsigned int num_groups = node.num_groups(); + const ConvolutionMethod conv_algorithm = node.convolution_method(); + const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled; + const ActivationLayerInfo fused_act = node.fused_activation(); // Create and configure function (we assume that functions have been validated before creation) std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType); @@ -281,28 +282,28 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!"); std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>( std::string("WinogradConvolutionLayer"), mm, - input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math); + input, weights, biases, output, conv_info, fused_act, fast_math); } else if(conv_algorithm == ConvolutionMethod::Direct) { ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!"); std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>( std::string("DirectConvolutionLayer"), - input, weights, biases, output, conv_info); + input, weights, biases, output, conv_info, fused_act); } else if(conv_algorithm == ConvolutionMethod::GEMM) { std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>( std::string("GEMMConvolutionLayer"), mm, input, weights, biases, output, conv_info, - WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), num_groups); + WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups); } else { std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>( std::string("GenericConvolutionLayer"), mm, input, weights, biases, output, conv_info, - WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math, num_groups); + WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups); } // Log info @@ -321,6 +322,7 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, << " Input shape: " << input->info()->tensor_shape() << " Weights shape: " << weights->info()->tensor_shape() << " Output shape: " << output->info()->tensor_shape() + << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "") << std::endl); return func; } |