From fb2280381e7a98ad698ea0c1b2cd635a48ad4acc Mon Sep 17 00:00:00 2001
From: Sheri Zhang
Date: Tue, 2 Nov 2021 10:45:07 +0000
Subject: Add graph level convolution fusion with post operator

Resolves: COMPMID-4701

Signed-off-by: Sheri Zhang
Change-Id: I8a0d3c2ed4bf84489d94b8ae6641d6041aadaee5
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6557
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
Reviewed-by: SiCong Li
Comments-Addressed: Arm Jenkins
---
 arm_compute/graph/backends/FunctionHelpers.h | 94 +++++++++++++++++++++++++++-
 1 file changed, 93 insertions(+), 1 deletion(-)

diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 9830290d0f..6bec66a6ff 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -450,7 +450,7 @@ std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLaye
 /** Create a backend convolution layer function
  *
  * @tparam ConvolutionLayerFunctions Backend convolution functions
- * @tparam TargetInfo        Target-specific information
+ * @tparam TargetInfo                Target-specific information
  *
  * @param[in] node Node to create the backend function for
  * @param[in] ctx  Graph context
  *
  * @return Backend convolution layer function
  */
@@ -538,6 +538,98 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
     return std::move(func);
 }
 
+/** Create a backend convolution layer function with post operator
+ *
+ * @tparam ConvolutionLayerFunctions Backend convolution functions
+ * @tparam TargetInfo                Target-specific information
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend convolution layer function
+ */
+template <typename ConvolutionLayerFunctions, typename TargetInfo>
+std::unique_ptr<IFunction> create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
+{
+    validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);
+
+    // Extract IO and info
+    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
+    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
+    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
+    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
+
+    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
+
+    if(is_quantized)
+    {
+        biases->info()->set_data_type(DataType::S32);
+    }
+
+    const PadStrideInfo       conv_info  = node.convolution_info();
+    const unsigned int        num_groups = node.num_groups();
+    const ActivationLayerInfo fused_act  = node.fused_activation();
+
+    experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
+
+    auto &post_op_info_list = node.post_op_info_list();
+    for(const auto &post_op_info : post_op_info_list)
+    {
+        switch(post_op_info->type())
+        {
+            case PostOpType::Activation:
+            {
+                const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
+                post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
+                break;
+            }
+            case PostOpType::Eltwise_Add:
+            {
+                typename TargetInfo::TensorType *add_input    = get_backing_tensor<TargetInfo>(node.input(3));
+                const auto                       eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
+                post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
+                break;
+            }
+            default:
+            {
+                ARM_COMPUTE_ERROR("Unsupported PostOpType");
+            }
+        }
+    }
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
+    std::unique_ptr<IFunction>      func;
+    std::string                     func_name;
+
+    std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
+                                    std::string("GEMMConvolutionLayer"), mm,
+                                    input, weights, biases, output, conv_info,
+                                    WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);
+
+    // Log info
+    std::ostringstream qss;
+    if(is_quantized)
+    {
+        qss << " Input QuantInfo: " << input->info()->quantization_info()
+            << " Weights QuantInfo: " << weights->info()->quantization_info()
+            << " Output QuantInfo: " << output->info()->quantization_info();
+    }
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+                               << node.name()
+                               << " Type: " << func_name
+                               << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input->info()->data_type()
+                               << " Groups: " << num_groups
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << qss.str()
+                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
+                               << std::endl);
+    return std::move(func);
+}
+
 /** Create a backend deconvolution layer function
  *
  * @tparam DeconvolutionLayerFunction Backend deconvolution function
--
cgit v1.2.1
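
Note: the helper above only builds the backend function; a backend consumes it from its function factory when it meets the fused node type. The sketch below is illustrative only and is not part of this patch: the CLConvolutionLayerFunctions and CLTargetInfo trait names and the NodeType::FusedConvolutionWithPostOp entry are assumptions here, mirroring the way ConvolutionLayerNode is already dispatched to create_convolution_layer.

    // Illustrative sketch only (not part of this patch): routing the fused node type from a
    // backend function factory to the helper defined above. CLConvolutionLayerFunctions,
    // CLTargetInfo and NodeType::FusedConvolutionWithPostOp are assumed names in this sketch.
    #include "arm_compute/graph/backends/FunctionHelpers.h"
    #include "arm_compute/graph/nodes/Nodes.h"

    using namespace arm_compute;
    using namespace arm_compute::graph;

    std::unique_ptr<IFunction> create_backend_function(INode *node, GraphContext &ctx)
    {
        switch(node->type())
        {
            case NodeType::FusedConvolutionWithPostOp:
                // Delegate to the new helper with the backend's function/target trait structs
                return backends::detail::create_fused_convolution_with_post_op<CLConvolutionLayerFunctions, CLTargetInfo>(
                           *utils::cast::polymorphic_downcast<FusedConvolutionWithPostOpNode *>(node), ctx);
            default:
                return nullptr;
        }
    }

In the library itself this dispatch lives in the per-backend function factories, where the exact trait and enum names may differ.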