author     Sheri Zhang <sheri.zhang@arm.com>   2021-11-02 10:45:07 +0000
committer  Sheri Zhang <sheri.zhang@arm.com>   2021-11-03 17:08:05 +0000
commit     fb2280381e7a98ad698ea0c1b2cd635a48ad4acc (patch)
tree       e3fab3cff60b806e725ba9c771617e41c654604e /arm_compute/graph/backends/FunctionHelpers.h
parent     bc788389dcc7bd682f53a85803f6a202d42ac828 (diff)
Add graph level convolution fusion with post operator
Resolves: COMPMID-4701

Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: I8a0d3c2ed4bf84489d94b8ae6641d6041aadaee5
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/6557
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute/graph/backends/FunctionHelpers.h')
-rw-r--r--  arm_compute/graph/backends/FunctionHelpers.h  94
1 file changed, 93 insertions(+), 1 deletion(-)
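For readers skimming the diff below: the new helper walks the node's post-op descriptors and collects them into a PostOpList that is handed to the backend GEMM convolution, so the fused activation and element-wise addition run inside the same backend function rather than as separate graph nodes. The standalone C++ sketch that follows illustrates only that pattern; every type in it (Tensor, IPostOp, PostOpAct, PostOpEltwiseAdd, PostOpList) is a simplified stand-in written for this note, not the ComputeLibrary class of the same name.

#include <iostream>
#include <memory>
#include <utility>
#include <vector>

// Simplified stand-ins for the post-op fusion pattern used in the diff below.
// These types are illustrative only and are NOT the ComputeLibrary API.
struct Tensor
{
    const char *name;
};

// Common interface for an operator fused after the convolution.
struct IPostOp
{
    virtual ~IPostOp() = default;
    virtual void describe() const = 0;
};

// Activation applied to the convolution output.
struct PostOpAct : IPostOp
{
    explicit PostOpAct(const char *act) : act_(act) {}
    void describe() const override
    {
        std::cout << "activation: " << act_ << '\n';
    }
    const char *act_;
};

// Element-wise addition of an extra input tensor to the convolution output.
struct PostOpEltwiseAdd : IPostOp
{
    PostOpEltwiseAdd(Tensor *addend, int prev_dst_pos) : addend_(addend), prev_dst_pos_(prev_dst_pos) {}
    void describe() const override
    {
        std::cout << "eltwise add of " << addend_->name << " at slot " << prev_dst_pos_ << '\n';
    }
    Tensor *addend_;
    int     prev_dst_pos_;
};

// Ordered, type-erased list of post ops, run after the main convolution.
struct PostOpList
{
    template <typename T, typename... Args>
    void push_back_op(Args &&... args)
    {
        ops_.push_back(std::make_unique<T>(std::forward<Args>(args)...));
    }
    std::vector<std::unique_ptr<IPostOp>> ops_;
};

int main()
{
    Tensor skip{ "skip_connection" };

    // Mirrors the switch in create_fused_convolution_with_post_op(): each
    // descriptor attached to the node becomes one entry in the post-op list.
    PostOpList post_ops;
    post_ops.push_back_op<PostOpAct>("RELU");
    post_ops.push_back_op<PostOpEltwiseAdd>(&skip, /* prev_op_dst_pos */ 0);

    // A fused backend function would run the convolution once and then apply
    // each post op in order, avoiding the extra kernel launches and
    // intermediate tensors that separate graph nodes would require.
    for(const auto &op : post_ops.ops_)
    {
        op->describe();
    }
    return 0;
}

In the actual helper added below, the equivalent experimental::PostOpList is forwarded to ConvolutionLayerFunctions::GEMMConvolutionLayer through create_named_memory_managed_function(), together with the usual convolution arguments.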
diff --git a/arm_compute/graph/backends/FunctionHelpers.h b/arm_compute/graph/backends/FunctionHelpers.h
index 9830290d0f..6bec66a6ff 100644
--- a/arm_compute/graph/backends/FunctionHelpers.h
+++ b/arm_compute/graph/backends/FunctionHelpers.h
@@ -450,7 +450,7 @@ std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLaye
/** Create a backend convolution layer function
*
* @tparam ConvolutionLayerFunctions Backend convolution functions
- * @tparam TargetInfo Target-specific information
+ * @tparam TargetInfo                Target-specific information
*
* @param[in] node Node to create the backend function for
* @param[in] ctx Graph context
@@ -538,6 +538,98 @@ std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node,
return std::move(func);
}
+/** Create a backend convolution layer function with post operator
+ *
+ * @tparam ConvolutionLayerFunctions Backend convolution functions
+ * @tparam TargetInfo                Target-specific information
+ *
+ * @param[in] node Node to create the backend function for
+ * @param[in] ctx  Graph context
+ *
+ * @return Backend convolution layer function
+ */
+template <typename ConvolutionLayerFunctions, typename TargetInfo>
+std::unique_ptr<IFunction> create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
+{
+    validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);
+
+    // Extract IO and info
+    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
+    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
+    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
+    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
+
+    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
+
+    if(is_quantized)
+    {
+        biases->info()->set_data_type(DataType::S32);
+    }
+
+    const PadStrideInfo       conv_info  = node.convolution_info();
+    const unsigned int        num_groups = node.num_groups();
+    const ActivationLayerInfo fused_act  = node.fused_activation();
+
+    experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
+
+    auto &post_op_info_list = node.post_op_info_list();
+    for(const auto &post_op_info : post_op_info_list)
+    {
+        switch(post_op_info->type())
+        {
+            case PostOpType::Activation:
+            {
+                const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
+                post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
+                break;
+            }
+            case PostOpType::Eltwise_Add:
+            {
+                typename TargetInfo::TensorType *add_input    = get_backing_tensor<TargetInfo>(node.input(3));
+                const auto                       eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
+                post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
+                break;
+            }
+            default:
+            {
+                ARM_COMPUTE_ERROR("Unsupported PostOpType");
+            }
+        }
+    }
+
+    // Create and configure function (we assume that functions have been validated before creation)
+    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
+    std::unique_ptr<IFunction>      func;
+    std::string                     func_name;
+
+    std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
+                                    std::string("GEMMConvolutionLayer"), mm,
+                                    input, weights, biases, output, conv_info,
+                                    WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);
+
+    // Log info
+    std::ostringstream qss;
+    if(is_quantized)
+    {
+        qss << " Input QuantInfo: " << input->info()->quantization_info()
+            << " Weights QuantInfo: " << weights->info()->quantization_info()
+            << " Output QuantInfo: " << output->info()->quantization_info();
+    }
+    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
+                               << node.name()
+                               << " Type: " << func_name
+                               << " Target: " << TargetInfo::TargetType
+                               << " Data Type: " << input->info()->data_type()
+                               << " Groups: " << num_groups
+                               << " Input shape: " << input->info()->tensor_shape()
+                               << " Weights shape: " << weights->info()->tensor_shape()
+                               << " Output shape: " << output->info()->tensor_shape()
+                               << qss.str()
+                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
+                               << std::endl);
+    return std::move(func);
+}
+
/** Create a backend deconvolution layer function
*
* @tparam DeconvolutionLayerFunction Backend deconvolution function