author     Gunes Bayir <gunes.bayir@arm.com>   2023-01-19 15:56:00 +0000
committer  Gunes Bayir <gunes.bayir@arm.com>   2023-01-24 09:40:01 +0000
commit     cc2877368d5e15d9ea89d31c84ec651fc0fffd13 (patch)
tree       c57a3a406125b3a31e2d4aff6126ce99f4ade395 /arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h
parent     a6a153817302793732e28b07c3b4046df3f91a60 (diff)
Change dynamic fusion API to return destination tensor info
The new dynamic fusion API is introduced in the following patch: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8906

For each operator (except Conv2D, which is migrated in the above patch), we
- remove the destination tensor from the is_supported, validate and create calls
- make create_op return an ITensorInfo* to the intermediate destination object

Affected operators:
- DepthwiseConv2D
- Cast
- Elementwise Ops
- Clamp
- Reshape
- Resize

Resolves: COMPMID-5777
Change-Id: Ib60ec8a5f081752808455d7a7d790f2ed0627059
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8991
Reviewed-by: Ramy Elgammal <ramy.elgammal@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Dynamic-Fusion: Ramy Elgammal <ramy.elgammal@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
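
To make the shape of the change concrete, below is a minimal, illustrative C++ sketch of the updated call pattern for GpuAdd (not part of this patch). It assumes a GpuWorkloadSketch has already been constructed as in the existing dynamic fusion tests; the helper name fuse_two_adds and the tensor-info variable names are placeholders introduced only for this example.

    // Illustrative sketch only: shows the new create_op signature, where the
    // destination tensor info is returned instead of being passed in.
    // Include paths follow the existing dynamic fusion layout at this revision.
    #include "arm_compute/core/ITensorInfo.h"
    #include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h"

    using namespace arm_compute;
    using namespace arm_compute::experimental::dynamic_fusion;

    // Hypothetical helper: fuses two additions into an already-constructed sketch.
    ITensorInfo *fuse_two_adds(GpuWorkloadSketch &sketch,
                               ITensorInfo       *lhs,
                               ITensorInfo       *rhs,
                               ITensorInfo       *other)
    {
        // New API: no destination tensor info is passed in; the operator
        // auto-initializes and returns the intermediate destination.
        ITensorInfo *dst0 = GpuAdd::create_op(sketch, lhs, rhs);

        // The returned destination can be consumed directly by the next
        // operator fused into the same sketch.
        return GpuAdd::create_op(sketch, dst0, other);
    }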
Diffstat (limited to 'arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h')
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h  24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h
index a2ee7f7dc1..6ac5d4e500 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h
@@ -60,30 +60,32 @@ public:
* @param[in,out] sketch Workload sketch into which the operator will be fused
* @param[in] lhs Left hand side tensor info. Data types supported: U8/S16/S32/F16/F32.
* @param[in] rhs Right hand side tensor info. Data types supported: U8/S16/S32/F16/F32.
- * @param[out] dst Destination tensor info. Data types supported: U8/S16/S32/F16/F32. If an uninitialized ITensorInfo is passed in, it will be auto-initialized
+ *
+ * @return Pointer for the destination tensor info
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *lhs,
- ITensorInfo *rhs,
- ITensorInfo *dst);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *lhs,
+ ITensorInfo *rhs);
/** Check if the operator configuration is supported, irrespective of fusion
*
* @param[in] context Workload context within which the operator is running
* @param[in] lhs Left hand side tensor info.
* @param[in] rhs Right hand side tensor info.
- * @param[in] dst Destination tensor info. If an uninitialized ITensorInfo is passed in, it will be auto-initialized
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *lhs,
- const ITensorInfo *rhs,
- const ITensorInfo *dst);
+ const ITensorInfo *rhs);
/** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
- * Similar to @ref GpuAdd::create_op()
+ *
+ * Parameters are similar to @ref GpuAdd::create_op()
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *rhs,
- const ITensorInfo *lhs,
- const ITensorInfo *dst);
+ const ITensorInfo *lhs);
};
} // namespace dynamic_fusion
} // namespace experimental
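
For reference, a hedged sketch of how the dst-free query entry points above might be exercised; the construction of the GpuWorkloadContext and GpuWorkloadSketch is assumed to happen elsewhere, and check_add_support is a hypothetical helper name used only for this example.

    // Illustrative sketch only: both queries now take lhs/rhs only; the
    // destination tensor info is no longer part of the interface.
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/ITensorInfo.h"
    #include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h"

    using namespace arm_compute;
    using namespace arm_compute::experimental::dynamic_fusion;

    bool check_add_support(const GpuWorkloadContext &context,
                           const GpuWorkloadSketch  &sketch,
                           const ITensorInfo        *lhs,
                           const ITensorInfo        *rhs)
    {
        // Fusion-independent check against the workload context.
        const Status supported = GpuAdd::is_supported_op(context, lhs, rhs);

        // Fusion-aware check against the workload sketch.
        const Status valid = GpuAdd::validate_op(sketch, lhs, rhs);

        return supported.error_code() == ErrorCode::OK && valid.error_code() == ErrorCode::OK;
    }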