path: root/arm_compute
author    Gunes Bayir <gunes.bayir@arm.com>    2023-01-19 15:56:00 +0000
committer Gunes Bayir <gunes.bayir@arm.com>    2023-01-24 09:40:01 +0000
commit    cc2877368d5e15d9ea89d31c84ec651fc0fffd13 (patch)
tree      c57a3a406125b3a31e2d4aff6126ce99f4ade395 /arm_compute
parent    a6a153817302793732e28b07c3b4046df3f91a60 (diff)
download  ComputeLibrary-cc2877368d5e15d9ea89d31c84ec651fc0fffd13.tar.gz
Change dynamic fusion API to return destination tensor info
The new dynamic fusion API is introduced in the following patch:
https://review.mlplatform.org/c/ml/ComputeLibrary/+/8906

For each operator (except Conv2D, which is migrated in the above patch), we
  - remove the destination tensor from the is_supported, validate and create calls
  - make create_op return an ITensorInfo* to the intermediate destination object

Affected operators:
  - DepthwiseConv2D
  - Cast
  - Elementwise Ops
  - Clamp
  - Reshape
  - Resize

Resolves: COMPMID-5777
Change-Id: Ib60ec8a5f081752808455d7a7d790f2ed0627059
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8991
Reviewed-by: Ramy Elgammal <ramy.elgammal@arm.com>
Reviewed-by: Jakub Sujak <jakub.sujak@arm.com>
Dynamic-Fusion: Ramy Elgammal <ramy.elgammal@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
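For illustration, a minimal caller-side sketch of the API shape after this change, using GpuAdd as the example. Only the GpuAdd signatures changed in this patch are taken as given; the GpuWorkloadSketch/ITensorInfo setup and the GpuOutput::create_op signature are assumptions, not part of this diff.

#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

// Before: GpuAdd::create_op(sketch, lhs, rhs, dst) with a caller-supplied dst.
// After:  the sketch owns the intermediate destination info and create_op() returns it.
void fuse_add(GpuWorkloadSketch &sketch, ITensorInfo *lhs, ITensorInfo *rhs, ITensorInfo *dst)
{
    ITensorInfo *ans_info = GpuAdd::create_op(sketch, lhs, rhs); // auto-initialized intermediate dst info
    GpuOutput::create_op(sketch, ans_info, dst);                 // bind to the user-visible output (signature assumed unchanged)
}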
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h              | 24
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h             | 30
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuClamp.h            | 24
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h           |  9
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h  | 32
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h           |  9
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h          | 20
-rw-r--r--  arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h           | 30
8 files changed, 99 insertions, 79 deletions
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h
index a2ee7f7dc1..6ac5d4e500 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h
@@ -60,30 +60,32 @@ public:
* @param[in,out] sketch Workload sketch into which the operator will be fused
* @param[in] lhs Left hand side tensor info. Data types supported: U8/S16/S32/F16/F32.
* @param[in] rhs Right hand side tensor info. Data types supported: U8/S16/S32/F16/F32.
- * @param[out] dst Destination tensor info. Data types supported: U8/S16/S32/F16/F32. If an uninitialized ITensorInfo is passed in, it will be auto-initialized
+ *
+ * @return Pointer for the destination tensor info
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *lhs,
- ITensorInfo *rhs,
- ITensorInfo *dst);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *lhs,
+ ITensorInfo *rhs);
/** Check if the operator configuration is supported, irrespective of fusion
*
* @param[in] context Workload context within which the operator is running
* @param[in] lhs Left hand side tensor info.
* @param[in] rhs Right hand side tensor info.
- * @param[in] dst Destination tensor info. If an uninitialized ITensorInfo is passed in, it will be auto-initialized
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *lhs,
- const ITensorInfo *rhs,
- const ITensorInfo *dst);
+ const ITensorInfo *rhs);
/** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
- * Similar to @ref GpuAdd::create_op()
+ *
+ * Parameters are similar to @ref GpuAdd::create_op()
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *rhs,
- const ITensorInfo *lhs,
- const ITensorInfo *dst);
+ const ITensorInfo *lhs);
};
} // namespace dynamic_fusion
} // namespace experimental
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h
index 4b427be06a..1ba05ae5b8 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuCast.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -65,32 +65,32 @@ public:
*
* @param[in,out] sketch Workload sketch into which the operator will be fused
* @param[in] src Left hand side tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
- * @param[out] dst Destination tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
- * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
* @param[in] attributes Operator attributes
+ *
+ * @return Pointer for the destination tensor info
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *dst,
- const Attributes &attributes);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ const Attributes &attributes);
/** Check if the operator configuration is supported, irrespective of fusion
*
- * @param[in] context Workload context within which the operator is running
- * @param[in] src Left hand side tensor info. Data types supported: All.
- * @param[out] dst Destination tensor info. Data types supported: All.
- * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
- * @param[in] attributes Operator attributes
+ * @param[in] context Workload context within which the operator is running
+ * @param[in] src Left hand side tensor info. Data types supported: All.
+ * @param[in] attributes Operator attributes
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes);
/** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
- * Similar to @ref GpuCast::create_op()
+ *
+ * Parameters are similar to @ref GpuCast::create_op()
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes);
};
} // namespace dynamic_fusion
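As a hedged sketch of the pre-flight checks after this change: both queries now drop the dst parameter, and the destination is derived from src plus the attributes. The Attributes setter name and the Status-to-bool conversion are assumptions from the wider library, not part of this diff; same includes/using-directives as the GpuAdd sketch above.

void check_and_fuse_cast(const GpuWorkloadContext &context, GpuWorkloadSketch &sketch, ITensorInfo *src)
{
    GpuCast::Attributes attributes{};
    attributes.data_type(DataType::F32); // target data type; setter name assumed

    // dst is no longer passed to either query.
    const Status supported = GpuCast::is_supported_op(context, src, attributes);
    const Status validated = GpuCast::validate_op(sketch, src, attributes);

    if(bool(supported) && bool(validated)) // Status is convertible to bool (assumption)
    {
        ITensorInfo *dst_info = GpuCast::create_op(sketch, src, attributes); // returned, auto-initialized dst info
        ARM_COMPUTE_UNUSED(dst_info); // would feed a following operator or GpuOutput
    }
}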
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuClamp.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuClamp.h
index 66d6c5f300..e96251196a 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuClamp.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuClamp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,34 +58,34 @@ public:
*
* @param[in, out] sketch Workload sketch into which the operator will be fused
* @param[in] src Source tensor info. Data types supported: F16/F32.
- * @param[out] dst Destination tensor info. Data types supported: F16/F32.
- * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
* @param[in] attributes Operator attributes
+ *
+ * @return Pointer for the destination tensor info
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *dst,
- const Attributes &attributes);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ const Attributes &attributes);
/** Check if the operator configuration is supported, irrespective of fusion
*
* @param[in] context Workload context within which the operator is running
* @param[in] src Source tensor info. Data types supported: F16/F32.
- * @param[in] dst Destination tensor info. Data types supported: F16/F32.
- * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
* @param[in] attributes Operator attributes
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes);
/** Validate the operator and check if it can be fused into the workload sketch.
- * Similar to @ref GpuClamp::create_op()
+ *
+ * Parameters are similar to @ref GpuClamp::create_op()
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes);
};
} // namespace dynamic_fusion
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h
index 42c63df87f..612cc83a1f 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuConv2d.h
@@ -62,7 +62,7 @@ public:
* @param[in] bia (Optional) Bias tensor
* @param[in] attributes Operator attributes
*
- * @return pointer for the destination tensor
+ * @return Pointer for the destination tensor info
*/
static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
ITensorInfo *src,
@@ -76,6 +76,8 @@ public:
* @param[in] wei Weight tensor
* @param[in] bia (Optional) Bias tensor
* @param[in] attributes Operator attributes
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
@@ -83,7 +85,10 @@ public:
const ITensorInfo *bia,
const Attributes &attributes);
/** Check if the operator configuration is supported and if it can be fused into the workload sketch.
- * Similar to @ref GpuConv2d::create_op()
+ *
+ * Parameters are similar to @ref GpuConv2d::create_op()
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h
index a36ab62143..a0cb292730 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuDepthwiseConv2d.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -59,34 +59,42 @@ public:
* @param[in] src Source tensor
* @param[in] wei Weight tensor
* @param[in] bia (Optional) Bias tensor
- * @param[out] dst Destination tensor. If an uninitialized ITensorInfo is passed in, it will be auto-initialized
* @param[in] attributes Operator attributes
+ *
+ * @return Pointer for the destination tensor info
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *wei,
- ITensorInfo *bia,
- ITensorInfo *dst,
- const Attributes &attributes);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ ITensorInfo *wei,
+ ITensorInfo *bia,
+ const Attributes &attributes);
/** Check if the operator configuration is supported, irrespective of fusion
- * Similar to @ref GpuDepthwiseConv2d::create_op()
+ *
+ * @param[in] context Workload context within which the operator is running
+ * @param[in] src Source tensor
+ * @param[in] wei Weight tensor
+ * @param[in] bia (Optional) Bias tensor
+ * @param[in] attributes Operator attributes
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
const ITensorInfo *wei,
const ITensorInfo *bia,
- const ITensorInfo *dst,
const Attributes &attributes);
/** Check if the operator configuration is supported and if it can be fused into the workload sketch.
- * Similar to @ref GpuDepthwiseConv2d::create_op()
+ *
+ * Parameters are similar to @ref GpuDepthwiseConv2d::create_op()
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
const ITensorInfo *wei,
const ITensorInfo *bia,
- const ITensorInfo *dst,
const Attributes &attributes);
};
} // namespace dynamic_fusion
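A hedged sketch of the chaining this enables: the intermediate info returned by one create_op() is fed directly as the source of the next operator. Only the create_op signatures in this diff are taken as given; the GpuOutput signature and the surrounding setup are assumptions (same includes/using-directives as the GpuAdd sketch above).

// DepthwiseConv2d -> Clamp -> Output, chained through the returned intermediate infos.
void fuse_dwc_clamp(GpuWorkloadSketch &sketch,
                    ITensorInfo *src, ITensorInfo *wei, ITensorInfo *bia, ITensorInfo *dst,
                    const GpuDepthwiseConv2d::Attributes &dwc_attributes,
                    const GpuClamp::Attributes           &clamp_attributes)
{
    ITensorInfo *conv_info  = GpuDepthwiseConv2d::create_op(sketch, src, wei, bia, dwc_attributes);
    ITensorInfo *clamp_info = GpuClamp::create_op(sketch, conv_info, clamp_attributes);
    GpuOutput::create_op(sketch, clamp_info, dst); // the sink keeps its explicit dst (signature assumed)
}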
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h
index 2511b0efd5..06317511cd 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -33,7 +33,6 @@ namespace experimental
{
namespace dynamic_fusion
{
-
/** Forward declaration */
class GpuWorkloadContext;
class GpuWorkloadSketch;
@@ -66,6 +65,8 @@ public:
* @param[in] context Workload context within which the operator is running.
* @param[in] src Source tensor info.
* @param[in] dst Destination tensor info.
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
@@ -73,7 +74,9 @@ public:
/** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
*
- * Similar to @ref GpuOutput::create_op().
+ * Parameters are similar to @ref GpuOutput::create_op().
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h
index 2b49a31191..69c7a3a76a 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuReshape.h
@@ -55,30 +55,32 @@ public:
*
* @param[in,out] sketch Workload sketch into which the operator will be fused
* @param[in] src Input tensor info. Data type supported: All
- * @param[out] dst Output info. Data type supported: Same as @p src
* @param[in] attributes Operator attributes
+ *
+ * @return Pointer for the destination tensor info
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *dst,
- const Attributes &attributes);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ const Attributes &attributes);
/** Check if the operator configuration is supported, irrespective of fusion
*
* @param[in] context Workload context within which the operator is running
* @param[in] src Input tensor info.
- * @param[in] dst Output info.
* @param[in] attributes Operator attributes
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes);
/** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
- * Similar to @ref GpuReshape::create_op()
+ *
+ * Parameters are similar to @ref GpuReshape::create_op()
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes);
};
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h
index 1387bf1cf0..f9661c1c24 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/operators/GpuResize.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -64,32 +64,32 @@ public:
*
* @param[in,out] sketch Workload sketch into which the operator will be fused
* @param[in] src Left hand side tensor info.
- * @param[out] dst Destination tensor info.
- * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
* @param[in] attributes Operator attributes
+ *
+ * @return Pointer for the destination tensor info
*/
- static void create_op(GpuWorkloadSketch &sketch,
- ITensorInfo *src,
- ITensorInfo *dst,
- const Attributes &attributes);
+ static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
+ ITensorInfo *src,
+ const Attributes &attributes);
/** Check if the operator configuration is supported, irrespective of fusion
*
- * @param[in] context Workload context within which the operator is running
- * @param[in] src Left hand side tensor info.
- * @param[out] dst Destination tensor info.
- * If an uninitialized ITensorInfo is passed in, it will be auto-initialized
- * @param[in] attributes Operator attributes
+ * @param[in] context Workload context within which the operator is running
+ * @param[in] src Left hand side tensor info.
+ * @param[in] attributes Operator attributes
+ *
+ * @return Status
*/
static Status is_supported_op(const GpuWorkloadContext &context,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes);
/** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
- * Similar to @ref GpuResize::create_op()
+ *
+ * Parameters are similar to @ref GpuResize::create_op()
+ *
+ * @return Status
*/
static Status validate_op(const GpuWorkloadSketch &sketch,
const ITensorInfo *src,
- const ITensorInfo *dst,
const Attributes &attributes);
};