aboutsummaryrefslogtreecommitdiff
path: root/arm_compute
diff options
context:
space:
mode:
authorViet-Hoa Do <viet-hoa.do@arm.com>2023-05-17 15:17:48 +0100
committerViet-Hoa Do <viet-hoa.do@arm.com>2023-06-12 14:41:18 +0000
commit3fcf3dcf7b6ffc613468ccaca580bde495677440 (patch)
tree567c85806318a868b0029f969cac823dd48fe44a /arm_compute
parent48cfd5f7895f13167e4e9cd974dbc1e983e04ed7 (diff)
downloadComputeLibrary-3fcf3dcf7b6ffc613468ccaca580bde495677440.tar.gz
Add multi-sketch support for dynamic fusion
* Tensors are owned by workload context instead of workload sketch so that they can be used by multiple sketches. * Add an integration test for multi-sketch case. Resolves: COMPMID-6148 Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com> Change-Id: I37d0de5ac103fb2a85020aa1c26e49eb304f47b7 Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9706 Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: SiCong Li <sicong.li@arm.com> Tested-by: Arm Jenkins <bsgcomp@arm.com> Benchmark: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r-- arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h | 47
-rw-r--r-- arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h | 25
2 files changed, 39 insertions, 33 deletions
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h
index 1ee3c7e3ec..0b60899734 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,6 +25,7 @@
#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_GPUWORKLOADCONTEXT
#include "arm_compute/core/GPUTarget.h"
+#include "arm_compute/core/TensorInfo.h"
#include <memory>
@@ -56,16 +57,20 @@ enum class GpuLanguage
class GpuWorkloadContext
{
public:
+ class Impl;
+
/** Constructor */
GpuWorkloadContext(CLCompileContext *cl_compile_context);
- /** Allow instances of this class to be copy constructed */
- GpuWorkloadContext(const GpuWorkloadContext &config) = default;
- /** Allow instances of this class to be copied */
- GpuWorkloadContext &operator=(const GpuWorkloadContext &config) = default;
+ /** Destructor */
+ ~GpuWorkloadContext();
+ /** Prohibit instances of this class to be copy constructed */
+ GpuWorkloadContext(const GpuWorkloadContext &config) = delete;
+ /** Prohibit instances of this class to be copied */
+ GpuWorkloadContext &operator=(const GpuWorkloadContext &config) = delete;
/** Allow instances of this class to be move constructed */
- GpuWorkloadContext(GpuWorkloadContext &&config) = default;
+ GpuWorkloadContext(GpuWorkloadContext &&config);
/** Allow instances of this class to be moved */
- GpuWorkloadContext &operator=(GpuWorkloadContext &&config) = default;
+ GpuWorkloadContext &operator=(GpuWorkloadContext &&config);
/** Get @ref GpuLanguage of the context */
GpuLanguage gpu_language() const;
/** Get @ref GpuTarget of the context */
@@ -75,9 +80,33 @@ public:
*/
const CLCompileContext *cl_compile_context() const;
+ /** Create a @ref TensorInfo associated with the workload context.
+ *
+ * @return TensorInfo Newly created tensor info
+ */
+ template <typename... TArgs>
+ TensorInfo create_tensor_info(TArgs &&... args)
+ {
+ auto tensor_info = TensorInfo(std::forward<TArgs>(args)...);
+ register_user_tensor(tensor_info);
+ return tensor_info;
+ }
+
+ /** Get the internal implementation */
+ Impl &implementation();
+
+ /** Get the internal implementation */
+ const Impl &implementation() const;
+
private:
- GpuLanguage _gpu_language{ GpuLanguage::Unknown };
- CLCompileContext *_cl_compile_ctx{ nullptr };
+ /** Set a new ID to the tensor info and register its memory descriptor to the context.
+ *
+ * @param[in,out] tensor_info @ref ITensorInfo to be registered.
+ */
+ void register_user_tensor(ITensorInfo &tensor_info);
+
+ /** Internal implementation */
+ std::unique_ptr<Impl> _impl;
};
} // namespace dynamic_fusion
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
index 155df293bf..75c2b1f528 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,7 +24,6 @@
#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_GPUWORKLOADSKETCH
#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_GPUWORKLOADSKETCH
-#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h"
#include <memory>
@@ -62,30 +61,8 @@ public:
const Implementation &implementation() const;
/** Get the gpu workload context of this sketch */
const GpuWorkloadContext *gpu_context() const;
- /** Create a @ref TensorInfo associated with the workload sketch.
- *
- * @return TensorInfo Newly created tensor info
- */
- template <typename... Args>
- TensorInfo create_tensor_info(Args &&... args)
- {
- auto tensor_info = TensorInfo(std::forward<Args>(args)...);
- register_new_tensor(tensor_info);
- return tensor_info;
- }
- /** Create a default @ref TensorInfo associated with the workload sketch
- * It is usually used by user input or output tensors
- *
- * @return TensorInfo Newly created tensor info
- */
- TensorInfo create_tensor_info();
private:
- /** Register a new tensor by setting a new id to it and register its memory descriptor in the sketch
- *
- * @param[in,out] tensor_info @ref ITensorInfo that will be registered
- */
- void register_new_tensor(ITensorInfo &tensor_info);
std::unique_ptr<Implementation> _impl; /**< Internal opaque implementation*/
};