From 3fcf3dcf7b6ffc613468ccaca580bde495677440 Mon Sep 17 00:00:00 2001
From: Viet-Hoa Do
Date: Wed, 17 May 2023 15:17:48 +0100
Subject: Add multi-sketch support for dynamic fusion

* Tensors are owned by workload context instead of workload sketch
  so that they can be used by multiple sketches.
* Add an integration test for multi-sketch case.

Resolves: COMPMID-6148
Signed-off-by: Viet-Hoa Do
Change-Id: I37d0de5ac103fb2a85020aa1c26e49eb304f47b7
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9706
Comments-Addressed: Arm Jenkins
Reviewed-by: SiCong Li
Tested-by: Arm Jenkins
Benchmark: Arm Jenkins
---
 .../dynamic_fusion/sketch/gpu/GpuWorkloadContext.h | 47 +++++++++++++++++-----
 .../dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h  | 25 +-----------
 2 files changed, 39 insertions(+), 33 deletions(-)

(limited to 'arm_compute')

diff --git a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h
index 1ee3c7e3ec..0b60899734 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,6 +25,7 @@
 #define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_GPUWORKLOADCONTEXT

 #include "arm_compute/core/GPUTarget.h"
+#include "arm_compute/core/TensorInfo.h"

 #include <memory>

@@ -56,16 +57,20 @@ enum class GpuLanguage
 class GpuWorkloadContext
 {
 public:
+    class Impl;
+
     /** Constructor */
     GpuWorkloadContext(CLCompileContext *cl_compile_context);
-    /** Allow instances of this class to be copy constructed */
-    GpuWorkloadContext(const GpuWorkloadContext &config) = default;
-    /** Allow instances of this class to be copied */
-    GpuWorkloadContext &operator=(const GpuWorkloadContext &config) = default;
+    /** Destructor */
+    ~GpuWorkloadContext();
+    /** Prohibit instances of this class to be copy constructed */
+    GpuWorkloadContext(const GpuWorkloadContext &config) = delete;
+    /** Prohibit instances of this class to be copied */
+    GpuWorkloadContext &operator=(const GpuWorkloadContext &config) = delete;
     /** Allow instances of this class to be move constructed */
-    GpuWorkloadContext(GpuWorkloadContext &&config) = default;
+    GpuWorkloadContext(GpuWorkloadContext &&config);
     /** Allow instances of this class to be moved */
-    GpuWorkloadContext &operator=(GpuWorkloadContext &&config) = default;
+    GpuWorkloadContext &operator=(GpuWorkloadContext &&config);
     /** Get @ref GpuLanguage of the context */
     GpuLanguage gpu_language() const;
     /** Get @ref GpuTarget of the context */
@@ -75,9 +80,33 @@ public:
      */
     const CLCompileContext *cl_compile_context() const;

+    /** Create a @ref TensorInfo associated with the workload context.
+     *
+     * @return TensorInfo Newly created tensor info
+     */
+    template <typename... TArgs>
+    TensorInfo create_tensor_info(TArgs &&... args)
+    {
+        auto tensor_info = TensorInfo(std::forward<TArgs>(args)...);
+        register_user_tensor(tensor_info);
+        return tensor_info;
+    }
+
+    /** Get the internal implementation */
+    Impl &implementation();
+
+    /** Get the internal implementation */
+    const Impl &implementation() const;
+
 private:
-    GpuLanguage       _gpu_language{ GpuLanguage::Unknown };
-    CLCompileContext *_cl_compile_ctx{ nullptr };
+    /** Set a new ID to the tensor info and register its memory descriptor to the context.
+     *
+     * @param[in,out] tensor_info @ref ITensorInfo to be registered.
+     */
+    void register_user_tensor(ITensorInfo &tensor_info);
+
+    /** Internal implementation */
+    std::unique_ptr<Impl> _impl;
 };

 } // namespace dynamic_fusion
diff --git a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
index 155df293bf..75c2b1f528 100644
--- a/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
+++ b/arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,7 +24,6 @@
 #ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_GPUWORKLOADSKETCH
 #define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_GPUWORKLOADSKETCH

-#include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h"

 #include <memory>
@@ -62,30 +61,8 @@ public:
     const Implementation &implementation() const;
     /** Get the gpu workload context of this sketch */
     const GpuWorkloadContext *gpu_context() const;
-    /** Create a @ref TensorInfo associated with the workload sketch.
-     *
-     * @return TensorInfo Newly created tensor info
-     */
-    template <typename... Args>
-    TensorInfo create_tensor_info(Args &&... args)
-    {
-        auto tensor_info = TensorInfo(std::forward<Args>(args)...);
-        register_new_tensor(tensor_info);
-        return tensor_info;
-    }
-    /** Create a default @ref TensorInfo associated with the workload sketch
-     * It is usually used by user input or output tensors
-     *
-     * @return TensorInfo Newly created tensor info
-     */
-    TensorInfo create_tensor_info();

 private:
-    /** Register a new tensor by setting a new id to it and register its memory descriptor in the sketch
-     *
-     * @param[in,out] tensor_info @ref ITensorInfo that will be registered
-     */
-    void register_new_tensor(ITensorInfo &tensor_info);
     std::unique_ptr<Implementation> _impl; /**< Internal opaque implementation*/
 };
--
cgit v1.2.1
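For illustration, a minimal sketch of how the reworked API might be used after this change: one GpuWorkloadContext backing two GpuWorkloadSketch objects, with tensor infos created through the context as introduced above. The GpuWorkloadSketch(GpuWorkloadContext *) constructor, the arm_compute::experimental::dynamic_fusion namespace, the include paths outside the two patched headers, and the build_two_sketches helper are assumptions drawn from the surrounding dynamic fusion API rather than from this diff.

// Hypothetical usage sketch (not part of the patch): tensor infos are owned by
// the context, so the same infos can be referenced from more than one sketch.
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadContext.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion; // assumed enclosing namespaces

void build_two_sketches() // illustrative helper, not a library symbol
{
    // The CL compile context comes from the kernel library singleton.
    auto cl_compile_ctx = CLKernelLibrary::get().get_compile_context();

    // Tensor infos are now created by (and registered with) the context,
    // no longer by an individual sketch.
    GpuWorkloadContext context{ &cl_compile_ctx };
    TensorInfo in_info  = context.create_tensor_info(TensorShape(32U, 32U), 1, DataType::F32);
    TensorInfo mid_info = context.create_tensor_info(); // e.g. an intermediate tensor, shape deduced later

    // Two sketches sharing the same context and therefore the same tensor registry
    // (constructor signature assumed: GpuWorkloadSketch(GpuWorkloadContext *)).
    GpuWorkloadSketch sketch0{ &context };
    GpuWorkloadSketch sketch1{ &context };

    // ... record operators on sketch0 using in_info/mid_info, then continue on sketch1 ...
}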