From 31df05a1870662a7288fbaeb6fbc7fc458bb5a73 Mon Sep 17 00:00:00 2001 From: SiCong Li Date: Wed, 9 Nov 2022 15:57:48 +0000 Subject: Remove dynamic fusion prototype with tests and examples Public headers of the new experimental dynamic fusion can be found in arm_compute/dynamic_fusion/ New examples on how to use the interface can be found in tests/validation/dynamic_fusion/gpu/Integration.cpp Resolves COMPMID-5683 Change-Id: I7ccb902a227fb487562df15fc3c30118d1d95bbd Signed-off-by: SiCong Li Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8671 Reviewed-by: Jakub Sujak Reviewed-by: Gunes Bayir Comments-Addressed: Arm Jenkins Benchmark: Arm Jenkins Tested-by: Arm Jenkins --- .../dynamic_fusion/ClCompositeKernel.cpp | 200 ----------------- .../dynamic_fusion/ClCompositeKernel.h | 76 ------- .../dynamic_fusion/ClCompositeOperator.cpp | 241 --------------------- 3 files changed, 517 deletions(-) delete mode 100644 src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp delete mode 100644 src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h delete mode 100644 src/gpu/cl/operators/experimental/dynamic_fusion/ClCompositeOperator.cpp (limited to 'src/gpu/cl') diff --git a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp deleted file mode 100644 index 30e19d5907..0000000000 --- a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright (c) 2022 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION - -#include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h" - -#include "arm_compute/core/CL/ICLTensor.h" -#include "src/core/CL/CLUtils.h" -#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h" -#include "src/gpu/cl/ClKernelLibrary.h" - -#include "support/Cast.h" -namespace arm_compute -{ -namespace experimental -{ -namespace dynamic_fusion -{ -using namespace arm_compute::opencl; - -void ClCompositeKernel::configure(const ClCompileContext &compile_ctx, const ClKernelCode &cl_code) -{ - // Create kernel from kernel source string - opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get(); - _kernel = static_cast(compile_ctx.create_kernel(cl_code.name, - "" /* Program name: Used to as part of a unique string for built kernel cache. 
Not needed */, - cl_code.code, - klib.kernel_path() /* Kernel path: Used in cases of embedded kernels */, - cl_code.build_options.options(), - false /* Is source binary */)); - - // Configure execution window - IClKernel::configure_internal(cl_code.window); - - // Set config id for lws tuning - _config_id = cl_code.config_id; - - // Set kernel arguments - _arguments = cl_code.arguments; -} - -inline void ClCompositeKernel::add_tensor_argument(unsigned int &idx, const ClKernelArgDescriptor &arg, const ICLTensor *tensor, const Window &arg_slice, std::vector &cl_images) -{ - switch(arg.tensor_arg_type) - { - case ClKernelTensorArgType::Scalar: - { - ARM_COMPUTE_ERROR("Unsupported yet"); - break; - } - - case ClKernelTensorArgType::Vector: - { - add_1D_tensor_argument(idx, tensor, arg_slice); - break; - } - - case ClKernelTensorArgType::Image: - { - add_2D_tensor_argument(idx, tensor, arg_slice); - break; - } - case ClKernelTensorArgType::Image_Reinterpret_As_3D: - { - add_2D_tensor_argument(idx, tensor, arg_slice); - const unsigned int total_cross_plane_pad = tensor->info()->padding().top + tensor->info()->padding().bottom; - _kernel.setArg(idx++, static_cast(total_cross_plane_pad)); - break; - } - case ClKernelTensorArgType::Image_Export_To_ClImage2D: - { - const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) * tensor->info()->dimension(2) * tensor->info()->dimension(3)); - const size_t image_row_pitch = tensor->info()->strides_in_bytes()[1]; - cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d, tensor->info()->data_type(), image_row_pitch); - cl_images.push_back(tensor_image2d); - _kernel.setArg(idx++, tensor_image2d); - break; - } - - case ClKernelTensorArgType::Image_3D: - { - add_2D_tensor_argument(idx, tensor, arg_slice); - _kernel.setArg(idx++, static_cast(tensor->info()->strides_in_bytes()[2])); - break; - } - case ClKernelTensorArgType::Image_3D_Export_To_ClImage2D: - { - const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) * tensor->info()->dimension(2) * tensor->info()->dimension(3)); - const size_t image_row_pitch = tensor->info()->strides_in_bytes()[1]; - cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d, tensor->info()->data_type(), image_row_pitch); - cl_images.push_back(tensor_image2d); - _kernel.setArg(idx++, tensor_image2d); - _kernel.setArg(idx++, static_cast(tensor->info()->strides_in_bytes()[2])); - break; - } - - case ClKernelTensorArgType::Tensor_3D: - { - add_3D_tensor_argument(idx, tensor, arg_slice); - break; - } - - case ClKernelTensorArgType::Tensor_4D: - { - add_4D_tensor_argument(idx, tensor, arg_slice); - break; - } - case ClKernelTensorArgType::Tensor_4D_t_Buffer: - { - add_4d_tensor_nhwc_argument(idx, tensor); - break; - } - case ClKernelTensorArgType::Tensor_4D_t_Image: - { - const size_t image_w = tensor->info()->dimension(0) / 4; - const size_t image_h = tensor->info()->tensor_shape().total_size_upper(1); - const size_t image_stride_y = tensor->info()->strides_in_bytes()[1]; - - cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), - TensorShape(image_w, image_h), tensor->info()->data_type(), image_stride_y); - cl_images.push_back(tensor_image2d); - - _kernel.setArg(idx++, tensor_image2d); - add_4d_tensor_nhwc_argument(idx, tensor); - break; - } - default: - { - ARM_COMPUTE_ERROR("Unsupported"); 
- } - } -} - -void ClCompositeKernel::run_composite_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue, const ClExecutionDescriptor &exec_desc) -{ - ARM_COMPUTE_UNUSED(exec_desc); - ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this); - ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window); - - Window slice = window.first_slice_window_3D(); - // Don't slice matrix along the z dimension if matrix has just 2 dimensions and matrix A more than 2 - // This scenario can happen when the matrix multiplication is used to perform a convolution operation - Window slice_fixed_z = slice; - slice_fixed_z.set(Window::DimX, Window::Dimension(0, 1, 1)); - slice_fixed_z.set(Window::DimY, Window::Dimension(0, 1, 1)); - - unsigned int idx = 0; - do - { - // Set kernel arguments - Window arg_slice = slice; - // CLImages created from tensor arguments. Need to be retained until enqueue - std::vector cl_images; - for(auto id_arg : _arguments) - { - const auto arg = id_arg.second; - auto tensor = utils::cast::polymorphic_downcast(tensors.get_tensor(arg.arg_id)); - ARM_COMPUTE_ERROR_ON_NULLPTR(tensor); - ARM_COMPUTE_ERROR_ON_NULLPTR(tensor->info()); - if(!arg.slide_along_dimz) - { - // The stride_z for matrix must be zero if we do not slice - ARM_COMPUTE_ERROR_ON(tensor->info()->strides_in_bytes()[3] != 0); - arg_slice = slice_fixed_z; - } - add_tensor_argument(idx, arg, tensor, arg_slice, cl_images); - } - - // Dispatch kernel - bool use_dummy_work_items = false; - enqueue(queue, *this, slice, lws_hint(), use_dummy_work_items); - } - while(!exec_desc.skip_sliding_window && window.slide_window_slice_3D(slice)); -} - -} // namespace dynamic_fusion -} // namespace experimental -} // namespace arm_compute -#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */ \ No newline at end of file diff --git a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h deleted file mode 100644 index 52b92be568..0000000000 --- a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2022 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION - -#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H -#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H - -#include "arm_compute/core/experimental/ClWorkload.h" -#include "src/gpu/cl/ClCompileContext.h" -#include "src/gpu/cl/IClKernel.h" - -namespace arm_compute -{ -namespace experimental -{ -namespace dynamic_fusion -{ -struct ClExecutionDescriptor; -struct ClKernelCode; - -class ClCompositeKernel final : public opencl::IClKernel -{ -public: - void configure(const opencl::ClCompileContext &, const ClKernelCode &); - - /** Run the composite kernel - * @note The slots / keys in ITensorPack are the argument Ids of the tensors in blueprint - * - * @param tensors ITensorPack object containing run-time tensor memories - * @param window Execution window - * @param queue OpenCL Command queue - * @param exec_desc Descriptor containing execution information - */ - virtual void run_composite_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue, const ClExecutionDescriptor &exec_desc) override; - -private: - /** Set a kernel tensor argument - * - * @param[in,out] idx Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set. - * @param[in] arg Kernel argument descriptor accompanying @p tensor - * @param[in] tensor Tensor to set as an argument of the object's kernel. - * @param[in] arg_slice Window the kernel will be run on. - * @param[out] cl_images Extra cl images created from the tensor (will need to be retained until the kernel is enqueued) - */ - inline void add_tensor_argument(unsigned int &idx, const ClKernelArgDescriptor &arg, const ICLTensor *tensor, const Window &arg_slice, std::vector &cl_images); - -private: - ClKernelArgList _arguments{}; /** All kernel arguments required by runtime */ -}; - -} // namespace dynamic_fusion -} // namespace experimental -} // namespace arm_compute -#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H -#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */ \ No newline at end of file diff --git a/src/gpu/cl/operators/experimental/dynamic_fusion/ClCompositeOperator.cpp b/src/gpu/cl/operators/experimental/dynamic_fusion/ClCompositeOperator.cpp deleted file mode 100644 index a53a73e4ec..0000000000 --- a/src/gpu/cl/operators/experimental/dynamic_fusion/ClCompositeOperator.cpp +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Copyright (c) 2022 Arm Limited. - * - * SPDX-License-Identifier: MIT - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION -#include "arm_compute/runtime/experimental/ClCompositeOperator.h" - -#include "arm_compute/core/experimental/ClWorkload.h" -#include "arm_compute/core/experimental/Types.h" -#include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h" -#include "support/Cast.h" - -namespace arm_compute -{ -namespace experimental -{ -namespace dynamic_fusion -{ -namespace -{ -Status add_tensor_to_tensor_pack(int wk_tensor_id, ICLTensor *tensor, const ClWorkload &workload, TensorPackMap &prepare_pack_map, TensorPackMap &run_pack_map) -{ - if(tensor == nullptr) - { - return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Trying to add a nullptr into the tensor packs"); - } - const auto bp_tensor_id = workload.tensors.at(wk_tensor_id).kernel_arg.arg_id; // blueprint tensor id - std::vector uwk_ids{}; - const auto src_uwk_ids = workload.graph.src_ops_from_tensor(wk_tensor_id); - const auto dst_uwk_ids = workload.graph.dst_ops_from_tensor(wk_tensor_id); - uwk_ids.insert(uwk_ids.end(), src_uwk_ids.begin(), src_uwk_ids.end()); - uwk_ids.insert(uwk_ids.end(), dst_uwk_ids.begin(), dst_uwk_ids.end()); - - for(auto uwk_id : uwk_ids) - { - TensorPackMap *pack_map = nullptr; - const auto uwk_stage = workload.unit_workloads.at(uwk_id).stage.stage; - switch(uwk_stage) - { - case UnitWorkloadStage::Stage::Run: - pack_map = &run_pack_map; - break; - case UnitWorkloadStage::Stage::Prepare: - pack_map = &prepare_pack_map; - break; - default: - return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported workload stage"); - } - - ITensorPack *tensor_pack = pack_map->find_tensor_pack(uwk_id); - if(tensor_pack == nullptr) - { - pack_map->add_tensor_pack(uwk_id, ITensorPack{ { bp_tensor_id, tensor } }); - } - else - { - tensor_pack->add_tensor(bp_tensor_id, tensor); - } - } - return Status{}; -} - -} // namespace - -ITensorPack *TensorPackMap::find_tensor_pack(UnitWorkload::Id uwk_id) -{ - auto tensor_pack = _tensor_packs.find(uwk_id); - if(tensor_pack != _tensor_packs.end()) - { - return &(tensor_pack->second); - } - return nullptr; -} - -ITensorPack &TensorPackMap::get_tensor_pack(UnitWorkload::Id uwk_id) -{ - return _tensor_packs.at(uwk_id); -} - -void TensorPackMap::add_tensor_pack(UnitWorkload::Id uwk_id, const ITensorPack &tensor_pack) -{ - _tensor_packs[uwk_id] = tensor_pack; -} - -Status bind_tensors(ClAuxTensorData &aux_tensor_data, TensorPackMap &prepare_pack_map, TensorPackMap &run_pack_map, const ClWorkload &workload, const OpTensorBinding &op_tensors) -{ - for(auto tensor : workload.tensors) - { - const auto wk_tensor_id = tensor.first; // workload tensor id - ICLTensor *tensor_object = nullptr; - if(tensor.second.memory_type == MemoryType::Core) - { - const auto op_tensor_id = workload.op_tensor_id_lut.at(wk_tensor_id); - auto op_tensor_find = op_tensors.find(op_tensor_id); - if(op_tensor_find == op_tensors.end()) - { - return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Cannot find binding for some operator tensor"); - } - tensor_object = utils::cast::polymorphic_downcast(op_tensor_find->second); - } - else if(tensor.second.memory_type == MemoryType::Auxiliary) - { - // Create aux tensor CLTensor object - const TensorInfo tensor_info = *tensor.second.info; - 
const auto memory_info = tensor.second.memory_info; - tensor_object = aux_tensor_data.add_aux_tensor(wk_tensor_id, tensor_info, memory_info); - } - else - { - return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported tensor memory type"); - } - - const auto st = add_tensor_to_tensor_pack(wk_tensor_id, tensor_object, workload, prepare_pack_map, run_pack_map); - ARM_COMPUTE_RETURN_ON_ERROR(st); - } - return Status{}; -} - -CLTensor *ClAuxTensorData::add_aux_tensor(int tensor_id, const ITensorInfo &tensor_info, const AuxMemoryInfo &memory_info) -{ - auto find_tensor_pair = _owned_tensors.find(tensor_id); - if(find_tensor_pair == _owned_tensors.end()) - { - return find_tensor_pair->second.get(); - } - else - { - auto tensor = std::make_unique(); - auto inserted_pair = _owned_tensors.emplace(tensor_id, std::move(tensor)).first; - auto new_tensor = inserted_pair->second.get(); - _tensors.emplace_back(new_tensor, tensor_info, memory_info); - return new_tensor; - } -} - -std::vector &ClAuxTensorData::get_tensors() -{ - return _tensors; -} -struct ClCompositeOperator::Implementation -{ - std::map> _kernels{}; - std::map> _kernels_prep{}; - ClWorkload _workload{}; - bool _is_prepared{ false }; -}; - -ClCompositeOperator::ClCompositeOperator() - : _impl{ std::make_unique() } -{ -} - -ClCompositeOperator::~ClCompositeOperator() = default; - -void ClCompositeOperator::configure(const CLCompileContext &ctx, const ClWorkload &workload) -{ - ARM_COMPUTE_ERROR_THROW_ON(ClCompositeOperator::validate(workload)); - _impl->_workload = workload; - - // Traverse workloads in topological order - const auto sorted = workload.graph.topological_sort().second; - for(const auto &node : sorted) - { - auto work = workload.unit_workloads.at(node.op); - auto stage = work.stage.stage; - auto k = std::make_unique(); - k->configure(ctx, work.code); - - switch(stage) - { - case UnitWorkloadStage::Stage::Run: - _impl->_kernels.emplace(work.id, std::move(k)); - break; - case UnitWorkloadStage::Stage::Prepare: - _impl->_kernels_prep.emplace(work.id, std::move(k)); - break; - default: - ARM_COMPUTE_ERROR("Invalid stage"); - } - break; - } -} - -Status ClCompositeOperator::validate(const ClWorkload &workload) -{ - return workload.status; -} - -void ClCompositeOperator::prepare(TensorPackMap &tensor_pack_map) -{ - if(!_impl->_is_prepared) - { - for(auto &id_kernel_pair : _impl->_kernels_prep) - { - const bool flush_queue = false; - const auto uwk_id = id_kernel_pair.first; - auto kernel = id_kernel_pair.second.get(); - CLScheduler::get().enqueue_op(*kernel, tensor_pack_map.get_tensor_pack(uwk_id), ClExecutionDescriptor{}, flush_queue); - } - - _impl->_is_prepared = true; - } -} - -void ClCompositeOperator::run(TensorPackMap &tensor_pack_map) -{ - ARM_COMPUTE_ERROR_ON_MSG(!_impl->_is_prepared, "Operator is not prepared"); - - for(auto &id_kernel_pair : _impl->_kernels) - { - // Flush the command queue on the last kernel - const bool flush_queue = false; - const auto uwk_id = id_kernel_pair.first; - auto kernel = id_kernel_pair.second.get(); - CLScheduler::get().enqueue_op(*kernel, tensor_pack_map.get_tensor_pack(uwk_id), ClExecutionDescriptor{}, flush_queue); - } -} - -} // namespace dynamic_fusion -} // namespace experimental -} // namespace arm_compute -#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */ \ No newline at end of file -- cgit v1.2.1
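
For reference, the prototype removed above was driven in two stages: configure a ClCompositeOperator from a ClWorkload, bind user and auxiliary tensors into per-unit-workload tensor packs with bind_tensors(), then prepare() once and run(). The sketch below is reconstructed purely from the deleted ClCompositeOperator.cpp in this commit and is illustrative only: it assumes a pre-22.11 tree built with ENABLE_EXPERIMENTAL_DYNAMIC_FUSION, and the wrapper run_fused_workload() together with the input_id/weight_id/output_id bindings and the auxiliary-tensor field names are hypothetical placeholders, not library symbols.

// Illustrative only: usage of the removed ClCompositeOperator interface,
// reconstructed from the deleted sources in this commit.
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/experimental/ClWorkload.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/experimental/ClCompositeOperator.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

void run_fused_workload(const ClWorkload &workload, CLTensor &src, CLTensor &wei, CLTensor &dst)
{
    CLScheduler::get().default_init();

    // One ClCompositeKernel is built per unit workload and bucketed into the
    // Prepare or Run stage; configure() also validates the workload.
    ClCompositeOperator op;
    op.configure(CLKernelLibrary::get().get_compile_context(), workload);

    // Bind the caller's tensors to the workload's operator-tensor ids. The ids
    // come from whatever front end produced the ClWorkload; the names below are
    // placeholders, hence left commented out.
    OpTensorBinding op_tensors{};
    // op_tensors[input_id]  = &src;
    // op_tensors[weight_id] = &wei;
    // op_tensors[output_id] = &dst;
    ARM_COMPUTE_UNUSED(src, wei, dst);

    // bind_tensors() fills one ITensorPack per unit workload (prepare- and
    // run-stage packs are kept apart) and records any auxiliary tensors the
    // runtime has to allocate itself.
    ClAuxTensorData aux_data{};
    TensorPackMap   prepare_packs{};
    TensorPackMap   run_packs{};
    ARM_COMPUTE_ERROR_THROW_ON(bind_tensors(aux_data, prepare_packs, run_packs, workload, op_tensors));

    for(auto &aux : aux_data.get_tensors())
    {
        ARM_COMPUTE_UNUSED(aux);
        // Each auxiliary tensor must be allocated before running; the exact member
        // names of the aux-tensor record are not shown in this diff, so the calls
        // are only indicated here:
        // aux.tensor->allocator()->init(aux.tensor_info);
        // aux.tensor->allocator()->allocate();
    }

    // Prepare-stage kernels are enqueued exactly once; run-stage kernels are
    // enqueued on every invocation.
    op.prepare(prepare_packs);
    op.run(run_packs);
}

The split into prepare_packs and run_packs mirrors add_tensor_to_tensor_pack() above: every tensor is routed into the pack of each unit workload that reads or writes it, keyed by unit-workload id, so prepare-stage kernels are enqueued only once, from prepare(), before the first run() call.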