Diffstat (limited to 'src/gpu/cl')
3 files changed, 300 insertions(+), 54 deletions(-)
diff --git a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp
index 472cfb9df0..6c8e4abde7 100644
--- a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp
+++ b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.cpp
@@ -21,13 +21,18 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+#ifndef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
+#error "This experimental feature must be enabled with -DENABLE_EXPERIMENTAL_DYNAMIC_FUSION"
+#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */
 
 #include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h"
+
 #include "arm_compute/core/CL/ICLTensor.h"
 #include "src/core/CL/CLUtils.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
 #include "src/gpu/cl/ClKernelLibrary.h"
+#include "support/Cast.h"
 
 namespace arm_compute
 {
 namespace experimental
@@ -57,81 +62,88 @@ void ClCompositeKernel::configure(const ClCompileContext &compile_ctx, const ClK
     _arguments = cl_code.arguments;
 }
 
-inline void ClCompositeKernel::add_tensor_argument(unsigned int &idx, const ClKernelArgRuntimeDescriptor &arg, ICLTensor *tensor, const Window &arg_slice)
+inline void ClCompositeKernel::add_tensor_argument(unsigned int &idx, const ClKernelArgDescriptor &arg, const ICLTensor *tensor, const Window &arg_slice, std::vector<cl::Image2D> &cl_images)
 {
     switch(arg.tensor_arg_type)
     {
-        case TensorArgType::Scalar:
+        case ClKernelTensorArgType::Scalar:
         {
             ARM_COMPUTE_ERROR("Unsupported yet");
             break;
         }
-        case TensorArgType::Vector:
+
+        case ClKernelTensorArgType::Vector:
         {
             add_1D_tensor_argument(idx, tensor, arg_slice);
             break;
         }
-        case TensorArgType::Image:
+        case ClKernelTensorArgType::Image:
         {
             add_2D_tensor_argument(idx, tensor, arg_slice);
             break;
         }
-        case TensorArgType::Image_Reinterpret_As_3D:
+        case ClKernelTensorArgType::Image_Reinterpret_As_3D:
         {
             add_2D_tensor_argument(idx, tensor, arg_slice);
             const unsigned int total_cross_plane_pad = tensor->info()->padding().top + tensor->info()->padding().bottom;
             _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(total_cross_plane_pad));
             break;
         }
-        case TensorArgType::Image_Export_To_ClImage2D:
+        case ClKernelTensorArgType::Image_Export_To_ClImage2D:
         {
             const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) * tensor->info()->dimension(2) * tensor->info()->dimension(3));
             const size_t      image_row_pitch = tensor->info()->strides_in_bytes()[1];
             cl::Image2D       tensor_image2d  = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d, tensor->info()->data_type(), image_row_pitch);
+            cl_images.push_back(tensor_image2d);
             _kernel.setArg(idx++, tensor_image2d);
             break;
         }
-        case TensorArgType::Image_3D:
+
+        case ClKernelTensorArgType::Image_3D:
         {
             add_2D_tensor_argument(idx, tensor, arg_slice);
             _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(tensor->info()->strides_in_bytes()[2]));
             break;
         }
-        case TensorArgType::Image_3D_Export_To_ClImage2D:
+        case ClKernelTensorArgType::Image_3D_Export_To_ClImage2D:
        {
             const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) * tensor->info()->dimension(2) * tensor->info()->dimension(3));
             const size_t      image_row_pitch = tensor->info()->strides_in_bytes()[1];
             cl::Image2D       tensor_image2d  = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d,
                                                                            tensor->info()->data_type(), image_row_pitch);
+            cl_images.push_back(tensor_image2d);
             _kernel.setArg(idx++, tensor_image2d);
             _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(tensor->info()->strides_in_bytes()[2]));
             break;
         }
-        case TensorArgType::Tensor_3D:
+
+        case ClKernelTensorArgType::Tensor_3D:
         {
             add_3D_tensor_argument(idx, tensor, arg_slice);
             break;
         }
-        case TensorArgType::Tensor_4D:
+
+        case ClKernelTensorArgType::Tensor_4D:
         {
             add_4D_tensor_argument(idx, tensor, arg_slice);
             break;
         }
-        case TensorArgType::Tensor_4D_t_Buffer:
+        case ClKernelTensorArgType::Tensor_4D_t_Buffer:
         {
             add_4d_tensor_nhwc_argument(idx, tensor);
             break;
         }
-        case TensorArgType::Tensor_4D_t_Image:
+        case ClKernelTensorArgType::Tensor_4D_t_Image:
         {
             const size_t image_w        = tensor->info()->dimension(0) / 4;
             const size_t image_h        = tensor->info()->tensor_shape().total_size_upper(1);
             const size_t image_stride_y = tensor->info()->strides_in_bytes()[1];
 
-            cl::Image2D tensor_cl_image = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(),
-                                                                     TensorShape(image_w, image_h), tensor->info()->data_type(), image_stride_y);
+            cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(),
+                                                                    TensorShape(image_w, image_h), tensor->info()->data_type(), image_stride_y);
+            cl_images.push_back(tensor_image2d);
 
-            _kernel.setArg(idx++, tensor_cl_image);
+            _kernel.setArg(idx++, tensor_image2d);
             add_4d_tensor_nhwc_argument(idx, tensor);
             break;
         }
@@ -142,7 +154,7 @@ inline void ClCompositeKernel::add_tensor_argument(unsigned int &idx, const ClKe
     }
 }
 
-void ClCompositeKernel::run_composite_op(TensorBinding &tensors, const Window &window, cl::CommandQueue &queue, const ClExecutionDescriptor &exec_desc)
+void ClCompositeKernel::run_composite_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue, const ClExecutionDescriptor &exec_desc)
 {
     ARM_COMPUTE_UNUSED(exec_desc);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
@@ -160,17 +172,21 @@ void ClCompositeKernel::run_composite_op(TensorBinding &tensors, const Window &w
     {
         // Set kernel arguments
         Window arg_slice = slice;
-        for(auto arg : _arguments)
+        // CLImages created from tensor arguments. Need to be retained until enqueue
+        std::vector<cl::Image2D> cl_images;
+        for(auto id_arg : _arguments)
         {
-            auto tensor = tensors._binding.at(arg.arg_id);
+            const auto arg    = id_arg.second;
+            auto       tensor = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(arg.arg_id));
             ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+            ARM_COMPUTE_ERROR_ON_NULLPTR(tensor->info());
             if(!arg.slide_along_dimz)
             {
                 // The stride_z for matrix must be zero if we do not slice
                 ARM_COMPUTE_ERROR_ON(tensor->info()->strides_in_bytes()[3] != 0);
                 arg_slice = slice_fixed_z;
             }
-            add_tensor_argument(idx, arg, tensor, arg_slice);
+            add_tensor_argument(idx, arg, tensor, arg_slice, cl_images);
         }
 
         // Dispatch kernel
@@ -180,12 +196,6 @@ void ClCompositeKernel::run_composite_op(TensorBinding &tensors, const Window &w
     while(!exec_desc.skip_sliding_window && window.slide_window_slice_3D(slice));
 }
 
-Status bind_arguments(ITensorPack &, const ClKernelCode &, const TensorBinding &)
-{
-    return Status{};
-}
 } // namespace dynamic_fusion
 } // namespace experimental
-} // namespace arm_compute
-
-#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
+} // namespace arm_compute
\ No newline at end of file
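Why the new `cl_images` parameter: `cl::Kernel::setArg()` only copies the underlying `cl_mem` handle, while the temporary `cl::Image2D` wrapper releases that handle when it goes out of scope, so every image created inside `add_tensor_argument()` is now parked in a vector that stays alive until the kernel is enqueued. A minimal sketch of this lifetime pattern, written directly against the Khronos C++ bindings (the function and variable names below are illustrative, not part of the patch):

```cpp
#include <vector>
#include <CL/opencl.hpp> // Khronos OpenCL C++ bindings (assumed available)

// Sketch: images created per dispatch must outlive setArg() until the kernel
// is enqueued, otherwise the cl_mem handle may be released while still in use.
void enqueue_with_images(cl::Kernel &kernel, cl::CommandQueue &queue,
                         std::vector<cl::Image2D> images, // retained for the duration of the call
                         const cl::NDRange &gws)
{
    cl_uint idx = 0;
    for(const auto &img : images)
    {
        kernel.setArg(idx++, img); // copies the handle; does not extend the image's lifetime
    }
    // 'images' is still in scope here, so every handle is valid at enqueue time
    queue.enqueueNDRangeKernel(kernel, cl::NullRange, gws, cl::NullRange);
}
```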
diff --git a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h
index 19efb505eb..bf70d6a226 100644
--- a/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h
+++ b/src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h
@@ -21,13 +21,14 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+#ifndef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
+#error "This experimental feature must be enabled with -DENABLE_EXPERIMENTAL_DYNAMIC_FUSION"
+#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */
 
 #ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H
 #define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H
 
-#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
-
+#include "arm_compute/core/experimental/ClWorkload.h"
 #include "src/gpu/cl/ClCompileContext.h"
 #include "src/gpu/cl/IClKernel.h"
 
@@ -37,47 +38,40 @@ namespace experimental
 {
 namespace dynamic_fusion
 {
-struct TensorBinding
-{
-    TensorBinding(const std::map<ArgumentID, ICLTensor *> binding)
-        : _binding{ binding }
-    {
-    }
-    bool empty() const
-    {
-        return _binding.empty();
-    }
-    std::map<ArgumentID, ICLTensor *> _binding;
-};
-class ClCompositeKernel : public opencl::IClKernel
+struct ClExecutionDescriptor;
+struct ClKernelCode;
+
+class ClCompositeKernel final : public opencl::IClKernel
 {
 public:
     void configure(const opencl::ClCompileContext &, const ClKernelCode &);
 
     /** Run the composite kernel
+     * @note The slots / keys in ITensorPack are the argument Ids of the tensors in blueprint
      *
-     * @param tensors   TensorBinding object containing run-time tensors information
+     * @param tensors   ITensorPack object containing run-time tensor memories
      * @param window    Execution window
     * @param queue     OpenCL Command queue
     * @param exec_desc Descriptor containing execution information
     */
-    virtual void run_composite_op(TensorBinding &tensors, const Window &window, cl::CommandQueue &queue, const ClExecutionDescriptor &exec_desc) override;
+    virtual void run_composite_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue, const ClExecutionDescriptor &exec_desc) override;
 
 private:
-    inline void add_tensor_argument(unsigned int &idx, const ClKernelArgRuntimeDescriptor &arg, ICLTensor *tensor, const Window &arg_slice);
+    /** Set a kernel tensor argument
+     *
+     * @param[in,out] idx       Index at which to start adding the tensor's arguments. Will be incremented by the number of kernel arguments set.
+     * @param[in]     arg       Kernel argument descriptor accompanying @p tensor
+     * @param[in]     tensor    Tensor to set as an argument of the object's kernel.
+     * @param[in]     arg_slice Window the kernel will be run on.
+     * @param[out]    cl_images Extra cl images created from the tensor (will need to be retained until the kernel is enqueued)
+     */
+    inline void add_tensor_argument(unsigned int &idx, const ClKernelArgDescriptor &arg, const ICLTensor *tensor, const Window &arg_slice, std::vector<cl::Image2D> &cl_images);
 
 private:
     ClKernelArgList _arguments{}; /** All kernel arguments required by runtime */
 };
 
-/** Argument Binding.
- *  Tensor Arguments to ICLKernel run_op method need to be passed via an ITensorPack.
- *  So the bind_arguments is essentially a converter from TensorBinding to ITensorPack
- */
-Status bind_arguments(ITensorPack &tensor_pack, const ClKernelCode &, const TensorBinding &);
-
 } // namespace dynamic_fusion
 } // namespace experimental
 } // namespace arm_compute
-#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H
-
-#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
+#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_CLCOMPOSITEKERNEL_H
\ No newline at end of file
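As the new `@note` states, the `ITensorPack` passed to `run_composite_op()` is keyed by blueprint argument ids rather than by the removed `TensorBinding` map. A hypothetical call site might look like the sketch below; the argument ids and tensor names are placeholders, since real ids come from the blueprint that produced the `ClKernelCode`:

```cpp
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

// Sketch: dispatch a configured composite kernel once. kSrcArgId/kDstArgId
// stand in for the argument ids assigned when the blueprint was built.
void dispatch_once(ClCompositeKernel &kernel, ICLTensor &src, ICLTensor &dst, const Window &window)
{
    constexpr int kSrcArgId = 0; // hypothetical blueprint argument id
    constexpr int kDstArgId = 1; // hypothetical blueprint argument id

    // Pack slots are the blueprint argument ids, per the @note above
    ITensorPack tensors{ { kSrcArgId, &src }, { kDstArgId, &dst } };
    kernel.run_composite_op(tensors, window, CLScheduler::get().queue(), ClExecutionDescriptor{});
}
```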
diff --git a/src/gpu/cl/operators/experimental/dynamic_fusion/ClCompositeOperator.cpp b/src/gpu/cl/operators/experimental/dynamic_fusion/ClCompositeOperator.cpp
new file mode 100644
index 0000000000..984de74249
--- /dev/null
+++ b/src/gpu/cl/operators/experimental/dynamic_fusion/ClCompositeOperator.cpp
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
+#error "This experimental feature must be enabled with -DENABLE_EXPERIMENTAL_DYNAMIC_FUSION"
+#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */
+#include "arm_compute/runtime/experimental/ClCompositeOperator.h"
+
+#include "arm_compute/core/experimental/ClWorkload.h"
+#include "arm_compute/core/experimental/Types.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/gpu/cl/kernels/experimental/dynamic_fusion/ClCompositeKernel.h"
+#include "support/Cast.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+Status add_tensor_to_tensor_pack(int wk_tensor_id, ICLTensor *tensor, const ClWorkload &workload, TensorPackMap &prepare_pack_map, TensorPackMap &run_pack_map)
+{
+    if(tensor == nullptr)
+    {
+        return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Trying to add a nullptr into the tensor packs");
+    }
+    const auto bp_tensor_id = workload.tensors.at(wk_tensor_id).kernel_arg.arg_id; // blueprint tensor id
+    std::vector<ClWorkload::UnitWorkId> uwk_ids{};
+    const auto src_uwk_ids = workload.graph.src_ops_from_tensor(wk_tensor_id);
+    const auto dst_uwk_ids = workload.graph.dst_ops_from_tensor(wk_tensor_id);
+    uwk_ids.insert(uwk_ids.end(), src_uwk_ids.begin(), src_uwk_ids.end());
+    uwk_ids.insert(uwk_ids.end(), dst_uwk_ids.begin(), dst_uwk_ids.end());
+
+    for(auto uwk_id : uwk_ids)
+    {
+        TensorPackMap *pack_map  = nullptr;
+        const auto     uwk_stage = workload.unit_workloads.at(uwk_id).stage.stage;
+        switch(uwk_stage)
+        {
+            case UnitWorkloadStage::Stage::Run:
+                pack_map = &run_pack_map;
+                break;
+            case UnitWorkloadStage::Stage::Prepare:
+                pack_map = &prepare_pack_map;
+                break;
+            default:
+                return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported workload stage");
+        }
+
+        ITensorPack *tensor_pack = pack_map->find_tensor_pack(uwk_id);
+        if(tensor_pack == nullptr)
+        {
+            pack_map->add_tensor_pack(uwk_id, ITensorPack{ { bp_tensor_id, tensor } });
+        }
+        else
+        {
+            tensor_pack->add_tensor(bp_tensor_id, tensor);
+        }
+    }
+    return Status{};
+}
+
+} // namespace
+
+ITensorPack *TensorPackMap::find_tensor_pack(UnitWorkload::Id uwk_id)
+{
+    auto tensor_pack = _tensor_packs.find(uwk_id);
+    if(tensor_pack != _tensor_packs.end())
+    {
+        return &(tensor_pack->second);
+    }
+    return nullptr;
+}
+
+ITensorPack &TensorPackMap::get_tensor_pack(UnitWorkload::Id uwk_id)
+{
+    return _tensor_packs.at(uwk_id);
+}
+
+void TensorPackMap::add_tensor_pack(UnitWorkload::Id uwk_id, const ITensorPack &tensor_pack)
+{
+    _tensor_packs[uwk_id] = tensor_pack;
+}
+
+Status bind_tensors(ClAuxTensorData &aux_tensor_data, TensorPackMap &prepare_pack_map, TensorPackMap &run_pack_map, const ClWorkload &workload, const OpTensorBinding &op_tensors)
+{
+    for(auto tensor : workload.tensors)
+    {
+        const auto wk_tensor_id  = tensor.first; // workload tensor id
+        ICLTensor *tensor_object = nullptr;
+        if(tensor.second.memory_type == MemoryType::Core)
+        {
+            const auto op_tensor_id   = workload.op_tensor_id_lut.at(wk_tensor_id);
+            auto       op_tensor_find = op_tensors.find(op_tensor_id);
+            if(op_tensor_find == op_tensors.end())
+            {
+                return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Cannot find binding for some operator tensor");
+            }
+            tensor_object = utils::cast::polymorphic_downcast<ICLTensor *>(op_tensor_find->second);
+        }
+        else if(tensor.second.memory_type == MemoryType::Auxiliary)
+        {
+            // Create aux tensor CLTensor object
+            const TensorInfo tensor_info = *tensor.second.info;
+            const auto       memory_info = tensor.second.memory_info;
+            tensor_object                = aux_tensor_data.add_aux_tensor(wk_tensor_id, tensor_info, memory_info);
+        }
+        else
+        {
+            return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Unsupported tensor memory type");
+        }
+
+        const auto st = add_tensor_to_tensor_pack(wk_tensor_id, tensor_object, workload, prepare_pack_map, run_pack_map);
+        ARM_COMPUTE_RETURN_ON_ERROR(st);
+    }
+    return Status{};
+}
+
+CLTensor *ClAuxTensorData::add_aux_tensor(int tensor_id, const ITensorInfo &tensor_info, const AuxMemoryInfo &memory_info)
+{
+    // Return the existing tensor if one was already created for this id,
+    // otherwise create, register and return a new one
+    auto find_tensor_pair = _owned_tensors.find(tensor_id);
+    if(find_tensor_pair != _owned_tensors.end())
+    {
+        return find_tensor_pair->second.get();
+    }
+    else
+    {
+        auto tensor        = std::make_unique<CLTensor>();
+        auto inserted_pair = _owned_tensors.emplace(tensor_id, std::move(tensor)).first;
+        auto new_tensor    = inserted_pair->second.get();
+        _tensors.emplace_back(new_tensor, tensor_info, memory_info);
+        return new_tensor;
+    }
+}
+
+std::vector<ClAuxTensorData::DataView> &ClAuxTensorData::get_tensors()
+{
+    return _tensors;
+}
+
+struct ClCompositeOperator::Implementation
+{
+    std::map<UnitWorkload::Id, std::unique_ptr<ClCompositeKernel>> _kernels{};
+    std::map<UnitWorkload::Id, std::unique_ptr<ClCompositeKernel>> _kernels_prep{};
+    ClWorkload                                                     _workload{};
+    bool                                                           _is_prepared{ false };
+};
+
+ClCompositeOperator::ClCompositeOperator()
+    : _impl{ std::make_unique<Implementation>() }
+{
+}
+
+ClCompositeOperator::~ClCompositeOperator() = default;
+
+void ClCompositeOperator::configure(const CLCompileContext &ctx, const ClWorkload &workload)
+{
+    ARM_COMPUTE_ERROR_THROW_ON(ClCompositeOperator::validate(workload));
+    _impl->_workload = workload;
+
+    // Traverse workloads in topological order
+    const auto sorted = workload.graph.topological_sort().second;
+    for(const auto &node : sorted)
+    {
+        auto work  = workload.unit_workloads.at(node.op);
+        auto stage = work.stage.stage;
+        auto k     = std::make_unique<ClCompositeKernel>();
+        k->configure(ctx, work.code);
+
+        switch(stage)
+        {
+            case UnitWorkloadStage::Stage::Run:
+                _impl->_kernels.emplace(work.id, std::move(k));
+                break;
+            case UnitWorkloadStage::Stage::Prepare:
+                _impl->_kernels_prep.emplace(work.id, std::move(k));
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Invalid stage");
+        }
+    }
+}
+
+Status ClCompositeOperator::validate(const ClWorkload &workload)
+{
+    return workload.status;
+}
+
+void ClCompositeOperator::prepare(TensorPackMap &tensor_pack_map)
+{
+    if(!_impl->_is_prepared)
+    {
+        for(auto &id_kernel_pair : _impl->_kernels_prep)
+        {
+            const bool flush_queue = false;
+            const auto uwk_id      = id_kernel_pair.first;
+            auto       kernel      = id_kernel_pair.second.get();
+            CLScheduler::get().enqueue_op(*kernel, tensor_pack_map.get_tensor_pack(uwk_id), ClExecutionDescriptor{}, flush_queue);
+        }
+
+        _impl->_is_prepared = true;
+    }
+}
+
+void ClCompositeOperator::run(TensorPackMap &tensor_pack_map)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(!_impl->_is_prepared, "Operator is not prepared");
+
+    for(auto &id_kernel_pair : _impl->_kernels)
+    {
+        // The queue is not flushed here; flushing is left to the caller
+        const bool flush_queue = false;
+        const auto uwk_id      = id_kernel_pair.first;
+        auto       kernel      = id_kernel_pair.second.get();
+        CLScheduler::get().enqueue_op(*kernel, tensor_pack_map.get_tensor_pack(uwk_id), ClExecutionDescriptor{}, flush_queue);
+    }
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
\ No newline at end of file
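For context, the pieces added in this file are meant to be driven in a validate → configure → bind_tensors → prepare → run sequence. The sketch below assumes a `ClWorkload` and an `OpTensorBinding` already exist, and guesses `DataView`'s member names (`tensor`, `tensor_info`) from the `emplace_back(new_tensor, tensor_info, memory_info)` call above:

```cpp
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/runtime/experimental/ClCompositeOperator.h"

using namespace arm_compute;
using namespace arm_compute::experimental::dynamic_fusion;

// Sketch: the full lifecycle of the new operator for one workload.
Status run_workload(const ClWorkload &workload, const OpTensorBinding &op_tensors)
{
    // Validate, then compile the unit workloads into composite kernels
    ARM_COMPUTE_RETURN_ON_ERROR(ClCompositeOperator::validate(workload));
    ClCompositeOperator op;
    op.configure(CLKernelLibrary::get().get_compile_context(), workload);

    // Bind user and auxiliary tensors to per-unit-workload tensor packs
    ClAuxTensorData aux_tensor_data{};
    TensorPackMap   prepare_pack_map{};
    TensorPackMap   run_pack_map{};
    ARM_COMPUTE_RETURN_ON_ERROR(bind_tensors(aux_tensor_data, prepare_pack_map, run_pack_map, workload, op_tensors));

    // Back the auxiliary tensors with memory; allocation policy is up to the
    // caller (DataView member names are assumed, see lead-in)
    for(auto &data : aux_tensor_data.get_tensors())
    {
        data.tensor->allocator()->init(data.tensor_info);
        data.tensor->allocator()->allocate();
    }

    // Enqueue prepare-stage kernels once, then the run-stage kernels
    op.prepare(prepare_pack_map);
    op.run(run_pack_map);
    return Status{};
}
```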