Diffstat (limited to 'src/dynamic_fusion/runtime'):

 src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp                         | 61
 src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.h                           | 11
 src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp                       | 81
 src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.cpp |  7
 src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.h   |  5
 5 files changed, 100 insertions(+), 65 deletions(-)
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
index 15a5632d0b..9ca20fa152 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.cpp
@@ -22,14 +22,15 @@
* SOFTWARE.
*/
#include "ClKernelRuntime.h"
+
#include "arm_compute/core/CL/ICLTensor.h"
+
#include "src/core/CL/CLUtils.h"
#ifdef ACL_INTERNAL_TEST_CKW_IN_DF
#include "src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.h"
#endif // ACL_INTERNAL_TEST_CKW_IN_DF
#include "src/dynamic_fusion/sketch/gpu/GpuKernelSourceCode.h"
#include "src/gpu/cl/ClKernelLibrary.h"
-
#include "support/Cast.h"
namespace arm_compute
{
@@ -43,13 +44,12 @@ void ClKernelRuntime::configure(const ClCompileContext &compile_ctx, const GpuKe
{
// Create kernel from kernel source string
opencl::ClKernelLibrary &klib = opencl::ClKernelLibrary::get();
- _kernel = static_cast<cl::Kernel>(compile_ctx.create_kernel(code.name(),
- code.name(), // program name has to be provided to differentiate between different unfusable components' kernels.
- // Each program contains exactly one kernel
- code.code(),
- klib.kernel_path() /* Kernel path: Used in cases of embedded kernels */,
- code.build_options().options(),
- false /* Is source binary */));
+ _kernel = static_cast<cl::Kernel>(compile_ctx.create_kernel(
+ code.name(),
+ code.name(), // program name has to be provided to differentiate between different unfusable components' kernels.
+ // Each program contains exactly one kernel
+ code.code(), klib.kernel_path() /* Kernel path: Used in cases of embedded kernels */,
+ code.build_options().options(), false /* Is source binary */));
// Configure execution window
IClKernel::configure_internal(code.window());
@@ -63,11 +63,15 @@ void ClKernelRuntime::configure(const ClCompileContext &compile_ctx, const GpuKe
#ifndef ACL_INTERNAL_TEST_CKW_IN_DF
-inline void ClKernelRuntime::add_tensor_argument(unsigned int &idx, const GpuKernelArgumentInfo &arg, const ICLTensor *tensor, const Window &arg_slice, std::vector<cl::Image2D> &cl_images)
+inline void ClKernelRuntime::add_tensor_argument(unsigned int &idx,
+ const GpuKernelArgumentInfo &arg,
+ const ICLTensor *tensor,
+ const Window &arg_slice,
+ std::vector<cl::Image2D> &cl_images)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
- switch(arg.type)
+ switch (arg.type)
{
case GpuKernelArgumentInfo::Type::Scalar:
{
@@ -95,9 +99,13 @@ inline void ClKernelRuntime::add_tensor_argument(unsigned int &idx, const GpuKer
}
case GpuKernelArgumentInfo::Type::Image_Export_To_ClImage2D:
{
- const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) * tensor->info()->dimension(2) * tensor->info()->dimension(3));
+ const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) *
+ tensor->info()->dimension(2) *
+ tensor->info()->dimension(3));
const size_t image_row_pitch = tensor->info()->strides_in_bytes()[1];
- cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d, tensor->info()->data_type(), image_row_pitch, CLImage2DType::ReadOnly);
+ cl::Image2D tensor_image2d =
+ create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d,
+ tensor->info()->data_type(), image_row_pitch, CLImage2DType::ReadOnly);
cl_images.push_back(tensor_image2d);
_kernel.setArg(idx++, tensor_image2d);
break;
@@ -111,9 +119,13 @@ inline void ClKernelRuntime::add_tensor_argument(unsigned int &idx, const GpuKer
}
case GpuKernelArgumentInfo::Type::Image_3D_Export_To_ClImage2D:
{
- const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) * tensor->info()->dimension(2) * tensor->info()->dimension(3));
+ const TensorShape shape2d(tensor->info()->dimension(0) / 4, tensor->info()->dimension(1) *
+ tensor->info()->dimension(2) *
+ tensor->info()->dimension(3));
const size_t image_row_pitch = tensor->info()->strides_in_bytes()[1];
- cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d, tensor->info()->data_type(), image_row_pitch, CLImage2DType::ReadOnly);
+ cl::Image2D tensor_image2d =
+ create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(), shape2d,
+ tensor->info()->data_type(), image_row_pitch, CLImage2DType::ReadOnly);
cl_images.push_back(tensor_image2d);
_kernel.setArg(idx++, tensor_image2d);
_kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(tensor->info()->strides_in_bytes()[2]));
@@ -142,8 +154,9 @@ inline void ClKernelRuntime::add_tensor_argument(unsigned int &idx, const GpuKer
const size_t image_h = tensor->info()->tensor_shape().total_size_upper(1);
const size_t image_stride_y = tensor->info()->strides_in_bytes()[1];
- cl::Image2D tensor_image2d = create_image2d_from_buffer(CLKernelLibrary::get().context(), tensor->cl_buffer(),
- TensorShape(image_w, image_h), tensor->info()->data_type(), image_stride_y, CLImage2DType::ReadOnly);
+ cl::Image2D tensor_image2d = create_image2d_from_buffer(
+ CLKernelLibrary::get().context(), tensor->cl_buffer(), TensorShape(image_w, image_h),
+ tensor->info()->data_type(), image_stride_y, CLImage2DType::ReadOnly);
cl_images.push_back(tensor_image2d);
_kernel.setArg(idx++, tensor_image2d);
@@ -170,13 +183,16 @@ inline void ClKernelRuntime::add_tensor_argument(unsigned int &idx, const GpuKer
}
#else // ACL_INTERNAL_TEST_CKW_IN_DF
-inline void ClKernelRuntime::add_kernel_argument(unsigned int &idx, const GpuKernelArgumentBinding &arg, const ICLTensor *tensor, std::vector<cl::Image2D> &cl_images)
+inline void ClKernelRuntime::add_kernel_argument(unsigned int &idx,
+ const GpuKernelArgumentBinding &arg,
+ const ICLTensor *tensor,
+ std::vector<cl::Image2D> &cl_images)
{
- switch(arg.type())
+ switch (arg.type())
{
case GpuKernelArgumentBinding::Type::TensorStorage:
{
- switch(arg.tensor_storage_type())
+ switch (arg.tensor_storage_type())
{
case TensorStorageType::ClBufferUint8Ptr:
{
@@ -238,7 +254,7 @@ void ClKernelRuntime::run_op(ITensorPack &tensors, const Window &window, cl::Com
// CLImages created from tensor arguments. Need to be retained until enqueue
std::vector<cl::Image2D> cl_images;
#ifndef ACL_INTERNAL_TEST_CKW_IN_DF
- for(auto id_arg : _arguments)
+ for (auto id_arg : _arguments)
{
const auto arg = id_arg.second;
auto tensor = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(id_arg.first));
@@ -248,7 +264,7 @@ void ClKernelRuntime::run_op(ITensorPack &tensors, const Window &window, cl::Com
}
#else // ACL_INTERNAL_TEST_CKW_IN_DF
- for(const auto &arg : _arguments)
+ for (const auto &arg : _arguments)
{
auto tensor = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(arg.id()));
ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
@@ -259,8 +275,7 @@ void ClKernelRuntime::run_op(ITensorPack &tensors, const Window &window, cl::Com
// Dispatch kernel
enqueue(queue, *this, slice, lws_hint(), use_dummy_work_items);
- }
- while(skip_sliding_window && window.slide_window_slice_3D(slice));
+ } while (skip_sliding_window && window.slide_window_slice_3D(slice));
}
} // namespace dynamic_fusion
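
The Image_Export_To_ClImage2D hunks above repeat one piece of arithmetic: the tensor buffer is re-viewed as a 2D image whose width is the innermost dimension divided by 4 (one image texel packs four elements) and whose height collapses the remaining dimensions, with the row pitch taken from the tensor's Y-stride in bytes. A minimal standalone sketch of that mapping follows; Image2DShape and make_image2d_shape are illustrative names, not library types.

    #include <cstddef>
    #include <iostream>

    // Models the shape2d / image_row_pitch computation from
    // ClKernelRuntime::add_tensor_argument above.
    struct Image2DShape
    {
        std::size_t width;     // dimension(0) / 4: four elements per texel
        std::size_t height;    // dimension(1) * dimension(2) * dimension(3)
        std::size_t row_pitch; // strides_in_bytes()[1]
    };

    Image2DShape make_image2d_shape(const std::size_t dims[4], std::size_t stride_y_bytes)
    {
        return {dims[0] / 4, dims[1] * dims[2] * dims[3], stride_y_bytes};
    }

    int main()
    {
        const std::size_t dims[4] = {64, 8, 4, 2}; // e.g. a 64x8x4x2 FP32 tensor
        const Image2DShape s      = make_image2d_shape(dims, 64 * sizeof(float));
        std::cout << s.width << " x " << s.height << ", pitch " << s.row_pitch << " B\n"; // 16 x 64, pitch 256 B
    }
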
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.h b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.h
index 92e73503ce..e78567eb9d 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.h
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.h
@@ -68,7 +68,11 @@ private:
* @param[in] arg_slice Window the kernel will be run on
* @param[out] cl_images Extra cl images created from the tensor (will need to be retained until the kernel is enqueued)
*/
- inline void add_tensor_argument(unsigned int &idx, const GpuKernelArgumentInfo &arg, const ICLTensor *tensor, const Window &arg_slice, std::vector<cl::Image2D> &cl_images);
+ inline void add_tensor_argument(unsigned int &idx,
+ const GpuKernelArgumentInfo &arg,
+ const ICLTensor *tensor,
+ const Window &arg_slice,
+ std::vector<cl::Image2D> &cl_images);
#else // ACL_INTERNAL_TEST_CKW_IN_DF
/** Set a kernel argument as part of a tensor
*
@@ -77,7 +81,10 @@ private:
* @param[in] tensor Tensor of which the kernel argument @p arg is a part of
* @param[out] cl_images Extra cl images created from the tensor (will need to be retained until the kernel is enqueued)
*/
- inline void add_kernel_argument(unsigned int &idx, const GpuKernelArgumentBinding &arg, const ICLTensor *tensor, std::vector<cl::Image2D> &cl_images);
+ inline void add_kernel_argument(unsigned int &idx,
+ const GpuKernelArgumentBinding &arg,
+ const ICLTensor *tensor,
+ std::vector<cl::Image2D> &cl_images);
#endif // ACL_INTERNAL_TEST_CKW_IN_DF
private:
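
Note the signature carried through both declarations: idx is passed as unsigned int & because each helper consumes as many consecutive OpenCL argument slots as its tensor needs (buffer, image, strides, offsets) and must leave the counter at the next free slot. A runnable toy of that protocol, with stand-in types (MockKernel and add_scalar_argument are not Compute Library classes):

    #include <iostream>
    #include <vector>

    struct MockKernel
    {
        std::vector<unsigned int> used_slots;
        void set_arg(unsigned int slot, int /*value*/) { used_slots.push_back(slot); }
    };

    // Consumes exactly one slot and advances the shared index, mirroring the
    // _kernel.setArg(idx++, ...) pattern in ClKernelRuntime.cpp.
    void add_scalar_argument(MockKernel &kernel, unsigned int &idx, int value)
    {
        kernel.set_arg(idx++, value);
    }

    int main()
    {
        MockKernel   kernel;
        unsigned int idx = 0;
        add_scalar_argument(kernel, idx, 1);
        add_scalar_argument(kernel, idx, 2);
        std::cout << "next free slot: " << idx << '\n'; // prints 2
    }
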
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
index cd21b10180..ba39ff4c9d 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
+
#include "src/dynamic_fusion/runtime/gpu/cl/ClKernelRuntime.h"
#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSourceCode.h"
@@ -55,14 +56,14 @@ public:
{
DataView() = default;
DataView(CLTensor *tensor, const TensorInfo &tensor_info, const AuxMemoryInfo &memory_info)
- : tensor{ tensor }, tensor_info{ tensor_info }, memory_info{ memory_info }
+ : tensor{tensor}, tensor_info{tensor_info}, memory_info{memory_info}
{
}
- ~DataView() = default;
- DataView(const DataView &other) = default;
+ ~DataView() = default;
+ DataView(const DataView &other) = default;
DataView &operator=(const DataView &other) = default;
DataView(DataView &&other) = default;
- DataView &operator=(DataView &&other) = default;
+ DataView &operator=(DataView &&other) = default;
CLTensor *tensor{}; /**< Pointer to the auxiliary tensor */
TensorInfo tensor_info{}; /**< Associated tensor info */
AuxMemoryInfo memory_info{}; /**< Memory requirement */
@@ -92,7 +93,7 @@ private:
{
const auto t_id = tensor_info.id();
auto find_tensor_pair = _owned_tensors.find(t_id);
- if(find_tensor_pair != _owned_tensors.end())
+ if (find_tensor_pair != _owned_tensors.end())
{
return find_tensor_pair->second.get();
}
@@ -107,7 +108,7 @@ private:
}
std::map<ITensorInfo::Id, std::unique_ptr<CLTensor>> _owned_tensors{};
- std::vector<DataView> _tensors{};
+ std::vector<DataView> _tensors{};
};
/** Construct auxiliary tensors required by @ref GpuWorkloadSourceCode
*
@@ -120,12 +121,12 @@ private:
*/
Status create_aux_tensors(ClAuxTensors *aux_tensors, const GpuWorkloadSourceCode &code)
{
- for(auto t_id : code.tensors())
+ for (auto t_id : code.tensors())
{
// Get tensor object
const auto workload_arg = code.query_tensor(t_id);
ICLTensor *tensor_object = nullptr;
- if(workload_arg->memory_descriptor()->memory_type == MemoryType::Auxiliary)
+ if (workload_arg->memory_descriptor()->memory_type == MemoryType::Auxiliary)
{
// Create aux tensor CLTensor object
const TensorInfo tensor_info = *workload_arg->tensor_info();
@@ -133,7 +134,7 @@ Status create_aux_tensors(ClAuxTensors *aux_tensors, const GpuWorkloadSourceCode
const auto aux_memory_info = workload_arg->memory_descriptor()->aux_memory_info;
tensor_object = aux_tensors->add_aux_tensor(tensor_info, aux_memory_info);
- if(tensor_object == nullptr)
+ if (tensor_object == nullptr)
{
return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Failed to construct an auxiliary tensor");
}
@@ -156,7 +157,7 @@ public:
ITensorPack *find_tensor_pack(UnitWorkloadId uwk_id)
{
auto tensor_pack = _tensor_packs.find(uwk_id);
- if(tensor_pack != _tensor_packs.end())
+ if (tensor_pack != _tensor_packs.end())
{
return &(tensor_pack->second);
}
@@ -173,7 +174,10 @@ public:
return _tensor_packs.at(uwk_id);
}
- friend Status create_tensor_lut(ClTensorLUT *tensor_lut, const GpuWorkloadSourceCode &code, const std::vector<CLTensor *> &user_tensors, const ClAuxTensors &aux_tensors);
+ friend Status create_tensor_lut(ClTensorLUT *tensor_lut,
+ const GpuWorkloadSourceCode &code,
+ const std::vector<CLTensor *> &user_tensors,
+ const ClAuxTensors &aux_tensors);
private:
/** Add a tensor pack and associate it with @ref UnitWorkloadId @p uwk_id
@@ -197,19 +201,22 @@ private:
*
* @return Status
*/
-Status create_tensor_lut(ClTensorLUT *tensor_lut, const GpuWorkloadSourceCode &code, const std::vector<CLTensor *> &user_tensors, const ClAuxTensors &aux_tensors)
+Status create_tensor_lut(ClTensorLUT *tensor_lut,
+ const GpuWorkloadSourceCode &code,
+ const std::vector<CLTensor *> &user_tensors,
+ const ClAuxTensors &aux_tensors)
{
// Combine user tensors and aux tensors
std::map<ITensorInfo::Id, CLTensor *> tensor_map;
- for(auto tensor : user_tensors)
+ for (auto tensor : user_tensors)
{
const auto t_id = tensor->info()->id();
- if(tensor_map.find(t_id) != tensor_map.end())
+ if (tensor_map.find(t_id) != tensor_map.end())
{
// In case of elementwise in-place: give another Id to the In/Out tensor when passed again
std::vector<ITensorInfo::Id> ids;
- for(auto &t : tensor_map)
+ for (auto &t : tensor_map)
{
ids.push_back(t.first);
}
@@ -221,11 +228,11 @@ Status create_tensor_lut(ClTensorLUT *tensor_lut, const GpuWorkloadSourceCode &c
tensor_map[t_id] = tensor;
}
}
- for(const auto &data : aux_tensors.get_tensors())
+ for (const auto &data : aux_tensors.get_tensors())
{
const auto t_id = data.tensor_info.id();
const auto tensor = data.tensor;
- if(tensor_map.find(t_id) != tensor_map.end())
+ if (tensor_map.find(t_id) != tensor_map.end())
{
return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Clashing tensor ids");
}
@@ -233,25 +240,25 @@ Status create_tensor_lut(ClTensorLUT *tensor_lut, const GpuWorkloadSourceCode &c
}
// Add tensor objects into corresponding tensor packs
- for(auto id_tensor : tensor_map)
+ for (auto id_tensor : tensor_map)
{
const auto t_id = id_tensor.first;
const auto tensor_object = id_tensor.second;
- if(tensor_object == nullptr)
+ if (tensor_object == nullptr)
{
return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Trying to add a nullptr into the tensor packs");
}
- if(tensor_object->allocator()->info().total_size() == 0U)
+ if (tensor_object->allocator()->info().total_size() == 0U)
{
return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "No allocated memory found in tensor");
}
- for(auto uwk_id : code.get_unit_workloads_from_tensor(t_id))
+ for (auto uwk_id : code.get_unit_workloads_from_tensor(t_id))
{
ITensorPack *tensor_pack = tensor_lut->find_tensor_pack(uwk_id);
- if(tensor_pack == nullptr)
+ if (tensor_pack == nullptr)
{
- tensor_lut->add_tensor_pack(uwk_id, ITensorPack{ { t_id, tensor_object } });
+ tensor_lut->add_tensor_pack(uwk_id, ITensorPack{{t_id, tensor_object}});
}
else
{
@@ -269,15 +276,14 @@ struct ClWorkloadRuntime::Implementation
{
std::map<UnitWorkloadId, std::unique_ptr<ClKernelRuntime>> _kernels{};
std::map<UnitWorkloadId, std::unique_ptr<ClKernelRuntime>> _kernels_prep{};
- bool _is_configured{ false };
- bool _is_prepared{ false };
- ClTensorLUT _tensor_lut{};
- ClAuxTensors _aux_tensors{};
- GpuWorkloadSourceCode _source_code{};
+ bool _is_configured{false};
+ bool _is_prepared{false};
+ ClTensorLUT _tensor_lut{};
+ ClAuxTensors _aux_tensors{};
+ GpuWorkloadSourceCode _source_code{};
};
-ClWorkloadRuntime::ClWorkloadRuntime()
- : _impl{ std::make_unique<Implementation>() }
+ClWorkloadRuntime::ClWorkloadRuntime() : _impl{std::make_unique<Implementation>()}
{
}
@@ -286,18 +292,19 @@ ClWorkloadRuntime::~ClWorkloadRuntime() = default;
Status ClWorkloadRuntime::configure(const GpuWorkloadSketch &sketch)
{
ARM_COMPUTE_RETURN_ERROR_ON_MSG(_impl->_is_configured, "ClWorkloadRuntime cannot be re-configured");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(sketch.gpu_context()->gpu_language() != GpuLanguage::OpenCL, "ClWorkloadRuntime cannot be configured with non-OpenCL workload sketch");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(sketch.gpu_context()->gpu_language() != GpuLanguage::OpenCL,
+ "ClWorkloadRuntime cannot be configured with non-OpenCL workload sketch");
// Generate source code
_impl->_source_code = sketch.implementation().generate_source_code();
// Configure unit workload from source code
- for(auto uwk_id : _impl->_source_code.unit_workloads())
+ for (auto uwk_id : _impl->_source_code.unit_workloads())
{
const auto work = _impl->_source_code.query_unit_workload(uwk_id);
const auto stage = work.stage().stage;
auto k = std::make_unique<ClKernelRuntime>();
k->configure(*sketch.gpu_context()->cl_compile_context(), work.code());
- switch(stage)
+ switch (stage)
{
case UnitWorkloadStage::Stage::Run:
{
@@ -323,9 +330,9 @@ Status ClWorkloadRuntime::configure(const GpuWorkloadSketch &sketch)
void ClWorkloadRuntime::prepare()
{
- if(!_impl->_is_prepared)
+ if (!_impl->_is_prepared)
{
- for(auto &id_kernel_pair : _impl->_kernels_prep)
+ for (auto &id_kernel_pair : _impl->_kernels_prep)
{
const bool flush_queue = false;
const auto uwk_id = id_kernel_pair.first;
@@ -344,7 +351,7 @@ Status ClWorkloadRuntime::run(const std::vector<CLTensor *> &tensors)
const auto st = create_tensor_lut(&_impl->_tensor_lut, _impl->_source_code, tensors, _impl->_aux_tensors);
ARM_COMPUTE_RETURN_ON_ERROR(st);
prepare();
- for(auto &id_kernel_pair : _impl->_kernels)
+ for (auto &id_kernel_pair : _impl->_kernels)
{
// Flush the command queue on the last kernel
const bool flush_queue = false;
@@ -358,7 +365,7 @@ Status ClWorkloadRuntime::run(const std::vector<CLTensor *> &tensors)
std::vector<std::tuple<CLTensor *, TensorInfo, AuxMemoryInfo>> ClWorkloadRuntime::get_auxiliary_tensors()
{
std::vector<std::tuple<CLTensor *, TensorInfo, AuxMemoryInfo>> aux_tensors;
- for(const auto &data : _impl->_aux_tensors.get_tensors())
+ for (const auto &data : _impl->_aux_tensors.get_tensors())
{
aux_tensors.emplace_back(data.tensor, data.tensor_info, data.memory_info);
}
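
The hunks above are formatting-only, so the runtime's lifecycle is unchanged: configure() generates source code from the sketch and compiles one ClKernelRuntime per unit workload, get_auxiliary_tensors() exposes the intermediate tensors for the caller to back with memory, and run() builds the tensor LUT, calls prepare() on first use, and dispatches. A sketch of that call sequence, assuming an already-built and validated GpuWorkloadSketch and eliding all Status checks:

    #include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
    #include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    #include <tuple>

    using namespace arm_compute;
    using namespace arm_compute::experimental::dynamic_fusion;

    void run_sketch(GpuWorkloadSketch &sketch, CLTensor &src, CLTensor &dst)
    {
        ClWorkloadRuntime runtime;
        runtime.configure(sketch);

        // Back each auxiliary (intermediate) tensor with memory before dispatch.
        for (auto &data : runtime.get_auxiliary_tensors())
        {
            CLTensor *tensor = std::get<0>(data);
            tensor->allocator()->init(std::get<1>(data));
            tensor->allocator()->allocate();
        }

        runtime.run({&src, &dst}); // tensor order must match the sketch's arguments
    }
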
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.cpp b/src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.cpp
index 84fb279237..7044b0ea66 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.cpp
@@ -30,14 +30,17 @@ namespace experimental
{
namespace dynamic_fusion
{
-void cl_add_tensor_component_argument(cl::Kernel &kernel, unsigned int &idx, const ICLTensor *tensor, TensorComponentType component)
+void cl_add_tensor_component_argument(cl::Kernel &kernel,
+ unsigned int &idx,
+ const ICLTensor *tensor,
+ TensorComponentType component)
{
ARM_COMPUTE_ERROR_ON(tensor == nullptr);
const auto *info = tensor->info();
const auto &strides = info->strides_in_bytes();
- switch(component)
+ switch (component)
{
case TensorComponentType::OffsetFirstElement:
kernel.setArg<cl_uint>(idx++, info->offset_first_element_in_bytes());
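
Only the OffsetFirstElement case survives in the hunk above, but every branch of the switch does the same work: resolve one scalar (an offset, stride, or dimension) from the tensor's info and bind it as a cl_uint at the next slot. A self-contained toy of that dispatch; MockTensorInfo and the enumerators other than OffsetFirstElement are illustrative stand-ins:

    #include <cstdint>
    #include <iostream>

    enum class TensorComponentType { OffsetFirstElement, Stride1, Dim0 };

    struct MockTensorInfo
    {
        std::uint32_t offset_first_element_in_bytes;
        std::uint32_t stride1_in_bytes;
        std::uint32_t dim0;
    };

    // Mirrors the switch in cl_add_tensor_component_argument: one component
    // enum value maps to one 32-bit kernel argument.
    std::uint32_t component_value(const MockTensorInfo &info, TensorComponentType component)
    {
        switch (component)
        {
            case TensorComponentType::OffsetFirstElement:
                return info.offset_first_element_in_bytes;
            case TensorComponentType::Stride1:
                return info.stride1_in_bytes;
            case TensorComponentType::Dim0:
                return info.dim0;
        }
        return 0; // unreachable with the enum above
    }

    int main()
    {
        const MockTensorInfo info{64, 256, 32};
        std::cout << component_value(info, TensorComponentType::Stride1) << '\n'; // 256
    }
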
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.h b/src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.h
index 4cbb157a48..306d547acb 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.h
+++ b/src/dynamic_fusion/runtime/gpu/cl/ckw_driver/GpuCkwKernelArgumentsHelpers.h
@@ -42,7 +42,10 @@ namespace dynamic_fusion
* @param[in] tensor Tensor from which to access the tensor component.
* @param[in] component Tensor component to select such as tensor dimensions, strides, etc.
*/
-void cl_add_tensor_component_argument(cl::Kernel &kernel, unsigned int &idx, const ICLTensor *tensor, TensorComponentType component);
+void cl_add_tensor_component_argument(cl::Kernel &kernel,
+ unsigned int &idx,
+ const ICLTensor *tensor,
+ TensorComponentType component);
/** Add an OpenCL buffer object to the kernel's arguments at the specified index @p idx.
*