From afd38f0c617d6f89b2b4532c6c44f116617e2b6f Mon Sep 17 00:00:00 2001
From: Felix Thomasmathibalan
Date: Wed, 27 Sep 2023 17:46:17 +0100
Subject: Apply clang-format on repository

Code is formatted as per a revised clang format configuration file(not part of
this delivery). Version 14.0.6 is used.

Exclusion List:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)
And the following directories
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow up for formatting of .cl files and the files under
tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Gunes Bayir
---
 src/runtime/CL/functions/CLSoftmaxLayer.cpp | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

(limited to 'src/runtime/CL/functions/CLSoftmaxLayer.cpp')

diff --git a/src/runtime/CL/functions/CLSoftmaxLayer.cpp b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
index d52352fc8d..2e70e2aa08 100644
--- a/src/runtime/CL/functions/CLSoftmaxLayer.cpp
+++ b/src/runtime/CL/functions/CLSoftmaxLayer.cpp
@@ -22,12 +22,14 @@
  * SOFTWARE.
  */
 #include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
+
 #include "arm_compute/core/CL/CLHelpers.h"
 #include "arm_compute/core/CL/CLKernelLibrary.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/KernelDescriptors.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Utils.h"
+
 #include "src/core/helpers/MemoryHelpers.h"
 #include "src/gpu/cl/kernels/ClSoftmaxKernel.h"
 #include "src/gpu/cl/operators/ClPermute.h"
@@ -40,9 +42,9 @@ using OperatorType = opencl::ClSoftmax;
 template <bool IS_LOG>
 struct CLSoftmaxLayerGeneric<IS_LOG>::Impl
 {
-    const ICLTensor              *src{ nullptr };
-    ICLTensor                    *dst{ nullptr };
-    std::unique_ptr<OperatorType> op{ nullptr };
+    const ICLTensor              *src{nullptr};
+    ICLTensor                    *dst{nullptr};
+    std::unique_ptr<OperatorType> op{nullptr};
     MemoryGroup                   memory_group{};
     ITensorPack                   run_pack{};
     WorkspaceData<CLTensor>       workspace_tensors{};
@@ -65,28 +67,30 @@ void CLSoftmaxLayerGeneric<IS_LOG>::configure(const ICLTensor *input, ICLTensor
 }
 
 template <bool IS_LOG>
-void CLSoftmaxLayerGeneric<IS_LOG>::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta, int32_t axis)
+void CLSoftmaxLayerGeneric<IS_LOG>::configure(
+    const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, float beta, int32_t axis)
 {
     _impl->src = input;
     _impl->dst = output;
     _impl->op  = std::make_unique<OperatorType>();
 
-    SoftmaxKernelInfo softmax_info{ beta, IS_LOG, input->info()->data_type(), axis };
+    SoftmaxKernelInfo softmax_info{beta, IS_LOG, input->info()->data_type(), axis};
     _impl->op->configure(compile_context, *input->info(), *output->info(), softmax_info);
 
-    _impl->run_pack          = { { TensorType::ACL_SRC, _impl->src }, { TensorType::ACL_DST, _impl->dst } };
+    _impl->run_pack          = {{TensorType::ACL_SRC, _impl->src}, {TensorType::ACL_DST, _impl->dst}};
     _impl->workspace_tensors = manage_workspace<CLTensor>(_impl->op->workspace(), _impl->memory_group, _impl->run_pack);
 }
 
 template <bool IS_LOG>
-Status CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t axis)
+Status
+CLSoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t axis)
 {
-    SoftmaxKernelInfo softmax_info{ beta, IS_LOG, input->data_type(), axis };
+    SoftmaxKernelInfo softmax_info{beta, IS_LOG, input->data_type(), axis};
     return OperatorType::validate(*input, *output, softmax_info);
 }
 
 template <bool IS_LOG>
-void           CLSoftmaxLayerGeneric<IS_LOG>::run()
+void CLSoftmaxLayerGeneric<IS_LOG>::run()
 {
     // Acquire all the temporaries
     MemoryGroupResourceScope scope_mg(_impl->memory_group);
--
cgit v1.2.1
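
For context, the functions touched above are the public configure()/validate()/run() entry points of CLSoftmaxLayer; the patch itself only reformats them. The following is a minimal usage sketch (not part of this change): the tensor shape, data type, beta and axis values are illustrative assumptions, and CLSoftmaxLayer is the non-log instantiation CLSoftmaxLayerGeneric<false>.

// Minimal usage sketch (not part of this patch) showing how the
// configure()/run() API reformatted above is typically driven.
// Shape, data type, beta and axis are illustrative assumptions.
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init(); // set up the OpenCL context and command queue

    // Assumed shape: 128 classes per row, batch of 32
    const TensorShape shape(128U, 32U);
    CLTensor src, dst;
    src.allocator()->init(TensorInfo(shape, 1, DataType::F32));
    dst.allocator()->init(TensorInfo(shape, 1, DataType::F32));

    CLSoftmaxLayer softmax; // CLSoftmaxLayerGeneric<false>, i.e. plain (non-log) softmax
    softmax.configure(&src, &dst, /* beta */ 1.0f, /* axis */ 0);

    src.allocator()->allocate();
    dst.allocator()->allocate();

    softmax.run();             // enqueue the underlying ClSoftmax kernels
    CLScheduler::get().sync(); // wait for the command queue to finish
    return 0;
}

The usual ACL ordering applies: initialise the tensor infos, call configure(), then allocate the tensors before run(); validate() can be called with the ITensorInfo objects beforehand to check the configuration without allocating anything.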