author    Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>  2023-09-27 17:46:17 +0100
committer felixjohnny.thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>  2023-09-28 12:08:05 +0000
commit    afd38f0c617d6f89b2b4532c6c44f116617e2b6f (patch)
tree      03bc7d5a762099989b16a656fa8d397b490ed70e /src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
parent    bdcb4c148ee2fdeaaddf4cf1e57bbb0de02bb894 (diff)
download  ComputeLibrary-afd38f0c617d6f89b2b4532c6c44f116617e2b6f.tar.gz
Apply clang-format on repository
Code is formatted as per a revised clang-format configuration file (not part of this delivery). Version 14.0.6 is used.

Exclusion list:
- files with .cl extension
- files that are not strictly C/C++ (e.g. Android.bp, Sconscript ...)

And the following directories:
- compute_kernel_writer/validation/
- tests/
- include/
- src/core/NEON/kernels/convolution/
- src/core/NEON/kernels/arm_gemm/
- src/core/NEON/kernels/arm_conv/
- data/

There will be a follow-up for formatting of .cl files and the files under tests/ and compute_kernel_writer/validation/.

Signed-off-by: Felix Thomasmathibalan <felixjohnny.thomasmathibalan@arm.com>
Change-Id: Ib7eb1fcf4e7537b9feaefcfc15098a804a3fde0a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/10391
Benchmark: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
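For context, a minimal sketch of how such a formatting pass could be scripted over the repository. This is an illustration only, not part of the delivery: the binary name clang-format-14, the extension list, and the substring-based exclusion check are assumptions; the actual run was driven by the revised .clang-format configuration mentioned above.

    #include <cstdlib>
    #include <filesystem>
    #include <set>
    #include <string>
    #include <vector>

    namespace fs = std::filesystem;

    // Hypothetical helper: true if the path falls under one of the excluded directories.
    static bool is_excluded(const fs::path &p)
    {
        static const std::vector<std::string> excluded_dirs = {
            "compute_kernel_writer/validation/", "tests/", "include/",
            "src/core/NEON/kernels/convolution/", "src/core/NEON/kernels/arm_gemm/",
            "src/core/NEON/kernels/arm_conv/", "data/"};
        const std::string s = p.generic_string();
        for (const auto &dir : excluded_dirs)
        {
            if (s.find(dir) != std::string::npos)
            {
                return true;
            }
        }
        return false;
    }

    int main()
    {
        // Only strictly C/C++ sources are formatted; .cl files are skipped for now.
        static const std::set<std::string> extensions = {".cpp", ".cc", ".c", ".h", ".hpp", ".inl"};
        for (const auto &entry : fs::recursive_directory_iterator("."))
        {
            if (!entry.is_regular_file() || is_excluded(entry.path()))
            {
                continue;
            }
            if (extensions.count(entry.path().extension().string()) == 0)
            {
                continue;
            }
            // clang-format 14.0.6 picks up the repository's .clang-format configuration.
            const std::string cmd = "clang-format-14 -i \"" + entry.path().string() + "\"";
            std::system(cmd.c_str());
        }
        return 0;
    }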
Diffstat (limited to 'src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp')
-rw-r--r--  src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp  36
1 file changed, 24 insertions, 12 deletions
diff --git a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
index 5feafe19db..0e994e1aee 100644
--- a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
@@ -27,50 +27,62 @@
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
+
+#include "src/common/utils/Log.h"
#include "src/core/CL/ICLKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
-#include "src/common/utils/Log.h"
-
namespace arm_compute
{
CLInstanceNormalizationLayer::CLInstanceNormalizationLayer(CLRuntimeContext *ctx) // NOLINT
- : _inst_norm_kernel(),
- _mean_var_kernel(),
- _mean_var_tensor(),
- _ctx(ctx)
+ : _inst_norm_kernel(), _mean_var_kernel(), _mean_var_tensor(), _ctx(ctx)
{
}
CLInstanceNormalizationLayer::~CLInstanceNormalizationLayer()
{
}
-void CLInstanceNormalizationLayer::configure(ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
+void CLInstanceNormalizationLayer::configure(
+ ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
{
configure(CLKernelLibrary::get().get_compile_context(), input, output, gamma, beta, epsilon, use_mixed_precision);
}
-void CLInstanceNormalizationLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
+void CLInstanceNormalizationLayer::configure(const CLCompileContext &compile_context,
+ ICLTensor *input,
+ ICLTensor *output,
+ float gamma,
+ float beta,
+ float epsilon,
+ bool use_mixed_precision)
{
ARM_COMPUTE_LOG_PARAMS(input, output, gamma, beta, epsilon, use_mixed_precision);
auto w = std::make_unique<CLComputeMeanVariance>();
w->configure(compile_context, input, &_mean_var_tensor, use_mixed_precision);
_mean_var_kernel = std::move(w);
auto k = std::make_unique<CLInstanceNormalizationLayerKernel>();
- k->configure(compile_context, input, &_mean_var_tensor, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
+ k->configure(compile_context, input, &_mean_var_tensor, output,
+ InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
_inst_norm_kernel = std::move(k);
_mean_var_tensor.allocator()->allocate();
}
-Status CLInstanceNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
+Status CLInstanceNormalizationLayer::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ float gamma,
+ float beta,
+ float epsilon,
+ bool use_mixed_precision)
{
- return CLInstanceNormalizationLayerKernel::validate(input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
+ return CLInstanceNormalizationLayerKernel::validate(
+ input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
}
void CLInstanceNormalizationLayer::run()
{
- ARM_COMPUTE_ERROR_ON_MSG(!_inst_norm_kernel, "The child class didn't set the CL kernel or function isn't configured");
+ ARM_COMPUTE_ERROR_ON_MSG(!_inst_norm_kernel,
+ "The child class didn't set the CL kernel or function isn't configured");
schedule_kernel_on_ctx(_ctx, _mean_var_kernel.get());
schedule_kernel_on_ctx(_ctx, _inst_norm_kernel.get());
}
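For reference, a minimal usage sketch of the function touched by this diff, showing the configure/validate/run API reformatted above. It is an illustration under assumptions, not part of the change: the tensor shape and the gamma/beta/epsilon values are placeholders, and the input is left unfilled for brevity.

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"

    using namespace arm_compute;

    int main()
    {
        // Set up the default CL context and command queue.
        CLScheduler::get().default_init();

        // Arbitrary 4D shape (W, H, C, N); values below are placeholders.
        const TensorShape shape(32U, 32U, 16U, 1U);
        CLTensor          input;
        CLTensor          output;
        input.allocator()->init(TensorInfo(shape, 1, DataType::F32));
        output.allocator()->init(TensorInfo(shape, 1, DataType::F32));

        const float gamma = 1.0f, beta = 0.0f, epsilon = 1e-12f;

        // Optional up-front check of the configuration.
        const Status status =
            CLInstanceNormalizationLayer::validate(input.info(), output.info(), gamma, beta, epsilon);
        if (!bool(status))
        {
            return 1;
        }

        CLInstanceNormalizationLayer norm;
        norm.configure(&input, &output, gamma, beta, epsilon);

        // Allocate backing CL buffers after configuration, then execute.
        input.allocator()->allocate();
        output.allocator()->allocate();
        norm.run();
        CLScheduler::get().sync();
        return 0;
    }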