aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPablo Marquez Tello <pablo.tello@arm.com>2021-03-03 12:12:35 +0000
committerPablo Marquez Tello <pablo.tello@arm.com>2021-04-19 15:02:29 +0000
commitfe7ae817755577be29f4c07aa27d8ef9e821da45 (patch)
tree459b1b22f59cf5144cd72b839fbfdf21fa341479
parent60c3b0e6821a80d78ffca5be30e05d062d071cd2 (diff)
downloadComputeLibrary-fe7ae817755577be29f4c07aa27d8ef9e821da45.tar.gz
CLInstanceNormalizationLayer NHWC optimisation
* Make changes to split the workload into two kernels. One kernel precomputes mean and variance and the second kernel just loads these precomputed values. * The new approach runs 30% faster than the original code for NHWC workloads like 32x192x256. * Resolves MLCE-337 Change-Id: I8356fcefa2d131ab4dcb32268ce7142421d073e4 Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com> Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5355 Tested-by: Arm Jenkins <bsgcomp@arm.com> Comments-Addressed: Arm Jenkins <bsgcomp@arm.com> Reviewed-by: Manuel Bottini <manuel.bottini@arm.com> Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
-rw-r--r--arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h37
-rw-r--r--src/core/CL/CLKernelLibrary.cpp1
-rw-r--r--src/core/CL/cl_kernels/instance_normalization.cl155
-rw-r--r--src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp96
-rw-r--r--src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h57
-rw-r--r--src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp35
6 files changed, 305 insertions, 76 deletions
diff --git a/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h
index d41f3fedf6..a6e5b1622b 100644
--- a/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -25,24 +25,44 @@
#define ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYER_H
#include "arm_compute/core/Error.h"
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/IFunction.h"
+
+#include <memory>
namespace arm_compute
{
class CLCompileContext;
class ICLTensor;
class ITensorInfo;
+class ICLKernel;
+class CLRuntimeContext;
/** Basic function to perform a Instance normalization.
*
* This function runs the following kernels:
* -# @ref CLInstanceNormalizationLayerKernel
*/
-class CLInstanceNormalizationLayer : public ICLSimpleFunction
+class CLInstanceNormalizationLayer : public IFunction
{
public:
- /** Default constructor */
- CLInstanceNormalizationLayer();
+ /** Constructor
+ *
+ * @param[in] ctx Runtime context to be used by the function
+ */
+ CLInstanceNormalizationLayer(CLRuntimeContext *ctx = nullptr);
+
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLInstanceNormalizationLayer(const CLInstanceNormalizationLayer &) = delete;
+ /** Default move constructor */
+ CLInstanceNormalizationLayer(CLInstanceNormalizationLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLInstanceNormalizationLayer &operator=(const CLInstanceNormalizationLayer &) = delete;
+ /** Default move assignment operator */
+ CLInstanceNormalizationLayer &operator=(CLInstanceNormalizationLayer &&) = default;
+ /** Default destructor */
+ ~CLInstanceNormalizationLayer();
+
/** Set the input and output tensors.
*
* @param[in, out] input Source tensor. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
@@ -79,6 +99,13 @@ public:
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f, bool use_mixed_precision = true);
+ void run() override;
+
+private:
+ std::unique_ptr<ICLKernel> _inst_norm_kernel; /**< Kernel to run */
+ std::unique_ptr<ICLKernel> _mean_var_kernel; /**< Kernel to run */
+ CLTensor _mean_var_tensor;
+ CLRuntimeContext *_ctx; /**< Context to use */
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYER_H */
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index eef204fde9..002a14400f 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -356,6 +356,7 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "im2col9x9_nhwc", "im2col.cl" },
{ "im2col_generic_nhwc", "im2col.cl" },
{ "instance_normalization", "instance_normalization.cl" },
+ { "compute_mean_var", "instance_normalization.cl" },
{ "l2_normalize_x", "l2_normalize.cl" },
{ "l2_normalize_y", "l2_normalize.cl" },
{ "l2_normalize_z", "l2_normalize.cl" },
diff --git a/src/core/CL/cl_kernels/instance_normalization.cl b/src/core/CL/cl_kernels/instance_normalization.cl
index 480d9cd20c..d2507d94dd 100644
--- a/src/core/CL/cl_kernels/instance_normalization.cl
+++ b/src/core/CL/cl_kernels/instance_normalization.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,14 +23,11 @@
*/
#include "helpers.h"
-#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(INTERNAL_DATA_TYPE) && defined(GAMMA) && defined(BETA) && defined(EPSILON) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
-/** This function normalizes the input 2D tensor across the first dimension with respect to mean and standard deviation of the same dimension.
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
+/** This function computes the mean and variance of each plane of the input tensor and provides it as output.
*
* @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
* @attention Data type should be passed using the -DDATA_TYPE=data_type compile flag, e.g. -DDATA_TYPE=float
- * @attention The scale scalar value applied to the normalized tensor should be passed using the -DGAMMA=value compile flag, e.g. -DGAMMA=1.3
- * @attention The offset scalar value applied to the normalized tensor should be passed using the -DBETA=value compile flag, e.g. -DBETA=2.4
- * @attention Normalization epsilon parameter should be given as a preprocessor argument with -DEPSILON=value. e.g. -DEPSILON=0.001f
* @attention Dimensions X, Y, and Z should be given as a preprocessor argument with -DDIM_X=value, -DDIM_Y=value, -DDIM_Z=value. e.g. -DDIM_X=6, -DDIM_Y=2, -DDIM_Z=7
*
* @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
@@ -40,6 +37,8 @@
* @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
* @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
* @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem(in bytes)
* @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
* @param[out] output_ptr (Optional) Pointer to the destination tensor. Supported data types: same as @p input_ptr
* @param[in] output_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes)
@@ -50,46 +49,40 @@
* @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
* @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
*/
-__kernel void instance_normalization(
- TENSOR4D_DECLARATION(input)
-#ifndef IN_PLACE
- ,
- TENSOR4D_DECLARATION(output)
-#endif /* IN_PLACE */
-)
+__kernel void compute_mean_var(
+ TENSOR4D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
-#ifndef IN_PLACE
- Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
-#endif /* IN_PLACE */
-
- INTERNAL_DATA_TYPE sum = 0.f;
- INTERNAL_DATA_TYPE sum_sq = 0.f;
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
#if defined(NHWC)
-
const int ch = get_global_id(0); // Current channel
- const int batch = get_global_id(2); // Current batch
+ const int batch = get_global_id(1); // Current batch
const int elements_plane = DIM_Y * DIM_Z;
-
- for(int i_w = 0; i_w < DIM_Y; ++i_w)
+ float part_sum = 0.f;
+ float part_sum_sq = 0.f;
+ const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
+ for(int i = 0; i < (DIM_Y * DIM_Z); ++i)
{
- for(int i_h = 0; i_h < DIM_Z; ++i_h)
- {
- INTERNAL_DATA_TYPE data = (INTERNAL_DATA_TYPE) * ((__global DATA_TYPE *)tensor4D_offset(&in, ch, i_w, i_h, batch));
- sum += data;
- sum_sq += data * data;
- }
+ const float data = *((__global DATA_TYPE *)(input_ptr + in_offset + i * input_stride_y));
+ part_sum += data;
+ part_sum_sq += data * data;
}
-
+ float mean = (part_sum / elements_plane);
+ float var = (part_sum_sq / elements_plane) - (mean * mean);
+ __global DATA_TYPE *output_address0 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
+ *output_address0 = mean;
+ __global DATA_TYPE *output_address1 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
+ *output_address1 = var;
#else // !defined(NHWC)
const int ch = get_global_id(2) % DIM_Z; // Current channel
const int batch = get_global_id(2) / DIM_Z; // Current batch
const int elements_plane = DIM_X * DIM_Y;
- VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
+ VEC_DATA_TYPE(float, VEC_SIZE)
part_sum = 0.f;
- VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
+ VEC_DATA_TYPE(float, VEC_SIZE)
part_sum_sq = 0.f;
// Calculate partial sum
for(int y = 0; y < DIM_Y; ++y)
@@ -98,15 +91,15 @@ __kernel void instance_normalization(
for(; x <= (DIM_X - VEC_SIZE); x += VEC_SIZE)
{
// Load data
- VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
- data = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)), VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE));
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ data = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)), VEC_DATA_TYPE(float, VEC_SIZE));
part_sum += data;
part_sum_sq += data * data;
}
// Left-overs loop
for(; x < DIM_X; ++x)
{
- INTERNAL_DATA_TYPE data = (INTERNAL_DATA_TYPE)(*((__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)));
+ float data = (float)(*((__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)));
part_sum.s0 += data;
part_sum_sq.s0 += data * data;
}
@@ -127,29 +120,93 @@ __kernel void instance_normalization(
part_sum.s0 += part_sum.s1;
part_sum_sq.s0 += part_sum_sq.s1;
- sum = (INTERNAL_DATA_TYPE)part_sum.s0;
- sum_sq = (INTERNAL_DATA_TYPE)part_sum_sq.s0;
+ float sum = (float)part_sum.s0;
+ float sum_sq = (float)part_sum_sq.s0;
+
+ const float mean = (sum / elements_plane);
+ const float var = (sum_sq / elements_plane) - (mean * mean);
+
+ __global DATA_TYPE *output_address0 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
+ *output_address0 = mean;
+ __global DATA_TYPE *output_address1 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
+ *output_address1 = var;
#endif // defined(NHWC)
+}
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z) */
- const INTERNAL_DATA_TYPE mean = (sum / elements_plane);
- const INTERNAL_DATA_TYPE var = (sum_sq / elements_plane) - (mean * mean);
- const INTERNAL_DATA_TYPE multip = GAMMA / sqrt(var + EPSILON);
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(INTERNAL_DATA_TYPE) && defined(GAMMA) && defined(BETA) && defined(EPSILON) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
+/** This function normalizes the input 2D tensor across the first dimension with respect to mean and standard deviation of the same dimension.
+ *
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @attention Data type should be passed using the -DDATA_TYPE=data_type compile flag, e.g. -DDATA_TYPE=float
+ * @attention The scale scalar value applied to the normalized tensor should be passed using the -DGAMMA=value compile flag, e.g. -DGAMMA=1.3
+ * @attention The offset scalar value applied to the normalized tensor should be passed using the -DBETA=value compile flag, e.g. -DBETA=2.4
+ * @attention Normalization epsilon parameter should be given as a preprocessor argument with -DEPSILON=value. e.g. -DEPSILON=0.001f
+ * @attention Dimensions X, Y, and Z should be given as a preprocessor argument with -DDIM_X=value, -DDIM_Y=value, -DDIM_Z=value. e.g. -DDIM_X=6, -DDIM_Y=2, -DDIM_Z=7
+ *
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr (Optional) Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x (Optional) Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y (Optional) Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
+ */
+__kernel void instance_normalization(
+ TENSOR4D_DECLARATION(input),
+ TENSOR3D_DECLARATION(mean_var)
+#ifndef IN_PLACE
+ ,
+ TENSOR4D_DECLARATION(output)
+#endif /* IN_PLACE */
+)
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor3D mean_var = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(mean_var);
+#ifndef IN_PLACE
+ Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
+#endif /* IN_PLACE */
#if defined(NHWC)
+ const int ch = get_global_id(0); // Current channel
+ const int batch = get_global_id(2); // Current batch
+#else /* defined(NHWC) */
+ const int ch = get_global_id(2) % DIM_Z; // Current channel
+ const int batch = get_global_id(2) / DIM_Z; // Current batch
+#endif /* defined(NHWC) */
- for(int i_w = 0; i_w < DIM_Y; ++i_w)
+ const __global DATA_TYPE *mean_ptr = (__global DATA_TYPE *)tensor3D_offset(&mean_var, ch, 0, batch);
+ const __global DATA_TYPE *var_ptr = (__global DATA_TYPE *)tensor3D_offset(&mean_var, ch, 1, batch);
+ const INTERNAL_DATA_TYPE mean = (INTERNAL_DATA_TYPE) * mean_ptr;
+ const INTERNAL_DATA_TYPE var = (INTERNAL_DATA_TYPE) * var_ptr;
+ const INTERNAL_DATA_TYPE multip = GAMMA / sqrt(var + EPSILON);
+ const INTERNAL_DATA_TYPE beta = (INTERNAL_DATA_TYPE)BETA;
+
+#if defined(NHWC)
+ const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
+#ifndef IN_PLACE
+ const int out_offset = output_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
+#endif /* IN_PLACE */
+
+ for(int i = 0; i < (DIM_Y * DIM_Z); ++i)
{
- for(int i_h = 0; i_h < DIM_Z; ++i_h)
- {
- __global DATA_TYPE *input_address = (__global DATA_TYPE *)tensor4D_offset(&in, ch, i_w, i_h, batch);
+ __global DATA_TYPE *input_address = (__global DATA_TYPE *)(input_ptr + in_offset + i * input_stride_y);
#ifdef IN_PLACE
- __global DATA_TYPE *output_address = input_address;
+ __global DATA_TYPE *output_address = input_address;
#else /* !IN_PLACE */
- __global DATA_TYPE *output_address = (__global DATA_TYPE *)tensor4D_offset(&out, ch, i_w, i_h, batch);
+ __global DATA_TYPE *output_address = (__global DATA_TYPE *)(output_ptr + out_offset + i * output_stride_y);
#endif /* IN_PLACE */
- *(output_address) = (*(input_address) - mean) * multip + (INTERNAL_DATA_TYPE)BETA;
- }
+ *(output_address) = (*(input_address) - mean) * multip + beta;
}
#else // !defined(NHWC)
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
index 50c4e24c33..80a42cc3f5 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
@@ -32,7 +32,6 @@
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
@@ -54,25 +53,108 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
return Status{};
}
+
+Status validate_arguments_meanvar(const ITensorInfo *input, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
+
+ if(output != nullptr && output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels");
+ }
+
+ return Status{};
+}
} // namespace
-CLInstanceNormalizationLayerKernel::CLInstanceNormalizationLayerKernel()
- : _input(nullptr), _output(nullptr), _run_in_place(false)
+CLComputeMeanVariance::CLComputeMeanVariance()
+ : _input(nullptr), _output(nullptr)
+{
+}
+
+void CLComputeMeanVariance::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ auto padding_info = get_padding_info({ input, output });
+
+ _input = input;
+ _output = output == nullptr ? input : output;
+
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_meanvar(_input->info(), _output->info()));
+ const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
+
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option("-DDIM_X=" + support::cpp11::to_string(input->info()->dimension(0)));
+ build_opts.add_option("-DDIM_Y=" + support::cpp11::to_string(input->info()->dimension(1)));
+ build_opts.add_option("-DDIM_Z=" + support::cpp11::to_string(input->info()->dimension(2)));
+ build_opts.add_option_if(_input->info()->data_layout() == DataLayout::NHWC, "-DNHWC");
+ // Create kernel
+ _kernel = create_kernel(compile_context, "compute_mean_var", build_opts.options());
+
+ // We handle the planes manually
+ Window win = calculate_max_window(*(input->info()), Steps(1));
+ const auto data_layout = input->info()->data_layout();
+ const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const unsigned int batches_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+ const unsigned int input_channel = input->info()->dimension(channel_idx);
+ const unsigned int input_batches = input->info()->dimension(batches_idx);
+ const TensorShape out_shape(input_channel, 2u, input_batches);
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), out_shape, 1, input->info()->data_type());
+
+ ICLKernel::configure_internal(win);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+}
+
+Status CLComputeMeanVariance::validate(const ITensorInfo *input, const ITensorInfo *output)
{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_meanvar(input, output));
+ return Status{};
}
-void CLInstanceNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
+void CLComputeMeanVariance::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ Window collapsed_window = window.collapse(window, Window::DimZ);
+
+ // We will process the planes together
+ if(_input->info()->data_layout() == DataLayout::NCHW)
+ {
+ collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+ collapsed_window.set(Window::DimY, Window::Dimension(0, 1, 1));
+ }
+ else
+ {
+ collapsed_window.set(Window::DimZ, Window::Dimension(0, 1, 1));
+ collapsed_window.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(3), 1));
+ }
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, collapsed_window);
+ add_3D_tensor_argument(idx, _output, collapsed_window);
+
+ enqueue(queue, *this, collapsed_window, lws_hint());
+}
+
+CLInstanceNormalizationLayerKernel::CLInstanceNormalizationLayerKernel()
+ : _input(nullptr), _output(nullptr), _mean(nullptr), _run_in_place(false)
{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
}
-void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
+void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *mean_var, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
auto padding_info = get_padding_info({ input, output });
_input = input;
_output = output == nullptr ? input : output;
+ _mean = mean_var;
_run_in_place = (output == nullptr) || (output == input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _output->info(), info));
@@ -132,6 +214,8 @@ void CLInstanceNormalizationLayerKernel::run(const Window &window, cl::CommandQu
unsigned int idx = 0;
add_4D_tensor_argument(idx, _input, collapsed_window);
+ add_3D_tensor_argument(idx, _mean, collapsed_window);
+
if(!_run_in_place)
{
add_4D_tensor_argument(idx, _output, collapsed_window);
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
index d4444f0b20..33a3ff97c3 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,21 +52,14 @@ public:
/** Set the input and output tensors.
*
- * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
- * In case of @p output tensor = nullptr this tensor will store the result of the normalization.
- * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
- * @param[in] info Kernel meta-data descriptor
- */
- void configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
- /** Set the input and output tensors.
- *
* @param[in] compile_context The compile context to be used.
* @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
* In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+ * @param[in] mean_var Tensor containing the precomputed mean and variance values. Data types supported: F32.
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* @param[in] info Kernel meta-data descriptor
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *mean_var, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
*
@@ -84,7 +77,51 @@ public:
private:
ICLTensor *_input;
ICLTensor *_output;
+ ICLTensor *_mean;
bool _run_in_place;
};
+
+/** Interface for compute Mean and Variance per channel */
+class CLComputeMeanVariance : public ICLKernel
+{
+public:
+ /** Constructor */
+ CLComputeMeanVariance();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLComputeMeanVariance(const CLComputeMeanVariance &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLComputeMeanVariance &operator=(const CLComputeMeanVariance &) = delete;
+ /** Default Move Constructor. */
+ CLComputeMeanVariance(CLComputeMeanVariance &&) = default;
+ /** Default move assignment operator */
+ CLComputeMeanVariance &operator=(CLComputeMeanVariance &&) = default;
+ /** Default destructor */
+ ~CLComputeMeanVariance() = default;
+
+ /** Set the input and output tensors.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
+ * In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+ * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
+ */
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
+ *
+ * @param[in] input Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ ICLTensor *_input;
+ ICLTensor *_output;
+};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYERKERNEL_H */
diff --git a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
index 9bc060e6ca..f2406d68f4 100644
--- a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,13 +23,24 @@
*/
#include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"
+#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLHelpers.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/CL/ICLKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
namespace arm_compute
{
-CLInstanceNormalizationLayer::CLInstanceNormalizationLayer()
+CLInstanceNormalizationLayer::CLInstanceNormalizationLayer(CLRuntimeContext *ctx) // NOLINT
+ : _inst_norm_kernel(),
+ _mean_var_kernel(),
+ _mean_var_tensor(),
+ _ctx(ctx)
+{
+}
+CLInstanceNormalizationLayer::~CLInstanceNormalizationLayer()
{
}
@@ -40,13 +51,25 @@ void CLInstanceNormalizationLayer::configure(ICLTensor *input, ICLTensor *output
void CLInstanceNormalizationLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
{
- auto k = std::make_unique<CLInstanceNormalizationLayerKernel>();
- k->configure(compile_context, input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
- _kernel = std::move(k);
+ auto w = std::make_unique<CLComputeMeanVariance>();
+ w->configure(compile_context, input, &_mean_var_tensor);
+ _mean_var_kernel = std::move(w);
+ auto k = std::make_unique<CLInstanceNormalizationLayerKernel>();
+ k->configure(compile_context, input, &_mean_var_tensor, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
+ _inst_norm_kernel = std::move(k);
+ _mean_var_tensor.allocator()->allocate();
}
Status CLInstanceNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
{
return CLInstanceNormalizationLayerKernel::validate(input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
}
-} // namespace arm_compute \ No newline at end of file
+
+void CLInstanceNormalizationLayer::run()
+{
+ ARM_COMPUTE_ERROR_ON_MSG(!_inst_norm_kernel, "The child class didn't set the CL kernel or function isn't configured");
+ schedule_kernel_on_ctx(_ctx, _mean_var_kernel.get());
+ schedule_kernel_on_ctx(_ctx, _inst_norm_kernel.get());
+}
+
+} // namespace arm_compute