aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPablo Tello <pablo.tello@arm.com>2021-04-26 15:39:05 +0100
committerPablo Marquez Tello <pablo.tello@arm.com>2021-04-27 11:29:07 +0000
commit5c3eeec645883dc8f57a1e10995b4e8298343ecb (patch)
tree8c936233e98c2d0032b60cdd1c399bf4643527df
parent0ff73c70003bad895e1d0bd5c53aa117eb798900 (diff)
downloadComputeLibrary-5c3eeec645883dc8f57a1e10995b4e8298343ecb.tar.gz
Fixed CTS failures CLInstanceNorm
* Resolves COMPMID-4400

Change-Id: I54c33a017c735194fbf4437d1c7df465208bc0ca
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5505
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sheri Zhang <sheri.zhang@arm.com>
-rw-r--r--src/core/CL/cl_kernels/instance_normalization.cl91
-rw-r--r--src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp13
-rw-r--r--src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h11
-rw-r--r--src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp2
4 files changed, 66 insertions, 51 deletions
diff --git a/src/core/CL/cl_kernels/instance_normalization.cl b/src/core/CL/cl_kernels/instance_normalization.cl
index d2507d94dd..adfbebd67d 100644
--- a/src/core/CL/cl_kernels/instance_normalization.cl
+++ b/src/core/CL/cl_kernels/instance_normalization.cl
@@ -23,7 +23,7 @@
*/
#include "helpers.h"
-#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(INTERNAL_DATA_TYPE) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
/** This function computes the mean and variance of each plane of the input tensor and provides it as output.
*
* @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
@@ -57,32 +57,37 @@ __kernel void compute_mean_var(
Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
#if defined(NHWC)
- const int ch = get_global_id(0); // Current channel
- const int batch = get_global_id(1); // Current batch
- const int elements_plane = DIM_Y * DIM_Z;
- float part_sum = 0.f;
- float part_sum_sq = 0.f;
- const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
- for(int i = 0; i < (DIM_Y * DIM_Z); ++i)
+ const int ch = get_global_id(0); // Current channel
+ const int batch = get_global_id(1); // Current batch
+ const int elements_plane = DIM_Y * DIM_Z;
+ INTERNAL_DATA_TYPE part_sum = 0.f;
+ INTERNAL_DATA_TYPE part_sum_sq = 0.f;
+ const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
+
+ for(int i_w = 0; i_w < DIM_Y; ++i_w)
{
- const float data = *((__global DATA_TYPE *)(input_ptr + in_offset + i * input_stride_y));
- part_sum += data;
- part_sum_sq += data * data;
+ for(int i_h = 0; i_h < DIM_Z; ++i_h)
+ {
+ INTERNAL_DATA_TYPE data = (INTERNAL_DATA_TYPE) * ((__global DATA_TYPE *)tensor4D_offset(&in, ch, i_w, i_h, batch));
+ part_sum += data;
+ part_sum_sq += data * data;
+ }
}
- float mean = (part_sum / elements_plane);
- float var = (part_sum_sq / elements_plane) - (mean * mean);
- __global DATA_TYPE *output_address0 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
- *output_address0 = mean;
- __global DATA_TYPE *output_address1 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
- *output_address1 = var;
+
+ INTERNAL_DATA_TYPE mean = (part_sum / elements_plane);
+ INTERNAL_DATA_TYPE var = (part_sum_sq / elements_plane) - (mean * mean);
+ __global INTERNAL_DATA_TYPE *output_address0 = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
+ *output_address0 = mean;
+ __global INTERNAL_DATA_TYPE *output_address1 = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
+ *output_address1 = var;
#else // !defined(NHWC)
const int ch = get_global_id(2) % DIM_Z; // Current channel
const int batch = get_global_id(2) / DIM_Z; // Current batch
const int elements_plane = DIM_X * DIM_Y;
- VEC_DATA_TYPE(float, VEC_SIZE)
+ VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
part_sum = 0.f;
- VEC_DATA_TYPE(float, VEC_SIZE)
+ VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
part_sum_sq = 0.f;
// Calculate partial sum
for(int y = 0; y < DIM_Y; ++y)
@@ -91,15 +96,15 @@ __kernel void compute_mean_var(
for(; x <= (DIM_X - VEC_SIZE); x += VEC_SIZE)
{
// Load data
- VEC_DATA_TYPE(float, VEC_SIZE)
- data = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)), VEC_DATA_TYPE(float, VEC_SIZE));
+ VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
+ data = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)), VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE));
part_sum += data;
part_sum_sq += data * data;
}
// Left-overs loop
for(; x < DIM_X; ++x)
{
- float data = (float)(*((__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)));
+ INTERNAL_DATA_TYPE data = (INTERNAL_DATA_TYPE)(*((__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)));
part_sum.s0 += data;
part_sum_sq.s0 += data * data;
}
@@ -120,16 +125,16 @@ __kernel void compute_mean_var(
part_sum.s0 += part_sum.s1;
part_sum_sq.s0 += part_sum_sq.s1;
- float sum = (float)part_sum.s0;
- float sum_sq = (float)part_sum_sq.s0;
+ INTERNAL_DATA_TYPE sum = (INTERNAL_DATA_TYPE)part_sum.s0;
+ INTERNAL_DATA_TYPE sum_sq = (INTERNAL_DATA_TYPE)part_sum_sq.s0;
- const float mean = (sum / elements_plane);
- const float var = (sum_sq / elements_plane) - (mean * mean);
+ const INTERNAL_DATA_TYPE mean = (sum / elements_plane);
+ const INTERNAL_DATA_TYPE var = (sum_sq / elements_plane) - (mean * mean);
- __global DATA_TYPE *output_address0 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
- *output_address0 = mean;
- __global DATA_TYPE *output_address1 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
- *output_address1 = var;
+ __global INTERNAL_DATA_TYPE *output_address0 = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
+ *output_address0 = mean;
+ __global INTERNAL_DATA_TYPE *output_address1 = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
+ *output_address1 = var;
#endif // defined(NHWC)
}
@@ -185,12 +190,12 @@ __kernel void instance_normalization(
const int batch = get_global_id(2) / DIM_Z; // Current batch
#endif /* defined(NHWC) */
- const __global DATA_TYPE *mean_ptr = (__global DATA_TYPE *)tensor3D_offset(&mean_var, ch, 0, batch);
- const __global DATA_TYPE *var_ptr = (__global DATA_TYPE *)tensor3D_offset(&mean_var, ch, 1, batch);
- const INTERNAL_DATA_TYPE mean = (INTERNAL_DATA_TYPE) * mean_ptr;
- const INTERNAL_DATA_TYPE var = (INTERNAL_DATA_TYPE) * var_ptr;
- const INTERNAL_DATA_TYPE multip = GAMMA / sqrt(var + EPSILON);
- const INTERNAL_DATA_TYPE beta = (INTERNAL_DATA_TYPE)BETA;
+ const __global INTERNAL_DATA_TYPE *mean_ptr = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&mean_var, ch, 0, batch);
+ const __global INTERNAL_DATA_TYPE *var_ptr = (__global INTERNAL_DATA_TYPE *)tensor3D_offset(&mean_var, ch, 1, batch);
+ const INTERNAL_DATA_TYPE mean = (INTERNAL_DATA_TYPE) * mean_ptr;
+ const INTERNAL_DATA_TYPE var = (INTERNAL_DATA_TYPE) * var_ptr;
+ const INTERNAL_DATA_TYPE multip = GAMMA / sqrt(var + EPSILON);
+ const INTERNAL_DATA_TYPE beta = (INTERNAL_DATA_TYPE)BETA;
#if defined(NHWC)
const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
@@ -198,17 +203,19 @@ __kernel void instance_normalization(
const int out_offset = output_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
#endif /* IN_PLACE */
- for(int i = 0; i < (DIM_Y * DIM_Z); ++i)
+ for(int i_w = 0; i_w < DIM_Y; ++i_w)
{
- __global DATA_TYPE *input_address = (__global DATA_TYPE *)(input_ptr + in_offset + i * input_stride_y);
+ for(int i_h = 0; i_h < DIM_Z; ++i_h)
+ {
+ __global DATA_TYPE *input_address = (__global DATA_TYPE *)tensor4D_offset(&in, ch, i_w, i_h, batch);
#ifdef IN_PLACE
- __global DATA_TYPE *output_address = input_address;
+ __global DATA_TYPE *output_address = input_address;
#else /* !IN_PLACE */
- __global DATA_TYPE *output_address = (__global DATA_TYPE *)(output_ptr + out_offset + i * output_stride_y);
+ __global DATA_TYPE *output_address = (__global DATA_TYPE *)tensor4D_offset(&out, ch, i_w, i_h, batch);
#endif /* IN_PLACE */
- *(output_address) = (*(input_address) - mean) * multip + beta;
+            *(output_address) = (*(input_address) - mean) * multip + beta;
+ }
}
-
#else // !defined(NHWC)
for(int y = 0; y < DIM_Y; ++y)
{
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
index 80a42cc3f5..dcde0850a7 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
@@ -74,7 +74,7 @@ CLComputeMeanVariance::CLComputeMeanVariance()
{
}
-void CLComputeMeanVariance::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output)
+void CLComputeMeanVariance::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, bool use_mixed_precision)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
auto padding_info = get_padding_info({ input, output });
@@ -86,6 +86,7 @@ void CLComputeMeanVariance::configure(const CLCompileContext &compile_context, I
const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
CLBuildOptions build_opts;
+ build_opts.add_option("-DINTERNAL_DATA_TYPE=" + (use_mixed_precision ? "float" : get_cl_type_from_data_type(input->info()->data_type())));
build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
build_opts.add_option("-DDIM_X=" + support::cpp11::to_string(input->info()->dimension(0)));
@@ -105,8 +106,14 @@ void CLComputeMeanVariance::configure(const CLCompileContext &compile_context, I
const TensorShape out_shape(input_channel, 2u, input_batches);
// Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), out_shape, 1, input->info()->data_type());
-
+ if(use_mixed_precision)
+ {
+ auto_init_if_empty(*output->info(), out_shape, 1, DataType::F32);
+ }
+ else
+ {
+ auto_init_if_empty(*output->info(), out_shape, 1, input->info()->data_type());
+ }
ICLKernel::configure_internal(win);
ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
index 33a3ff97c3..2f9014a651 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -100,12 +100,13 @@ public:
/** Set the input and output tensors.
*
- * @param[in] compile_context The compile context to be used.
- * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
- * In case of @p output tensor = nullptr this tensor will store the result of the normalization.
- * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
+ * @param[in] compile_context The compile context to be used.
+ * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
+ * In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+ * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
+ * @param[in] use_mixed_precision Use mixed precision in case of FP16 execution
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, bool use_mixed_precision);
/** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
*
diff --git a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
index f2406d68f4..4a0bda8255 100644
--- a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
@@ -52,7 +52,7 @@ void CLInstanceNormalizationLayer::configure(ICLTensor *input, ICLTensor *output
void CLInstanceNormalizationLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
{
auto w = std::make_unique<CLComputeMeanVariance>();
- w->configure(compile_context, input, &_mean_var_tensor);
+ w->configure(compile_context, input, &_mean_var_tensor, use_mixed_precision);
_mean_var_kernel = std::move(w);
auto k = std::make_unique<CLInstanceNormalizationLayerKernel>();
k->configure(compile_context, input, &_mean_var_tensor, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));