 arm_compute/core/Utils.h                                 |   8 +-
 src/core/CL/CLKernelLibrary.cpp                          |   3 +-
 src/core/CL/cl_kernels/batchnormalization_layer.cl       | 125 ++++++++++++++-
 src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp  |   7 +-
 src/core/Utils.cpp                                       |  12 ++
 tests/benchmark/CL/BatchNormalizationLayer.cpp           |  12 +-
 tests/validation/CL/BatchNormalizationLayer.cpp          |   6 +-
 7 files changed, 152 insertions(+), 21 deletions(-)
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index 23668e0169..060d5904d4 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -917,7 +917,13 @@ const std::string &string_from_format(Format format);
* @return The string describing the channel.
*/
const std::string &string_from_channel(Channel channel);
-
+/** Convert a data layout identity into a string.
+ *
+ * @param[in] dl @ref DataLayout to be translated to string.
+ *
+ * @return The string describing the data layout.
+ */
+const std::string &string_from_data_layout(DataLayout dl);
/** Convert a data type identity into a string.
*
* @param[in] dt @ref DataType to be translated to string.
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index db0e51bb48..1c773bc42f 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -151,7 +151,8 @@ const std::map<std::string, std::string> CLKernelLibrary::_kernel_program_map =
{ "activation_layer_qa8", "activation_layer_qa8.cl" },
{ "arithmetic_add", "arithmetic_op.cl" },
{ "arithmetic_sub", "arithmetic_op.cl" },
- { "batchnormalization_layer", "batchnormalization_layer.cl" },
+ { "batchnormalization_layer_nchw", "batchnormalization_layer.cl" },
+ { "batchnormalization_layer_nhwc", "batchnormalization_layer.cl" },
{ "bitwise_or", "bitwise_op.cl" },
{ "bitwise_and", "bitwise_op.cl" },
{ "bitwise_xor", "bitwise_op.cl" },
diff --git a/src/core/CL/cl_kernels/batchnormalization_layer.cl b/src/core/CL/cl_kernels/batchnormalization_layer.cl
index 29b62d3d92..9c980da62a 100644
--- a/src/core/CL/cl_kernels/batchnormalization_layer.cl
+++ b/src/core/CL/cl_kernels/batchnormalization_layer.cl
@@ -87,19 +87,19 @@
* @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
* @param[in] epsilon Epsilon parameter in the batch normalization equation
*/
-__kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
+__kernel void batchnormalization_layer_nchw(TENSOR3D_DECLARATION(input),
#ifndef IN_PLACE
- TENSOR3D_DECLARATION(output),
+ TENSOR3D_DECLARATION(output),
#endif /* not IN_PLACE */
- VECTOR_DECLARATION(mean),
- VECTOR_DECLARATION(var),
+ VECTOR_DECLARATION(mean),
+ VECTOR_DECLARATION(var),
#ifndef USE_DEFAULT_BETA
- VECTOR_DECLARATION(beta),
+ VECTOR_DECLARATION(beta),
#endif /* USE_DEFAULT_BETA */
#ifndef USE_DEFAULT_GAMMA
- VECTOR_DECLARATION(gamma),
+ VECTOR_DECLARATION(gamma),
#endif /* USE_DEFAULT_GAMMA */
- float epsilon)
+ float epsilon)
{
Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
#ifdef IN_PLACE
@@ -145,7 +145,7 @@ __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
res = MUL_OP(gamma_vec, x_bar);
#else /* USE_DEFAULT_GAMMA */
// gamma is equal to 1, no need to perform multiplications
- res = x_bar;
+ res = x_bar;
#endif /* USE_DEFAULT_GAMMA */
#ifndef USE_DEFAULT_BETA
@@ -161,4 +161,113 @@ __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
(res, 0, (__global DATA_TYPE *)out.ptr);
}
+/** Apply batch normalization on tensors with NHWC format.
+ *
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: QS8/QS16/F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ * @param[in] mean_ptr Pointer to the mean source tensor. Supported data types: same as @p input_ptr
+ * @param[in] mean_stride_x Stride of the mean source tensor in X dimension (in bytes)
+ * @param[in] mean_step_x mean_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] mean_offset_first_element_in_bytes The offset of the first element in the mean source tensor
+ * @param[in] var_ptr Pointer to the var tensor. Supported data types: same as @p input_ptr
+ * @param[in] var_stride_x Stride of the var tensor in X dimension (in bytes)
+ * @param[in] var_step_x var_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] var_offset_first_element_in_bytes The offset of the first element in the var source tensor
+ * @param[in] beta_ptr Pointer to the beta source tensor. Supported data types: same as @p input_ptr
+ * @param[in] beta_stride_x Stride of the beta source tensor in X dimension (in bytes)
+ * @param[in] beta_step_x beta_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] beta_offset_first_element_in_bytes The offset of the first element in the beta source tensor
+ * @param[in] gamma_ptr Pointer to the gamma source tensor. Supported data types: same as @p input_ptr
+ * @param[in] gamma_stride_x Stride of the gamma source tensor in X dimension (in bytes)
+ * @param[in] gamma_step_x gamma_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] gamma_offset_first_element_in_bytes The offset of the first element in the gamma source tensor
+ * @param[in] epsilon Epsilon parameter in the batch normalization equation
+ */
+__kernel void batchnormalization_layer_nhwc(TENSOR3D_DECLARATION(input),
+#ifndef IN_PLACE
+ TENSOR3D_DECLARATION(output),
+#endif /* not IN_PLACE */
+ VECTOR_DECLARATION(mean),
+ VECTOR_DECLARATION(var),
+#ifndef USE_DEFAULT_BETA
+ VECTOR_DECLARATION(beta),
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+ VECTOR_DECLARATION(gamma),
+#endif /* USE_DEFAULT_GAMMA */
+ float epsilon)
+{
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+ Tensor3D out = in;
+#else /* IN_PLACE */
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+ Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
+ Vector var = CONVERT_TO_VECTOR_STRUCT(var);
+#ifndef USE_DEFAULT_BETA
+ Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+ Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
+#endif /* USE_DEFAULT_GAMMA */
+
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ data = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ denominator = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ numerator = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ x_bar = 0;
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ res = 0;
+
+ const int current_slice = get_global_id(0);
+
+ data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)in.ptr);
+ denominator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(var.ptr + current_slice * VEC_SIZE * var.stride_x));
+ denominator = INVSQRT_OP(ADD_OP(denominator, ((VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(epsilon))));
+
+ // Calculate x bar and store results
+ numerator = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(mean.ptr + current_slice * VEC_SIZE * mean.stride_x));
+ numerator = SUB_OP(data, numerator);
+ x_bar = MUL_OP(numerator, denominator);
+
+#ifndef USE_DEFAULT_GAMMA
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ gamma_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(gamma.ptr + current_slice * VEC_SIZE * gamma.stride_x));
+
+ res = MUL_OP(gamma_vec, x_bar);
+#else /* USE_DEFAULT_GAMMA */
+ // gamma is equal to 1, no need to perform multiplications
+ res = x_bar;
+#endif /* USE_DEFAULT_GAMMA */
+
+#ifndef USE_DEFAULT_BETA
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ beta_vec = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)(beta.ptr + current_slice * VEC_SIZE * beta.stride_x));
+ // beta is not zero, hence we need to perform the addition
+ res = ADD_OP(res, beta_vec);
+#endif /* USE_DEFAULT_BETA */
+
+ res = ACTIVATION_FUNC(res);
+
+ VSTORE(VEC_SIZE)
+ (res, 0, (__global DATA_TYPE *)out.ptr);
+}
#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */
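The NHWC variant differs from the NCHW one only in how the per-channel statistics are indexed: with channels along X, each work-item vector-loads VEC_SIZE consecutive mean/var/beta/gamma values at offset current_slice * VEC_SIZE, instead of broadcasting a single scalar per Z-slice. The math is unchanged: x_bar = (x - mean) / sqrt(var + epsilon), then res = gamma * x_bar + beta. A minimal host-side reference of that math, assuming float data in a flat [N][H][W][C] buffer (names are illustrative, not library API):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Reference NHWC batch normalization over a flat buffer; channel is the
    // innermost dimension, so the channel index is simply i % channels.
    void batchnorm_nhwc_ref(std::vector<float> &x, std::size_t channels,
                            const std::vector<float> &mean, const std::vector<float> &var,
                            const std::vector<float> &beta, const std::vector<float> &gamma,
                            float epsilon)
    {
        for(std::size_t i = 0; i < x.size(); ++i)
        {
            const std::size_t c     = i % channels;
            const float       x_bar = (x[i] - mean[c]) / std::sqrt(var[c] + epsilon);
            x[i]                    = gamma[c] * x_bar + beta[c]; // defaults: gamma = 1, beta = 0
        }
    }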
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 62f21eed96..3a2211c522 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -49,6 +49,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var);
+ ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(get_data_layout_dimension_index(input->data_layout(), DataLayoutDimension::CHANNEL)) != mean->dimension(0));
if(beta != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta);
@@ -62,7 +63,6 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, gamma);
}
- ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != mean->dimension(0));
if(act_info.enabled())
{
ActivationLayerInfo::ActivationFunction act = act_info.activation();
@@ -75,6 +75,7 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
if(output != nullptr && output->total_size() != 0)
{
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
}
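Moving the channel check behind get_data_layout_dimension_index is what makes the validation layout-aware: the mean/var length is compared against dimension 2 for NCHW but dimension 0 for NHWC. A small sketch of the indices involved, assuming the helper's usual behaviour:

    // Channel dimension index per layout (arm_compute convention:
    // NCHW -> [W, H, C, N], NHWC -> [C, W, H, N]).
    const size_t c_nchw = get_data_layout_dimension_index(DataLayout::NCHW, DataLayoutDimension::CHANNEL); // == 2
    const size_t c_nhwc = get_data_layout_dimension_index(DataLayout::NHWC, DataLayoutDimension::CHANNEL); // == 0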
@@ -152,7 +153,7 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
build_opts.add_option_if(gamma == nullptr, "-DUSE_DEFAULT_GAMMA");
// Create kernel
- _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts.options()));
+ _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batchnormalization_layer_" + lower_string(string_from_data_layout(input->info()->data_layout())), build_opts.options()));
// Set kernel static arguments
unsigned int include_output = (!_run_in_place) ? 1 : 0;
@@ -173,6 +174,8 @@ void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *out
ICLKernel::configure(win_config.second);
_config_id = "batch_normalization_layer_";
+ _config_id += string_from_data_layout(input->info()->data_layout());
+ _config_id += "_";
_config_id += string_from_data_type(input->info()->data_type());
_config_id += "_";
_config_id += support::cpp11::to_string(input->info()->dimension(0));
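The configure step composes the entry-point name from the tensor's layout using the two string helpers this patch wires together. A one-line sketch of the resulting string:

    const std::string kernel_name = "batchnormalization_layer_" + lower_string(string_from_data_layout(DataLayout::NHWC));
    // kernel_name == "batchnormalization_layer_nhwc"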
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index 4a237f9daa..b5663e63e4 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -126,6 +126,18 @@ const std::string &arm_compute::string_from_channel(Channel channel)
return channels_map[channel];
}
+const std::string &arm_compute::string_from_data_layout(DataLayout dl)
+{
+ static std::map<DataLayout, const std::string> dl_map =
+ {
+ { DataLayout::UNKNOWN, "UNKNOWN" },
+ { DataLayout::NCHW, "NCHW" },
+ { DataLayout::NHWC, "NHWC" },
+ };
+
+ return dl_map[dl];
+}
+
const std::string &arm_compute::string_from_data_type(DataType dt)
{
static std::map<DataType, const std::string> dt_map =
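string_from_data_layout mirrors the existing string_from_data_type lookup: a static map queried through operator[], returning a reference to the stored string. All three DataLayout enumerators are covered, so every lookup hits an existing entry. A usage sketch:

    const std::string &name = string_from_data_layout(DataLayout::NHWC); // "NHWC"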
diff --git a/tests/benchmark/CL/BatchNormalizationLayer.cpp b/tests/benchmark/CL/BatchNormalizationLayer.cpp
index 3d11aea1e7..9ae80a8854 100644
--- a/tests/benchmark/CL/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/CL/BatchNormalizationLayer.cpp
@@ -56,7 +56,7 @@ REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, CLBatchNormali
framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
data_types),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
@@ -65,7 +65,7 @@ REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizat
framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
framework::dataset::make("Batches", 1)));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
@@ -74,7 +74,7 @@ REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLB
framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
framework::dataset::make("Batches", 1)));
TEST_SUITE(NIGHTLY)
@@ -85,7 +85,7 @@ REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, CLBatchNormali
framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
data_types),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
@@ -94,7 +94,7 @@ REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizat
framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
framework::dataset::make("Batches", { 4, 8 })));
REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
@@ -103,7 +103,7 @@ REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLB
framework::dataset::make("UseBeta", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
data_types),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
framework::dataset::make("Batches", { 4, 8 })));
TEST_SUITE_END()
TEST_SUITE_END()
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index 6190e67dba..f6dc6b377c 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -66,7 +66,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combi
combine(framework::dataset::make("UseBeta", { false, true }),
framework::dataset::make("UseGamma", { false, true }))),
framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
shape0, shape1, epsilon, use_gamma, use_beta, dt, data_layout)
{
// Set fixed point position data type allowed
@@ -168,7 +168,7 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framewor
framework::dataset::make("UseGamma", { false, true }))),
act_infos),
framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32, 0);
@@ -181,7 +181,7 @@ FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework
framework::dataset::make("UseGamma", { false, true }))),
framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
framework::dataset::make("DataType", DataType::F16)),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16, 0);