From 409ee0a69799364797263d13dd95936c851bfe80 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 18 Aug 2017 10:16:09 +0100
Subject: COMPMID-417: Add in-place support for batch-normalization.

Change-Id: I4b0c9348f3bc2addc198a76fadd1b583abf42b60
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/84434
Tested-by: Kaizen
Reviewed-by: Michalis Spyrou
Reviewed-by: Gian Marco Iodice
---
 .../CL/kernels/CLBatchNormalizationLayerKernel.h   | 23 +++++----
 .../NEON/kernels/NEBatchNormalizationLayerKernel.h | 25 +++++-----
 .../CL/functions/CLBatchNormalizationLayer.h       | 21 ++++----
 .../runtime/CL/functions/CLConvolutionLayer.h      |  2 -
 .../NEON/functions/NEBatchNormalizationLayer.h     | 21 ++++----
 src/core/CL/cl_kernels/batchnormalization_layer.cl | 18 ++++---
 .../CL/kernels/CLBatchNormalizationLayerKernel.cpp | 54 +++++++++++++--------
 .../kernels/NEBatchNormalizationLayerKernel.cpp    | 56 ++++++++++++----------
 src/core/NEON/kernels/NESoftmaxLayerKernel.cpp     |  1 -
 .../CL/functions/CLBatchNormalizationLayer.cpp     |  2 +-
 .../NEON/functions/NEBatchNormalizationLayer.cpp   |  2 +-
 11 files changed, 132 insertions(+), 93 deletions(-)

diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index 6df7ae4fc7..add1dfbb8c 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -50,22 +50,25 @@ public:
 
     /** Set the input and output tensors.
      *
-     * @param[in]  input   Source tensor. 3 lower dimensions represent a single input with dimensions [width, height, FM]. Data types supported: QS8/QS16/F32.
-     * @param[out] output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
-     *                     The rest are optional and used for representing batches.
-     * @param[in]  mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  epsilon Small value to avoid division with zero.
+     * @note If the output tensor is a nullptr, the batch normalization function will be performed in-place
+     *
+     * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
+     *                         3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                         The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[in]      mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      epsilon Small value to avoid division with zero.
+     * @param[out]     output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      */
-    void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
+    void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
 
 private:
-    const ICLTensor *_input;
+    ICLTensor       *_input;
     ICLTensor       *_output;
     const ICLTensor *_mean;
     const ICLTensor *_var;
diff --git a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
index 29fcbd26a0..8ac70be727 100644
--- a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
@@ -49,24 +49,27 @@ public:
     ~NEBatchNormalizationLayerKernel() = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input   Source tensor. 3 lower dimensions represent a single input with dimensions [width, height, FM].
-     *                     The rest are optional and used for representing batches. Data types supported: QS8/F32.
-     * @param[in]  mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  epsilon Small value to avoid division with zero.
-     * @param[out] output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     * @note If the output tensor is a nullptr, the batch normalization function will be performed in-place
+     *
+     * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
+     *                         3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                         The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[in]      mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      epsilon Small value to avoid division with zero.
+     * @param[out]     output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      */
-    void configure(const ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon);
+    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon);
 
     // Inherited methods overridden:
     void run(const Window &window) override;
 
 private:
-    using BatchNormFunction = void(const ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window);
+    using BatchNormFunction = void(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window);
     BatchNormFunction *_func;
-    const ITensor     *_input;
+    ITensor           *_input;
     ITensor           *_output;
     const ITensor     *_mean;
     const ITensor     *_var;
diff --git a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
index 882786f1d6..ffb66bee60 100644
--- a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
@@ -46,16 +46,19 @@ public:
     CLBatchNormalizationLayer();
     /** Set the input and output tensors.
      *
-     * @param[in]  input   Source tensor. 3 lower dimensions represent a single input with dimensions [width, height, FM].
-     *                     The rest are optional and used for representing batches. Data types supported: QS8/QS16/F32.
-     * @param[in]  mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  epsilon Small value to avoid division with zero.
-     * @param[out] output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     * @note If the output tensor is a nullptr, the batch normalization function will be performed in-place
+     *
+     * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
+     *                         3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                         The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[in]      mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      epsilon Small value to avoid division with zero.
+     * @param[out]     output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      */
-    void configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
+    void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
 
     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
index aba88bd856..2057b6ff8a 100644
--- a/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLConvolutionLayer.h
@@ -34,8 +34,6 @@
 #include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
 #include "arm_compute/core/CL/kernels/CLWeightsReshapeKernel.h"
 #include "arm_compute/core/Types.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 
 namespace arm_compute
diff --git a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
index b0b5c122cb..041b9e7290 100644
--- a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
@@ -45,16 +45,19 @@ public:
     NEBatchNormalizationLayer();
     /** Set the input and output tensors.
      *
-     * @param[in]  input   Source tensor. 3 lower dimensions represent a single input with dimensions [width, height, FM].
-     *                     The rest are optional and used for representing batches. Data types supported: QS8/F32.
-     * @param[in]  mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  epsilon Small value to avoid division with zero.
-     * @param[out] output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     * @note If the output tensor is a nullptr, the batch normalization function will be performed in-place
+     *
+     * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
+     *                         3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                         The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[in]      mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in]      epsilon Small value to avoid division with zero.
+     * @param[out]     output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      */
-    void configure(const ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon);
+    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon);
 
     // Inherited methods overridden:
     void run() override;
diff --git a/src/core/CL/cl_kernels/batchnormalization_layer.cl b/src/core/CL/cl_kernels/batchnormalization_layer.cl
index cb4d0c8947..904d5b3045 100644
--- a/src/core/CL/cl_kernels/batchnormalization_layer.cl
+++ b/src/core/CL/cl_kernels/batchnormalization_layer.cl
@@ -80,19 +80,25 @@
  * @param[in] epsilon Epsilon parameter in the batch normalization equation
  */
 __kernel void batchnormalization_layer(TENSOR3D_DECLARATION(input),
+#ifndef IN_PLACE
                                        TENSOR3D_DECLARATION(output),
+#endif /* not IN_PLACE */
                                        VECTOR_DECLARATION(mean),
                                        VECTOR_DECLARATION(var),
                                        VECTOR_DECLARATION(beta),
                                        VECTOR_DECLARATION(gamma),
                                        float epsilon)
 {
-    Tensor3D in    = CONVERT_TO_TENSOR3D_STRUCT(input);
-    Tensor3D out   = CONVERT_TO_TENSOR3D_STRUCT(output);
-    Vector   mean  = CONVERT_TO_VECTOR_STRUCT(mean);
-    Vector   var   = CONVERT_TO_VECTOR_STRUCT(var);
-    Vector   beta  = CONVERT_TO_VECTOR_STRUCT(beta);
-    Vector   gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
+    Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
+#ifdef IN_PLACE
+    Tensor3D out = in;
+#else /* IN_PLACE */
+    Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+#endif /* IN_PLACE */
+    Vector mean  = CONVERT_TO_VECTOR_STRUCT(mean);
+    Vector var   = CONVERT_TO_VECTOR_STRUCT(var);
+    Vector beta  = CONVERT_TO_VECTOR_STRUCT(beta);
+    Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
 
     VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
     _in = 0;
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 02bf35a860..18c0c9721e 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -42,20 +42,10 @@ CLBatchNormalizationLayerKernel::CLBatchNormalizationLayerKernel()
 {
 }
 
-void CLBatchNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma,
+void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma,
                                                 float epsilon)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
-
-    // Output tensor auto initialization if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != mean->info()->dimension(0));
 
     _input   = input;
     _output  = output;
@@ -65,12 +55,31 @@ void CLBatchNormalizationLayerKernel::configure(const ICLTensor *input, ICLTenso
     _gamma   = gamma;
     _epsilon = epsilon;
 
+    if(output != nullptr)
+    {
+        // Output tensor auto initialization if not yet initialized
+        auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
+
+        ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, mean, var, beta, gamma);
+        ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var, beta, gamma);
+        ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var, beta, gamma);
+    }
+
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
+    ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != mean->info()->dimension(0));
+
     const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
 
     // Set build options
     std::set<std::string> build_opts;
     build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
     build_opts.emplace(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+    build_opts.emplace(output == nullptr ? "-DIN_PLACE" : "");
     if(is_data_type_fixed_point(input->info()->data_type()))
     {
         build_opts.emplace("-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
@@ -84,14 +93,18 @@ void CLBatchNormalizationLayerKernel::configure(const ICLTensor *input, ICLTenso
     _kernel.setArg(idx++, _epsilon);
 
     // Configure kernel window
-    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
-
+    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
     AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
-
-    update_window_and_padding(win, input_access, output_access);
-    output_access.set_valid_region(win, input->info()->valid_region());
-
+    if(output != nullptr)
+    {
+        AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
+        update_window_and_padding(win, input_access, output_access);
+        output_access.set_valid_region(win, input->info()->valid_region());
+    }
+    else
+    {
+        update_window_and_padding(win, input_access);
+    }
     ICLKernel::configure(win);
 }
 
@@ -115,7 +128,10 @@ void CLBatchNormalizationLayerKernel::run(const Window &window, cl::CommandQueue
     {
         idx = 0;
         add_3D_tensor_argument(idx, _input, slice);
-        add_3D_tensor_argument(idx, _output, slice);
+        if(_output != nullptr)
+        {
+            add_3D_tensor_argument(idx, _output, slice);
+        }
         enqueue(queue, *this, slice);
     }
     while(window.slide_window_slice_3D(slice));
diff --git a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
index 290a3c59ba..66f174e883 100644
--- a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
@@ -38,7 +38,7 @@ NEBatchNormalizationLayerKernel::NEBatchNormalizationLayerKernel()
 {
 }
 
-void batch_normalization_q8(const ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+void batch_normalization_q8(ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
 {
     Iterator input(in, window);
     Iterator output(out, window);
@@ -82,7 +82,7 @@ void batch_normalization_q8(const ITensor *in, ITensor *out, const ITensor *mean
     input, output);
 }
 
-void batch_normalization_q16(const ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+void batch_normalization_q16(ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
 {
     Iterator input(in, window);
     Iterator output(out, window);
@@ -126,7 +126,7 @@ void batch_normalization_q16(const ITensor *in, ITensor *out, const ITensor *mea
     input, output);
 }
 
-void batch_normalization_fp32(const ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+void batch_normalization_fp32(ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
 {
     Iterator input(in, window);
     Iterator output(out, window);
@@ -170,7 +170,7 @@ void batch_normalization_fp32(const ITensor *in, ITensor *out, const ITensor *me
 }
 
 #ifdef ARM_COMPUTE_ENABLE_FP16
-void batch_normalization_fp16(const ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
+void batch_normalization_fp16(ITensor *in, ITensor *out, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon, const Window &window)
 {
     Iterator input(in, window);
     Iterator output(out, window);
@@ -214,28 +214,33 @@ void batch_normalization_fp16(const ITensor *in, ITensor *out, const ITensor *me
 }
 #endif /* ARM_COMPUTE_ENABLE_FP16 */
 
-void NEBatchNormalizationLayerKernel::configure(const ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon)
+void NEBatchNormalizationLayerKernel::configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
-
-    // Output tensor auto initialization if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != mean->info()->dimension(0));
 
     _input   = input;
-    _output  = output;
+    _output  = input;
     _mean    = mean;
     _var     = var;
     _gamma   = gamma;
     _beta    = beta;
     _epsilon = epsilon;
 
+    if(output != nullptr)
+    {
+        // Output tensor auto initialization if not yet initialized
+        auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
+
+        ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+
+        _output = output;
+    }
+
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, mean, var, beta, gamma);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
+    ARM_COMPUTE_ERROR_ON(input->info()->dimension(2) != mean->info()->dimension(0));
+
     unsigned int num_elems_processed_per_iteration = 0;
 
     switch(input->info()->data_type())
@@ -263,15 +268,18 @@ void NEBatchNormalizationLayerKernel::configure(const ITensor *input, ITensor *o
             break;
     }
 
-    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
-
+    Window                 win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
     AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
-
-    update_window_and_padding(win, input_access, output_access);
-
-    output_access.set_valid_region(win, input->info()->valid_region());
-
+    if(output != nullptr)
+    {
+        AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
+        update_window_and_padding(win, input_access, output_access);
+        output_access.set_valid_region(win, input->info()->valid_region());
+    }
+    else
+    {
+        update_window_and_padding(win, input_access);
+    }
     INEKernel::configure(win);
 }
 
diff --git a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
index 176e3d688e..4fed16b5fa 100644
--- a/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
+++ b/src/core/NEON/kernels/NESoftmaxLayerKernel.cpp
@@ -26,7 +26,6 @@
 #include "arm_compute/core/AccessWindowStatic.h"
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
 #include "arm_compute/core/NEON/NEFixedPoint.h"
 #include "arm_compute/core/NEON/NEMath.h"
diff --git a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
index 3df673c6a6..68cdaac812 100644
--- a/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLBatchNormalizationLayer.cpp
@@ -37,7 +37,7 @@ CLBatchNormalizationLayer::CLBatchNormalizationLayer()
 {
 }
 
-void CLBatchNormalizationLayer::configure(const ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon)
+void CLBatchNormalizationLayer::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon)
 {
     _norm_kernel.configure(input, output, mean, var, beta, gamma, epsilon);
 }
diff --git a/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp b/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
index a24429c6de..ef79b02048 100644
--- a/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
+++ b/src/runtime/NEON/functions/NEBatchNormalizationLayer.cpp
@@ -37,7 +37,7 @@ NEBatchNormalizationLayer::NEBatchNormalizationLayer()
 {
 }
 
-void NEBatchNormalizationLayer::configure(const ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon)
+void NEBatchNormalizationLayer::configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon)
 {
     // Configure kernel
     _norm_kernel.configure(input, output, mean, var, beta, gamma, epsilon);
--
cgit v1.2.1
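Usage sketch (editor's addition, not part of the patch): the @note added to the configure() documentation states that a nullptr output selects the in-place path, so a call through the NEON runtime function could look as follows. The tensor shapes, data type and epsilon value are illustrative assumptions, not taken from the commit.

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative dimensions: 16x16 spatial size, 8 feature maps, single batch.
    Tensor src, mean, var, beta, gamma;
    src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::F32));
    mean.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    var.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    beta.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));
    gamma.allocator()->init(TensorInfo(TensorShape(8U), 1, DataType::F32));

    // Passing nullptr as the output tensor selects the new in-place path:
    // the normalized result is written back into src.
    NEBatchNormalizationLayer bn;
    bn.configure(&src, nullptr, &mean, &var, &beta, &gamma, 0.001f); // epsilon value is illustrative

    // Allocate backing memory after configure(), fill src/mean/var/beta/gamma, then run.
    src.allocator()->allocate();
    mean.allocator()->allocate();
    var.allocator()->allocate();
    beta.allocator()->allocate();
    gamma.allocator()->allocate();

    bn.run();
    return 0;
}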