author     Gian Marco Iodice <gianmarco.iodice@arm.com>    2017-08-10 10:43:40 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>       2018-11-02 16:35:24 +0000
commit     d60a6b9d7977c6bd63ff7c523bed84d42363898b (patch)
tree       4b1ef99dfd76883060688dcaadbadaaf5c14cf6d
parent     4e09b3839206254d0df56095ad0762718a764c9c (diff)
download   ComputeLibrary-d60a6b9d7977c6bd63ff7c523bed84d42363898b.tar.gz
COMPMID-477 - Optimized CLNormalizationLayer
CLPixelWiseMultiplicationKernel has been removed from the function: the normalization kernel now squares its neighbours inline instead of reading a pre-squared intermediate tensor.

Change-Id: Ibe7edd7921d5cef6ff68fdeeca89771129a8eaea
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/84459
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
-rw-r--r--  arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h  |  13
-rw-r--r--  arm_compute/runtime/CL/functions/CLNormalizationLayer.h   |  17
-rw-r--r--  src/core/CL/cl_kernels/normalization_layer.cl             | 120
-rw-r--r--  src/core/CL/kernels/CLNormalizationLayerKernel.cpp        |  25
-rw-r--r--  src/runtime/CL/functions/CLNormalizationLayer.cpp         |  21
5 files changed, 82 insertions(+), 114 deletions(-)
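
For context, a minimal usage sketch of the function after this change. It assumes the usual CL setup calls from the library's examples (CLScheduler::get().default_init(), allocator()->init()/allocate()); the tensor shape and normalization parameters are illustrative only, not taken from this patch.

#include "arm_compute/runtime/CL/CLFunctions.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
    CLScheduler::get().default_init();

    // Illustrative 3D input: [width, height, IFM]
    CLTensor input, output;
    input.allocator()->init(TensorInfo(TensorShape(224U, 224U, 64U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(224U, 224U, 64U), 1, DataType::F32));

    // Cross-map normalization over 5 neighbouring feature maps
    CLNormalizationLayer norm;
    norm.configure(&input, &output, NormalizationLayerInfo(NormType::CROSS_MAP, 5));

    input.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input ...
    norm.run();
    return 0;
}
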
diff --git a/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
index 5eedc31486..5f8c9c9d07 100644
--- a/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLNormalizationLayerKernel.h
@@ -48,14 +48,12 @@ public:
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data types supported: QS8/QS16/F16/F32.
- * @param[in] squared_input Source with each element has been squared. 3 lower dims represent a single input with dimensions [width, height, IFM].
- * Data types supported: same as @p input.
- * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data types supported: same as @p input.
- * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
+ * @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
+ * and an optional 4th dimension for batch of inputs. Data types supported: QS8/QS16/F16/F32.
+ * @param[out] output Destination tensor. Output will have the same number of dimensions as input. Data types supported: same as @p input.
+ * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
- void configure(const ICLTensor *input, const ICLTensor *squared_input, ICLTensor *output, NormalizationLayerInfo norm_info);
+ void configure(const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info);
// Inherited methods overridden:
void run(const Window &window, cl::CommandQueue &queue) override;
@@ -63,7 +61,6 @@ public:
private:
const ICLTensor *_input;
- const ICLTensor *_squared_input;
ICLTensor *_output;
BorderSize _border_size;
bool _is_in_map;
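
At the kernel level the intermediate squared-input tensor disappears from the interface. A minimal sketch of a direct kernel configuration under the new signature, reusing the `input`/`output` tensors from the usage sketch above (most code would go through CLNormalizationLayer instead, which also sets up the border handling):

CLNormalizationLayerKernel norm_kernel;
norm_kernel.configure(&input, &output, NormalizationLayerInfo(NormType::CROSS_MAP, 5));
// When driving the kernel directly, the caller must still fill the input border
// (the CLNormalizationLayer function below configures CLFillBorderKernel for this).
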
diff --git a/arm_compute/runtime/CL/functions/CLNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
index a4dae85c1d..0818cec2e5 100644
--- a/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLNormalizationLayer.h
@@ -39,7 +39,6 @@ class ICLTensor;
/** Basic function to simulate a normalization layer. This function calls the following CL kernels:
*
- * -# @ref CLPixelWiseMultiplicationKernel
* -# @ref CLFillBorderKernel
* -# @ref CLNormalizationLayerKernel
*
@@ -51,21 +50,19 @@ public:
CLNormalizationLayer();
/** Set the input and output tensors.
*
- * @param[in] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
- * and an optional 4th dimension for batch of inputs. Data types supported: F16, F32. Number of channels must be 1.
- * @param[out] output Destination tensor. Dimensions, data type and number of channels must match the input ones.
- * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
+ * @param[in, out] input Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
+ * and an optional 4th dimension for batch of inputs. Data types supported: F16/F32 (Written to by the border handler)
+ * @param[out] output Destination tensor. Dimensions, data type and number of channels must match the input ones.
+ * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
*/
- void configure(const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info);
+ void configure(ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info);
// Inherited methods overridden:
void run() override;
private:
- CLTensor _squared_input; /**< The intermediate buffer which stores results of squaring input*/
- CLNormalizationLayerKernel _norm_kernel; /**< Normalization layer kernel to run */
- CLPixelWiseMultiplicationKernel _multiply_kernel; /**< Pixel multiplication kernel to run */
- CLFillBorderKernel _border_handler; /**< Kernel to handle borders */
+ CLNormalizationLayerKernel _norm_kernel; /**< Normalization layer kernel to run */
+ CLFillBorderKernel _border_handler; /**< Kernel to handle borders */
};
}
#endif /* __ARM_COMPUTE_CLNORMALIZATIONLAYER_H__ */
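
Note the signature change on the function side as well: `input` becomes an in/out parameter because the border handler now writes into the input tensor itself rather than into the old intermediate buffer. For illustration, the normalization parameters spelled out; `norm`, `input` and `output` are the objects from the earlier sketch, and the constructor order and default values shown here are assumptions based on the library's Types.h of that period, not part of this patch.

NormalizationLayerInfo info(NormType::IN_MAP_1D, // IN_MAP_1D or CROSS_MAP (2D in-map is not implemented)
                            5,                   // norm_size: window size centred on the current element
                            0.0001f,             // alpha: the kernel receives COEFF = alpha / norm_size
                            0.5f,                // beta: exponent applied to the accumulated response
                            1.0f);               // kappa: additive constant
norm.configure(&input, &output, info); // `input` may have its border region written before the kernel runs
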
diff --git a/src/core/CL/cl_kernels/normalization_layer.cl b/src/core/CL/cl_kernels/normalization_layer.cl
index e2a5c4079a..4e65560b95 100644
--- a/src/core/CL/cl_kernels/normalization_layer.cl
+++ b/src/core/CL/cl_kernels/normalization_layer.cl
@@ -54,43 +54,33 @@
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size, e.g. -DVEC_SIZE=16
+ * @note The radius should be given as a preprocessor argument using -DRADIUS=size. e.g. -DRADIUS=5
+ * @note The number of slices should be given as a preprocessor argument using -DNUM_SLICES=size. e.g. -DNUM_SLICES=192
* @note In case of fixed-point operation -DFIXED_POINT_POSITION=fixed_point_position must be provided: e.g. -DFIXED_POINT_POSITION=3
* @note Scaling coefficient (= alpha/norm_size), beta and kappa need to be passed at compile time using -DCOEFF, -DALPHA and -DKAPPA
*
- * @param[in] input_ptr Pointer to the first source tensor. Supported data types: QS8/QS16/F16/F32
- * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[in] squared_input_ptr Pointer to the second source tensor. Supported data types: same as @p input_ptr
- * @param[in] squared_input_stride_x Stride of the second source tensor in X dimension (in bytes)
- * @param[in] squared_input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] squared_input_stride_y Stride of the second source tensor in Y dimension (in bytes)
- * @param[in] squared_input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] squared_input_stride_z Stride of the second source tensor in Z dimension (in bytes)
- * @param[in] squared_input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] squared_input_offset_first_element_in_bytes The offset of the second element in the second source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] radius Number of elements on the right or left side to normalize across
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: QS8/QS16/F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(squared_input),
- TENSOR3D_DECLARATION(output),
- uint radius)
+ TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D squared_in = CONVERT_TO_TENSOR3D_STRUCT(squared_input);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
acc = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0;
@@ -101,15 +91,16 @@ __kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
const VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
kappa_v = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))SQCVT_SAT(KAPPA);
- const int num_of_slices = get_global_size(2);
const int current_slice = get_global_id(2);
- const int left_slice = max(current_slice - (int)radius, (int)0);
- const int right_slice = min(current_slice + (int)radius, (int)(num_of_slices - 1));
+ const int left_slice = max(current_slice - (int)RADIUS, (int)0);
+ const int right_slice = min(current_slice + (int)RADIUS, (int)(NUM_SLICES - 1));
for(int i = left_slice; i <= right_slice; i++)
{
- acc = ADD_OP(acc, LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&squared_in, 0, 0, i - current_slice)));
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, 0, 0, i - current_slice));
+ acc = ADD_OP(acc, MUL_OP(values, values));
}
acc = ADD_OP(MUL_OP(acc, coeff_v), kappa_v);
@@ -125,43 +116,32 @@ __kernel void normalization_layer_cross_map(TENSOR3D_DECLARATION(input),
*
* @note Datatype should be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
* @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size, e.g. -DVEC_SIZE=16
+ * @note The radius should be given as a preprocessor argument using -DRADIUS=size. e.g. -DRADIUS=5
* @note In case of fixed-point operation -DFIXED_POINT_POSITION=fixed_point_position must be provided: e.g. -DFIXED_POINT_POSITION=3
* @note Scaling coefficient (= alpha/norm_size), beta and kappa need to be passed at compile time using -DCOEFF, -DALPHA and -DKAPPA
*
- * @param[in] input_ptr Pointer to the first source tensor. Supported data types: QS8/F16/F32
- * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
- * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
- * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
- * @param[in] squared_input_ptr Pointer to the second source tensor. Supported data types: same as @p input_ptr
- * @param[in] squared_input_stride_x Stride of the second source tensor in X dimension (in bytes)
- * @param[in] squared_input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] squared_input_stride_y Stride of the second source tensor in Y dimension (in bytes)
- * @param[in] squared_input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] squared_input_stride_z Stride of the second source tensor in Z dimension (in bytes)
- * @param[in] squared_input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] squared_input_offset_first_element_in_bytes The offset of the second element in the second source tensor
- * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
- * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
- * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
- * @param[in] output_stride_y Stride of the first destination tensor in Y dimension (in bytes)
- * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
- * @param[in] output_stride_z Stride of the first source tensor in Z dimension (in bytes)
- * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
- * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[in] radius Number of elements on the right or left side to normalize across
+ * @param[in] input_ptr Pointer to the first source tensor. Supported data types: QS8/F16/F32
+ * @param[in] input_stride_x Stride of the first source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] input_stride_y Stride of the first source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] input_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the first source tensor
+ * @param[out] output_ptr Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in] output_stride_y Stride of the first destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in] output_stride_z Stride of the first source tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
*/
__kernel void normalization_layer_in_map_1D(TENSOR3D_DECLARATION(input),
- TENSOR3D_DECLARATION(squared_input),
- TENSOR3D_DECLARATION(output),
- uint radius)
+ TENSOR3D_DECLARATION(output))
{
- Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
- Tensor3D squared_in = CONVERT_TO_TENSOR3D_STRUCT(squared_input);
- Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
+ Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
acc = (VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE))0;
@@ -174,12 +154,14 @@ __kernel void normalization_layer_in_map_1D(TENSOR3D_DECLARATION(input),
const int current_pos = get_global_id(0) << 2;
- const int left_pos = max(current_pos - (int)radius, -3);
- const int right_pos = min(current_pos + (int)radius, (int)((get_global_size(0) << 2) + 3 - 1));
+ const int left_pos = max(current_pos - (int)RADIUS, -3);
+ const int right_pos = min(current_pos + (int)RADIUS, (int)((get_global_size(0) << 2) + 3 - 1));
for(int i = left_pos; i <= right_pos; i += 1)
{
- acc = ADD_OP(acc, LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&squared_in, i - current_pos, 0, 0)));
+ VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+ values = LOAD_OP(0, (__global DATA_TYPE *)tensor3D_offset(&in, i - current_pos, 0, 0));
+ acc = ADD_OP(acc, MUL_OP(values, values));
}
acc = ADD_OP(MUL_OP(acc, coeff_v), kappa_v);
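
The functional change in both kernels is the same: instead of reading a pre-squared tensor, each work-item loads its neighbours from the input and squares them on the fly, with the window radius and (for the cross-map case) the slice count baked in at compile time. Below is a scalar C++ model of the cross-map accumulation, written only to illustrate the arithmetic; the `in` callable, the coordinates and the parameter names are assumptions for illustration, not library API.

#include <algorithm>
#include <functional>

// Scalar model of normalization_layer_cross_map after this patch (illustrative only).
// `in(x, y, z)` is any callable returning the input value at that coordinate.
float cross_map_response(const std::function<float(int, int, int)> &in,
                         int x, int y, int z,
                         int num_slices /* NUM_SLICES */, int radius /* RADIUS */,
                         float coeff /* = alpha / norm_size */, float kappa)
{
    const int left_slice  = std::max(z - radius, 0);
    const int right_slice = std::min(z + radius, num_slices - 1);

    float acc = 0.0f;
    for(int i = left_slice; i <= right_slice; ++i)
    {
        const float v = in(x, y, i); // previously loaded from the pre-squared intermediate tensor
        acc += v * v;                // squaring now happens inline
    }
    // The kernel stores coeff * acc + kappa; the rest of the kernel (outside this hunk)
    // raises that accumulator to BETA and divides the input by the result.
    return coeff * acc + kappa;
}
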
diff --git a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
index b382e9d510..a74473980b 100644
--- a/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLNormalizationLayerKernel.cpp
@@ -36,7 +36,7 @@
using namespace arm_compute;
CLNormalizationLayerKernel::CLNormalizationLayerKernel()
- : _input(nullptr), _squared_input(nullptr), _output(nullptr), _border_size(0), _is_in_map(false)
+ : _input(nullptr), _output(nullptr), _border_size(0), _is_in_map(false)
{
}
@@ -45,7 +45,7 @@ BorderSize CLNormalizationLayerKernel::border_size() const
return _border_size;
}
-void CLNormalizationLayerKernel::configure(const ICLTensor *input, const ICLTensor *squared_input, ICLTensor *output, NormalizationLayerInfo norm_info)
+void CLNormalizationLayerKernel::configure(const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
{
ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
ARM_COMPUTE_ERROR_ON_NULLPTR(output);
@@ -53,21 +53,20 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, const ICLTens
// Output tensor auto initialization if not yet initialized
auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
- ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, squared_input, output);
- ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, squared_input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
ARM_COMPUTE_ERROR_ON_MSG(!(norm_info.norm_size() % 2), "Normalization size should be odd");
ARM_COMPUTE_ERROR_ON_MSG(norm_info.type() == NormType::IN_MAP_2D, "2D In-Map Normalization not implemented");
if(is_data_type_fixed_point(input->info()->data_type()))
{
- ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, squared_input, output);
+ ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(norm_info.beta(), input);
ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(norm_info.kappa(), input);
ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(norm_info.scale_coeff(), input);
}
- _input = input;
- _squared_input = squared_input;
- _output = output;
+ _input = input;
+ _output = output;
_is_in_map = (norm_info.type() != NormType::CROSS_MAP);
const unsigned int border_width = _is_in_map ? std::min(norm_info.norm_size() / 2, 3U) : 0;
@@ -87,23 +86,20 @@ void CLNormalizationLayerKernel::configure(const ICLTensor *input, const ICLTens
build_opts.emplace(("-DBETA=" + float_to_string_with_full_precision(norm_info.beta())));
build_opts.emplace(("-DKAPPA=" + float_to_string_with_full_precision(norm_info.kappa())));
build_opts.emplace(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+ build_opts.emplace(("-DRADIUS=" + support::cpp11::to_string(norm_info.norm_size() / 2)));
+ build_opts.emplace(("-DNUM_SLICES=" + support::cpp11::to_string(input->info()->dimension(2))));
// Create kernel
std::string kernel_name = (norm_info.type() == NormType::IN_MAP_1D) ? "normalization_layer_in_map_1D" : "normalization_layer_cross_map";
_kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts));
- // Set kernel static arguments
- unsigned int idx = 3 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
- _kernel.setArg<cl_uint>(idx++, norm_info.norm_size() / 2);
-
// Configure kernel window
Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
AccessWindowHorizontal input_access(input->info(), -_border_size.left, num_elems_read_per_iteration);
- AccessWindowHorizontal squared_input_access(squared_input->info(), -_border_size.left, num_elems_read_per_iteration);
AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
- update_window_and_padding(win, input_access, squared_input_access, output_access);
+ update_window_and_padding(win, input_access, output_access);
output_access.set_valid_region(win, input->info()->valid_region());
@@ -123,7 +119,6 @@ void CLNormalizationLayerKernel::run(const Window &window, cl::CommandQueue &que
{
unsigned int idx = 0;
add_3D_tensor_argument(idx, _input, slice);
- add_3D_tensor_argument(idx, _squared_input, slice);
add_3D_tensor_argument(idx, _output, slice);
enqueue(queue, *this, slice);
}
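
Because RADIUS and NUM_SLICES are now compile-time definitions rather than a runtime kernel argument, the entire kernel configuration lives in the build options. A hedged sketch of an equivalent options string for a standalone clBuildProgram() call follows; the set of defines mirrors configure() above, but the helper name and the fixed DATA_TYPE/VEC_SIZE values are assumptions for illustration.

#include <string>

// Illustrative helper (not library API): build options for the optimized kernels.
std::string make_norm_build_options(unsigned int norm_size, unsigned int num_slices,
                                    float alpha, float beta, float kappa)
{
    const float coeff = alpha / static_cast<float>(norm_size); // matches norm_info.scale_coeff()
    return std::string("-DDATA_TYPE=float -DVEC_SIZE=4") +
           " -DCOEFF="      + std::to_string(coeff) +
           " -DBETA="       + std::to_string(beta)  +
           " -DKAPPA="      + std::to_string(kappa) +
           " -DRADIUS="     + std::to_string(norm_size / 2) +   // was a setArg() at enqueue time
           " -DNUM_SLICES=" + std::to_string(num_slices);       // was get_global_size(2) inside the kernel
}
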
diff --git a/src/runtime/CL/functions/CLNormalizationLayer.cpp b/src/runtime/CL/functions/CLNormalizationLayer.cpp
index 69cef334e8..f4bd49406c 100644
--- a/src/runtime/CL/functions/CLNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLNormalizationLayer.cpp
@@ -33,29 +33,26 @@
using namespace arm_compute;
CLNormalizationLayer::CLNormalizationLayer()
- : _squared_input(), _norm_kernel(), _multiply_kernel(), _border_handler()
+ : _norm_kernel(), _border_handler()
{
}
-void CLNormalizationLayer::configure(const ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
+void CLNormalizationLayer::configure(ICLTensor *input, ICLTensor *output, NormalizationLayerInfo norm_info)
{
ARM_COMPUTE_ERROR_ON(input == nullptr);
- TensorInfo tensor_info(input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
- _squared_input.allocator()->init(tensor_info);
+ // Configure normalization kernel
+ _norm_kernel.configure(input, output, norm_info);
- _norm_kernel.configure(input, &_squared_input, output, norm_info);
- _multiply_kernel.configure(input, input, &_squared_input, 1.0f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_EVEN);
// Fill the border by 3 elements since we need vload4 in the IN_MAP normalization kernel
- _border_handler.configure(&_squared_input, _norm_kernel.border_size(), BorderMode::CONSTANT, PixelValue(0));
-
- // Allocate intermediate buffers
- _squared_input.allocator()->allocate();
+ _border_handler.configure(input, _norm_kernel.border_size(), BorderMode::CONSTANT, PixelValue(0));
}
void CLNormalizationLayer::run()
{
- CLScheduler::get().enqueue(_multiply_kernel, false);
+ // Run border handler
CLScheduler::get().enqueue(_border_handler, false);
- CLScheduler::get().enqueue(_norm_kernel, false);
+
+ // Run normalization kernel
+ CLScheduler::get().enqueue(_norm_kernel);
}