From 368e63507ad62dc1607f752302d8db6b7d603f71 Mon Sep 17 00:00:00 2001
From: Giorgio Arena
Date: Mon, 20 Aug 2018 15:06:07 +0100
Subject: COMPMID-1047 Extract Flatten function from Im2Col for NEON

Change-Id: I80f3aaadc8cae8c9ca1a5a239e79bda302b89bd8
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/144813
Tested-by: Jenkins
Reviewed-by: Gian Marco Iodice
---
 .../NEON/functions/NEFullyConnectedLayer.cpp | 32 +++++++++++-----------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
index f1606aa93e..60f6294394 100644
--- a/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
+++ b/src/runtime/NEON/functions/NEFullyConnectedLayer.cpp
@@ -74,8 +74,8 @@ Status NEFullyConnectedLayerReshapeWeights::validate(const ITensorInfo *input, c
 }
 
 NEFullyConnectedLayer::NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _im2col_kernel(), _convert_weights(), _reshape_weights_function(), _mm_gemm(), _mm_gemmlowp(), _gemmlowp_output_stage(), _accumulate_biases_kernel(),
-      _im2col_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false),
+    : _memory_group(std::move(memory_manager)), _flatten_kernel(), _convert_weights(), _reshape_weights_function(), _mm_gemm(), _mm_gemmlowp(), _gemmlowp_output_stage(), _accumulate_biases_kernel(),
+      _flatten_output(), _gemmlowp_output(), _converted_weights_output(), _reshape_weights_output(), _original_weights(nullptr), _are_weights_converted(true), _are_weights_reshaped(false),
       _is_fc_after_conv(false), _accumulate_biases(false), _is_quantized(false), _is_prepared(false)
 {
 }
@@ -112,19 +112,19 @@ void NEFullyConnectedLayer::configure_conv_fc(const ITensor *input, const ITenso
 
     // If the fully connected layer is called after a convolution layer, the input tensor must be linearized
 
-    // Initialize output tensor for im2col
-    TensorShape shape_im2col = compute_flatten_shape(input->info());
-    _im2col_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_im2col));
+    // Initialize output tensor for flatten
+    TensorShape shape_flatten = compute_flatten_shape(input->info());
+    _flatten_output.allocator()->init(input->info()->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(shape_flatten));
 
-    // Configure im2col kernel
-    _memory_group.manage(&_im2col_output);
-    _im2col_kernel.configure(input, &_im2col_output, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false, Size2D(1U, 1U), 1, true);
+    // Configure flatten kernel
+    _memory_group.manage(&_flatten_output);
+    _flatten_kernel.configure(input, &_flatten_output);
 
     // Configure matrix multiply kernel
-    configure_mm(&_im2col_output, weights, output);
+    configure_mm(&_flatten_output, weights, output);
 
-    // Allocate the output tensor for im2col once all the configure methods have been called
-    _im2col_output.allocator()->allocate();
+    // Allocate the output tensor for flatten once all the configure methods have been called
+    _flatten_output.allocator()->allocate();
 }
 
 void NEFullyConnectedLayer::configure_fc_fc(const ITensor *input, const ITensor *weights, ITensor *output)
@@ -249,7 +249,7 @@ Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorIn
     bool is_fc_after_conv = true;
     bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());
 
-    const ITensorInfo &im2col_input = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)));
+    const ITensorInfo &flatten_input = TensorInfo(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(input)));
     const ITensorInfo &reshaped_weights = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
     const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());
     const ITensorInfo &gemmlowp_output = TensorInfo(output->clone()->set_is_resizable(true).reset_padding().set_data_type(DataType::S32));
@@ -307,9 +307,9 @@ Status NEFullyConnectedLayer::validate(const ITensorInfo *input, const ITensorIn
         // Fully Connected layer after a Convolution Layer without batches
         ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (input->dimension(0) * input->dimension(1) * input->dimension(2))));
 
-        // Validate im2col kernel
-        ARM_COMPUTE_RETURN_ON_ERROR(NEIm2ColKernel::validate(input, &im2col_input, Size2D(1, 1), PadStrideInfo(1, 1, 0, 0), false, Size2D(1U, 1U), 1, true));
-        input_to_use = &im2col_input;
+        // Validate flatten kernel
+        ARM_COMPUTE_RETURN_ON_ERROR(NEFlattenLayerKernel::validate(input, &flatten_input));
+        input_to_use = &flatten_input;
     }
     else
     {
@@ -337,7 +337,7 @@ void NEFullyConnectedLayer::run()
     // Linearize input if it comes from a convolutional layer
     if(_is_fc_after_conv)
    {
-        NEScheduler::get().schedule(&_im2col_kernel, Window::DimY);
+        NEScheduler::get().schedule(&_flatten_kernel, Window::DimY);
     }
 
     // Run matrix multiply
--
cgit v1.2.1
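
Editorial note on the change: the patch replaces the 1x1 Im2Col call in configure_conv_fc() with a dedicated flatten kernel whose only job is to collapse the W, H and C dimensions of the input into a single dimension before the matrix multiply. The sketch below is a minimal, self-contained illustration of that shape computation only; it does not use the Compute Library API, and flatten_shape() plus the example dimensions are hypothetical names chosen here for illustration.

// Hypothetical sketch: collapse a [W, H, C, N] shape into [W*H*C, N], mirroring
// the intent of compute_flatten_shape() without using the Compute Library API.
#include <array>
#include <cstddef>
#include <iostream>

// Collapse the first three dimensions of a 4D shape into one.
std::array<std::size_t, 2> flatten_shape(const std::array<std::size_t, 4> &in)
{
    return {in[0] * in[1] * in[2], in[3]};
}

int main()
{
    const std::array<std::size_t, 4> input_shape = {7, 7, 64, 2}; // W, H, C, N (illustrative values)
    const auto output_shape = flatten_shape(input_shape);
    std::cout << output_shape[0] << " x " << output_shape[1] << "\n"; // prints "3136 x 2"
    return 0;
}

In the patched configure_conv_fc() this collapse is what allows _flatten_kernel.configure(input, &_flatten_output) to stand in for the previous NEIm2ColKernel call with its 1x1 kernel size, unit stride and zero padding.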