From afbc5ffb0b567ae93fa2765066bd136d72be88ff Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Wed, 3 Oct 2018 14:18:19 +0100
Subject: COMPMID-1621 Deconvolution wrong output calculation

Change-Id: Ida71312bcf6dbd854f2ab1efc65f74910c79e152
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/151510
Tested-by: bsgcomp
Reviewed-by: Michele DiGiorgio
---
 src/core/CPP/kernels/CPPFlipWeightsKernel.cpp      | 106 +++++++++++++++++++++
 src/core/Utils.cpp                                 |  15 ++-
 src/graph/nodes/DeconvolutionLayerNode.cpp         |   6 +-
 src/runtime/CL/functions/CLDeconvolutionLayer.cpp  |  50 +++++++---
 .../NEON/functions/NEDeconvolutionLayer.cpp        |  50 +++++++---
 5 files changed, 189 insertions(+), 38 deletions(-)
 create mode 100644 src/core/CPP/kernels/CPPFlipWeightsKernel.cpp

(limited to 'src')
diff --git a/src/core/CPP/kernels/CPPFlipWeightsKernel.cpp b/src/core/CPP/kernels/CPPFlipWeightsKernel.cpp
new file mode 100644
index 0000000000..741218e4f7
--- /dev/null
+++ b/src/core/CPP/kernels/CPPFlipWeightsKernel.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
+#include <cstddef>
+#include <cstdint>
+
+using namespace arm_compute;
+
+CPPFlipWeightsKernel::CPPFlipWeightsKernel()
+    : _input(nullptr), _output(nullptr), _func(nullptr)
+{
+}
+
+template <typename T>
+void CPPFlipWeightsKernel::flip_weights(const Window &window_input, const Window &window)
+{
+    // Create iterators
+    Iterator in(_input, window_input);
+
+    Iterator out(_output, window);
+
+    const int kernel_size = _input->info()->dimension(0);
+
+    execute_window_loop(window_input, [&](const Coordinates & id)
+    {
+        *((reinterpret_cast<T *>(out.ptr()) + kernel_size * (kernel_size - id.y() - 1) + (kernel_size - id.x() - 1))) = *(reinterpret_cast<const T *>(in.ptr()));
+    },
+    in, out);
+}
+
+void CPPFlipWeightsKernel::configure(const ITensor *input, ITensor *output)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    _input  = input;
+    _output = output;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*input->info(), Steps());
+
+    // The CPPFlipWeightsKernel doesn't need padding so update_window_and_padding() can be skipped
+    Coordinates coord;
+    coord.set_num_dimensions(output->info()->num_dimensions());
+    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
+
+    ICPPKernel::configure(win);
+
+    switch(input->info()->data_type())
+    {
+        case DataType::F32:
+            _func = &CPPFlipWeightsKernel::flip_weights<float>;
+            break;
+        case DataType::F16:
+            _func = &CPPFlipWeightsKernel::flip_weights<half>;
+            break;
+        case DataType::QASYMM8:
+            _func = &CPPFlipWeightsKernel::flip_weights<uint8_t>;
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Not supported");
+    }
+}
+
+void CPPFlipWeightsKernel::run(const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON(_func == nullptr);
+
+    Window out_window{ window };
+    out_window.set(Window::DimX, Window::Dimension(0, 0, 0));
+    out_window.set(Window::DimY, Window::Dimension(0, 0, 0));
+
+    (this->*_func)(window, out_window);
+}
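Not part of the patch, for orientation only: the new CPPFlipWeightsKernel writes each weight to the position mirrored in both x and y, i.e. it rotates every 2D kernel plane by 180 degrees, which is what turns the stride-1 convolution of the upsampled input into a transposed convolution. A minimal standalone sketch of the same indexing, with illustrative names and a 3x3 example:

#include <array>
#include <cstdio>

// Rotate a K x K plane by 180 degrees, mirroring the indexing used in flip_weights above:
// dst[(K - y - 1) * K + (K - x - 1)] = src[y * K + x]
template <typename T, int K>
std::array<T, K * K> flip_180(const std::array<T, K * K> &src)
{
    std::array<T, K * K> dst{};
    for(int y = 0; y < K; ++y)
    {
        for(int x = 0; x < K; ++x)
        {
            dst[(K - y - 1) * K + (K - x - 1)] = src[y * K + x];
        }
    }
    return dst;
}

int main()
{
    // 3x3 example: the sequence 1..9 becomes 9..1 after the flip
    const std::array<float, 9> w = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    const auto flipped = flip_180<float, 3>(w);
    for(int i = 0; i < 9; ++i)
    {
        std::printf("%g ", flipped[i]);
    }
    std::printf("\n"); // prints: 9 8 7 6 5 4 3 2 1
    return 0;
}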
diff --git a/src/core/Utils.cpp b/src/core/Utils.cpp
index 229579d8d9..a6a5771ec1 100644
--- a/src/core/Utils.cpp
+++ b/src/core/Utils.cpp
@@ -334,17 +334,14 @@ TensorShape arm_compute::deconvolution_output_shape(const std::pair
 
 const std::pair<unsigned int, unsigned int> arm_compute::deconvolution_output_dimensions(
     unsigned int in_width, unsigned int in_height, unsigned int kernel_width, unsigned int kernel_height, unsigned int padx, unsigned int pady,
-    unsigned int inner_border_right, unsigned int inner_border_top, unsigned int stride_x, unsigned int stride_y)
+    unsigned int stride_x, unsigned int stride_y)
 {
     ARM_COMPUTE_ERROR_ON(in_width < 1 || in_height < 1);
-    ARM_COMPUTE_ERROR_ON(((in_width - 1) * stride_x + kernel_width + inner_border_right) < 2 * padx);
-    ARM_COMPUTE_ERROR_ON(((in_height - 1) * stride_y + kernel_height + inner_border_top) < 2 * pady);
-    const int padx_deconv = (kernel_width - padx - 1);
-    const int pady_deconv = (kernel_height - pady - 1);
-    ARM_COMPUTE_ERROR_ON(padx_deconv < 0);
-    ARM_COMPUTE_ERROR_ON(pady_deconv < 0);
-    const int w = stride_x * (in_width - 1) + kernel_width + inner_border_right - 2 * padx_deconv;
-    const int h = stride_y * (in_height - 1) + kernel_height + inner_border_top - 2 * pady_deconv;
+    ARM_COMPUTE_ERROR_ON(((in_width - 1) * stride_x + kernel_width) < 2 * padx);
+    ARM_COMPUTE_ERROR_ON(((in_height - 1) * stride_y + kernel_height) < 2 * pady);
+    const int w = stride_x * (in_width - 1) + kernel_width - 2 * padx;
+    const int h = stride_y * (in_height - 1) + kernel_height - 2 * pady;
+
     return std::make_pair(w, h);
 }
 
diff --git a/src/graph/nodes/DeconvolutionLayerNode.cpp b/src/graph/nodes/DeconvolutionLayerNode.cpp
index 9329ae3c23..e7ccffd04f 100644
--- a/src/graph/nodes/DeconvolutionLayerNode.cpp
+++ b/src/graph/nodes/DeconvolutionLayerNode.cpp
@@ -51,8 +51,7 @@ Size2D DeconvolutionLayerNode::inner_border() const
 
 TensorDescriptor DeconvolutionLayerNode::compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                                    const TensorDescriptor &weights_descriptor,
-                                                                   const PadStrideInfo   &info,
-                                                                   const Size2D          &inner_border)
+                                                                   const PadStrideInfo   &info)
 {
     unsigned int output_width  = 0;
     unsigned int output_height = 0;
@@ -65,7 +64,6 @@ TensorDescriptor DeconvolutionLayerNode::compute_output_descriptor(const TensorD
 
     std::tie(output_width, output_height) = deconvolution_output_dimensions(input_width, input_height, kernel_width, kernel_height,
                                                                             info.pad().first, info.pad().second,
-                                                                            inner_border.x(), inner_border.y(),
                                                                             info.stride().first, info.stride().second);
 
     TensorDescriptor output_descriptor = input_descriptor;
@@ -96,7 +94,7 @@ TensorDescriptor DeconvolutionLayerNode::configure_output(size_t idx) const
 
     ARM_COMPUTE_ERROR_ON(src == nullptr || weights == nullptr);
 
-    TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info, _inner_border);
+    TensorDescriptor output_info = compute_output_descriptor(src->desc(), weights->desc(), _info);
 
     return output_info;
 }
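For reference only, not part of the patch: the corrected deconvolution_output_dimensions uses the standard transposed-convolution relation out = stride * (in - 1) + kernel - 2 * pad, instead of the previous formula based on kernel - pad - 1. A small self-contained sketch with example numbers:

#include <cassert>
#include <utility>

// Same relation as the patched deconvolution_output_dimensions:
// out = stride * (in - 1) + kernel - 2 * pad
std::pair<unsigned int, unsigned int> deconv_out_dims(unsigned int in_w, unsigned int in_h,
                                                      unsigned int k_w, unsigned int k_h,
                                                      unsigned int pad_x, unsigned int pad_y,
                                                      unsigned int stride_x, unsigned int stride_y)
{
    const unsigned int w = stride_x * (in_w - 1) + k_w - 2 * pad_x;
    const unsigned int h = stride_y * (in_h - 1) + k_h - 2 * pad_y;
    return std::make_pair(w, h);
}

int main()
{
    // 4x4 input, 3x3 kernel, stride 2, no padding -> 9x9 output
    assert(deconv_out_dims(4, 4, 3, 3, 0, 0, 2, 2) == std::make_pair(9u, 9u));
    // With padding 1 the output shrinks by 2 per dimension -> 7x7
    assert(deconv_out_dims(4, 4, 3, 3, 1, 1, 2, 2) == std::make_pair(7u, 7u));
    return 0;
}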
diff --git a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
index 3f5b8c92dd..26d44e9c96 100644
--- a/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDeconvolutionLayer.cpp
@@ -27,6 +27,8 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "arm_compute/runtime/CPP/CPPScheduler.h"
 
 #include <memory>
 #include <tuple>
@@ -38,7 +40,10 @@ CLDeconvolutionLayer::CLDeconvolutionLayer(std::shared_ptr<IMemoryManager> memor
     : _memory_group(std::move(memory_manager)),
       _scale_f(),
       _conv_f(),
+      _flip_weights(),
       _scaled_output(),
+      _weights(),
+      _weights_flipped(),
       _is_prepared(false)
 {
 }
@@ -59,7 +64,7 @@ Status CLDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInf
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(inner_border_top > stride_y - 1, "inner_border_top must be smaller than stride_y");
 
     auto out_dims = deconvolution_output_dimensions(input->dimension(0), input->dimension(1), weights->dimension(0), weights->dimension(1),
-                                                    info.pad().first, info.pad().second, inner_border_right, inner_border_top, stride_x, stride_y);
+                                                    info.pad().first, info.pad().second, stride_x, stride_y);
 
     const TensorShape output_shape = deconvolution_output_shape(out_dims, input->tensor_shape(), weights->tensor_shape());
 
@@ -81,8 +86,9 @@ Status CLDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInf
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimY) != output_shape.y(), "Output's height is invalid.");
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimZ) != output_shape.z(), "Output's depth is invalid.");
 
-    TensorInfo scale_out_info(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_deconvolution_shape(*input, stride_x, stride_y, inner_border_right, inner_border_top,
-                                                                                                                                  info)));
+    TensorInfo scale_out_info(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_deconvolution_shape(*input, *weights, stride_x, stride_y, inner_border_right,
+                                                                                                                                   inner_border_top,
+                                                                                                                                   out_dims)));
     const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
 
     ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionLayerUpsample::validate(input, &scale_out_info, BorderSize(inner_border_right, inner_border_top), info));
@@ -91,7 +97,7 @@ Status CLDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInf
     return Status{};
 }
 
-void CLDeconvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
+void CLDeconvolutionLayer::configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
                                      unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
@@ -99,8 +105,12 @@ void CLDeconvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights,
     const unsigned int stride_x = info.stride().first;
     const unsigned int stride_y = info.stride().second;
 
+    _weights = weights;
+    _weights_flipped.allocator()->init(TensorInfo(weights->info()->tensor_shape(), 1, weights->info()->data_type()));
+    _flip_weights.configure(weights, &_weights_flipped);
+
     auto out_dims = deconvolution_output_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0), weights->info()->dimension(1),
-                                                    info.pad().first, info.pad().second, inner_border_right, inner_border_top, stride_x, stride_y);
+                                                    info.pad().first, info.pad().second, stride_x, stride_y);
 
     const TensorShape output_shape = deconvolution_output_shape(out_dims, input->info()->tensor_shape(), weights->info()->tensor_shape());
 
@@ -113,22 +123,31 @@ void CLDeconvolutionLayer::configure(ICLTensor *input, const ICLTensor *weights,
     _is_prepared = false;
 
     _memory_group.manage(&_scaled_output);
+    _memory_group.manage(&_weights_flipped);
 
-    // configure scale function
-    // Init and allocate intermediate tensor for output, same size as input but the first two axis are the same as the output tensor
-    TensorShape scale_out_shape(input->info()->tensor_shape());
-    const unsigned int out_x = input->info()->dimension(0) + (input->info()->dimension(0) - 1) * (stride_x - 1) + inner_border_right + 2 * info.pad().first;
-    const unsigned int out_y = input->info()->dimension(1) + (input->info()->dimension(1) - 1) * (stride_y - 1) + inner_border_top + 2 * info.pad().second;
+    // Find the upsampled dimensions
+    unsigned int out_x = (input->info()->dimension(0) - 1) * stride_x + inner_border_right + 1;
+    unsigned int out_y = (input->info()->dimension(1) - 1) * stride_y + inner_border_top + 1;
+
+    // Find the padding needed for the convolution with stride 1 in order to match output shape
+    unsigned int padx = out_dims.first - (out_x - weights->info()->dimension(0) + 1);
+    unsigned int pady = out_dims.second - (out_y - weights->info()->dimension(1) + 1);
+    out_x += padx;
+    out_y += pady;
+
+    TensorShape scale_out_shape(input->info()->tensor_shape());
     scale_out_shape.set(0, out_x);
     scale_out_shape.set(1, out_y);
     TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(), input->info()->quantization_info());
     _scaled_output.allocator()->init(scale_out_info);
 
-    _scale_f.configure(input, &_scaled_output, BorderSize(inner_border_top, inner_border_right), info);
+    // configure scale function
+    const PadStrideInfo upsample_info(stride_x, stride_y, padx / 2, pady / 2);
+    _scale_f.configure(input, &_scaled_output, BorderSize(inner_border_top, inner_border_right), upsample_info);
 
     // setup the function to convolve the upscaled output
     const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
-    _conv_f.configure(&_scaled_output, weights, bias, output, conv_info, weights_info);
+    _conv_f.configure(&_scaled_output, &_weights_flipped, bias, output, conv_info, weights_info);
     _scaled_output.allocator()->allocate();
 }
 
@@ -148,7 +167,14 @@ void CLDeconvolutionLayer::prepare()
 {
     if(!_is_prepared)
     {
+        _weights_flipped.allocator()->allocate();
+        _weights_flipped.map(true);
+        _weights->map(CLScheduler::get().queue(), true);
+        CPPScheduler::get().schedule(&_flip_weights, Window::DimZ);
+        _weights_flipped.unmap();
+        _weights->unmap(CLScheduler::get().queue());
         _conv_f.prepare();
+
         _is_prepared = true;
     }
 }
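Illustration only, not part of the patch: how the new configure() logic above sizes the upsampled tensor and derives padx/pady so that the following stride-1 convolution produces exactly out_dims. Variable names follow the patch; the concrete numbers are assumptions for the example:

#include <cassert>

int main()
{
    // Example (width only, for brevity): 4-wide input, 3-tap kernel, stride 2, pad 0
    const unsigned int in_w = 4, kernel_w = 3, stride_x = 2, pad_x = 0;
    const unsigned int inner_border_right = 0;

    // Requested deconvolution output width: stride * (in - 1) + kernel - 2 * pad = 9
    const unsigned int out_dim = stride_x * (in_w - 1) + kernel_w - 2 * pad_x;

    // Upsampled width before padding, as in configure(): (in - 1) * stride + inner_border + 1 = 7
    unsigned int out_x = (in_w - 1) * stride_x + inner_border_right + 1;

    // A stride-1 convolution of a width-W tensor with a width-K kernel yields W - K + 1,
    // so pad the upsampled tensor until that matches out_dim: padx = out_dim - (out_x - kernel_w + 1)
    const unsigned int padx = out_dim - (out_x - kernel_w + 1);
    out_x += padx;

    assert(out_dim == 9);
    assert(padx == 4);                       // split as padx / 2 on each side by the upsample step
    assert(out_x - kernel_w + 1 == out_dim); // the stride-1 convolution now matches the requested output
    return 0;
}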
diff --git a/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp b/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
index fda9f57499..6ca60c66a4 100644
--- a/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
@@ -27,6 +27,7 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/CPP/CPPScheduler.h"
 
 using namespace arm_compute;
 using namespace arm_compute::misc::shape_calculator;
@@ -35,7 +36,9 @@ NEDeconvolutionLayer::NEDeconvolutionLayer(std::shared_ptr<IMemoryManager> memor
     : _memory_group(std::move(memory_manager)),
       _conv_f(),
       _upsample_f(),
+      _flip_weights(),
       _scaled_output(),
+      _weights_flipped(),
       _input(nullptr),
       _info(),
       _inner_border(),
@@ -60,7 +63,7 @@ Status NEDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInf
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(inner_border_top > stride_y - 1, "inner_border_top must be smaller than stride_y");
 
     auto out_dims = deconvolution_output_dimensions(input->dimension(0), input->dimension(1), weights->dimension(0), weights->dimension(1),
-                                                    info.pad().first, info.pad().second, inner_border_right, inner_border_top, stride_x, stride_y);
+                                                    info.pad().first, info.pad().second, stride_x, stride_y);
 
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights, bias);
 
@@ -74,14 +77,14 @@ Status NEDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInf
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
         const TensorShape output_shape = deconvolution_output_shape(out_dims, input->tensor_shape(), weights->tensor_shape());
-
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimX) != output_shape.x(), "Output's width is invalid.");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimY) != output_shape.y(), "Output's height is invalid.");
         ARM_COMPUTE_RETURN_ERROR_ON_MSG(output->dimension(Window::DimZ) != output_shape.z(), "Output's depth is invalid.");
     }
 
-    TensorInfo scale_out_info(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_deconvolution_shape(*input, stride_x, stride_y, inner_border_right, inner_border_top,
-                                                                                                                                  info)));
+    TensorInfo scale_out_info(input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_deconvolution_shape(*input, *weights, stride_x, stride_y, inner_border_right,
+                                                                                                                                   inner_border_top,
+                                                                                                                                   out_dims)));
     const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
 
     for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
@@ -107,25 +110,44 @@ void NEDeconvolutionLayer::configure(ITensor *input, const ITensor *weights, con
     const unsigned int stride_x = info.stride().first;
     const unsigned int stride_y = info.stride().second;
 
+    _weights_flipped.allocator()->init(TensorInfo(weights->info()->tensor_shape(), 1, weights->info()->data_type()));
+    _flip_weights.configure(weights, &_weights_flipped);
+
+    auto out_dims = deconvolution_output_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0), weights->info()->dimension(1),
+                                                    info.pad().first, info.pad().second, stride_x, stride_y);
+
+    const TensorShape output_shape = deconvolution_output_shape(out_dims, input->info()->tensor_shape(), weights->info()->tensor_shape());
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->quantization_info());
+
     // Perform validation step
     ARM_COMPUTE_ERROR_THROW_ON(NEDeconvolutionLayer::validate(input->info(), weights->info(), bias == nullptr ? nullptr : bias->info(), output->info(), info, inner_border_right, inner_border_top));
 
     _memory_group.manage(&_scaled_output);
 
-    // configure scale function
-    // Init and allocate intermmidiate tensor for output, same size as input but the first two axis are the same as the output tensor
-    const TensorInfo scale_out_info(compute_deconvolution_shape(*input->info(), stride_x, stride_y, inner_border_right, inner_border_top, info), 1, input->info()->data_type());
+    // Find the upsampled dimensions
+    unsigned int out_x = (input->info()->dimension(0) - 1) * stride_x + inner_border_right + 1;
+    unsigned int out_y = (input->info()->dimension(1) - 1) * stride_y + inner_border_top + 1;
+
+    // Find the padding needed for the convolution with stride 1 in order to match output shape
+    unsigned int padx = out_dims.first - (out_x - weights->info()->dimension(0) + 1);
+    unsigned int pady = out_dims.second - (out_y - weights->info()->dimension(1) + 1);
+    out_x += padx;
+    out_y += pady;
+
+    TensorShape scale_out_shape(input->info()->tensor_shape());
+    scale_out_shape.set(0, out_x);
+    scale_out_shape.set(1, out_y);
+    TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(), input->info()->quantization_info());
     _scaled_output.allocator()->init(scale_out_info);
 
+    const PadStrideInfo upsample_info(stride_x, stride_y, padx / 2, pady / 2);
+    _upsample_f.configure(input, &_scaled_output, upsample_info, inner_border_right, inner_border_top);
+
     // setup the function to convolve the upscaled output
     const PadStrideInfo conv_info(1, 1, 0, 0, 0, 0, DimensionRoundingType::CEIL);
-    _conv_f.configure(&_scaled_output, weights, bias, output, conv_info);
-
-    // Allocate auxiliary tensors
+    _conv_f.configure(&_scaled_output, &_weights_flipped, bias, output, conv_info);
     _scaled_output.allocator()->allocate();
-
-    // configure upsample function
-    _upsample_f.configure(input, &_scaled_output, info, inner_border_right, inner_border_top);
 }
 
 void NEDeconvolutionLayer::run()
@@ -144,6 +166,8 @@ void NEDeconvolutionLayer::prepare()
 {
     if(!_is_prepared)
     {
+        _weights_flipped.allocator()->allocate();
+        CPPScheduler::get().schedule(&_flip_weights, Window::DimZ);
         _conv_f.prepare();
         _is_prepared = true;
     }
 }
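A hedged usage sketch, not taken from the patch or its tests: the tensor shapes, the fill step and the zero inner-border arguments are assumptions for illustration. It shows where the weight flip fits, since prepare() schedules CPPFlipWeightsKernel once and subsequent runs reuse the flipped weights:

#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // Illustrative shapes: 4x4x1 input, 3x3x1x1 weights, stride 2, no padding -> 9x9x1 output
    Tensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(4U, 4U, 1U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 1U, 1U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(1U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(9U, 9U, 1U), 1, DataType::F32));

    NEDeconvolutionLayer deconv;
    // inner_border_right / inner_border_top passed as 0 here
    deconv.configure(&src, &weights, &bias, &dst, PadStrideInfo(2, 2, 0, 0), 0, 0);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    dst.allocator()->allocate();

    // ... fill src, weights and bias here ...

    deconv.run(); // the first run prepares the function, flipping the weights once via CPPScheduler
    deconv.run(); // later runs reuse the already flipped weights
    return 0;
}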
-- 
cgit v1.2.1