From afbc5ffb0b567ae93fa2765066bd136d72be88ff Mon Sep 17 00:00:00 2001
From: Michalis Spyrou
Date: Wed, 3 Oct 2018 14:18:19 +0100
Subject: COMPMID-1621 Deconvolution wrong output calculation

Change-Id: Ida71312bcf6dbd854f2ab1efc65f74910c79e152
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/151510
Tested-by: bsgcomp
Reviewed-by: Michele DiGiorgio
---
 arm_compute/core/CPP/CPPKernels.h                   |  1 +
 .../core/CPP/kernels/CPPFlipWeightsKernel.h         | 85 ++++++++++++++++++++++
 arm_compute/core/Utils.h                            | 20 +++--
 arm_compute/core/utils/misc/ShapeCalculator.h       | 17 ++++-
 arm_compute/graph/nodes/DeconvolutionLayerNode.h    |  4 +-
 .../runtime/CL/functions/CLDeconvolutionLayer.h     | 15 +++-
 .../runtime/NEON/functions/NEDeconvolutionLayer.h   | 15 ++--
 7 files changed, 132 insertions(+), 25 deletions(-)
 create mode 100644 arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h

diff --git a/arm_compute/core/CPP/CPPKernels.h b/arm_compute/core/CPP/CPPKernels.h
index a0c5707a79..bf24a94aa7 100644
--- a/arm_compute/core/CPP/CPPKernels.h
+++ b/arm_compute/core/CPP/CPPKernels.h
@@ -27,6 +27,7 @@
 /* Header regrouping all the CPP kernels */
 #include "arm_compute/core/CPP/kernels/CPPCornerCandidatesKernel.h"
 #include "arm_compute/core/CPP/kernels/CPPDetectionWindowNonMaximaSuppressionKernel.h"
+#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
 #include "arm_compute/core/CPP/kernels/CPPPermuteKernel.h"
 #include "arm_compute/core/CPP/kernels/CPPSortEuclideanDistanceKernel.h"
 #include "arm_compute/core/CPP/kernels/CPPUpsampleKernel.h"
diff --git a/arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h b/arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h
new file mode 100644
index 0000000000..801934159d
--- /dev/null
+++ b/arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CPP_FLIP_WEIGHTS_KERNEL_H__
+#define __ARM_COMPUTE_CPP_FLIP_WEIGHTS_KERNEL_H__
+
+#include "arm_compute/core/CPP/ICPPKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** CPP kernel to perform 180 degrees flipping on deconvolution weights.
+ */
+class CPPFlipWeightsKernel : public ICPPKernel
+{
+public:
+    const char *name() const override
+    {
+        return "CPPFlipWeightsKernel";
+    }
+    /** Default constructor */
+    CPPFlipWeightsKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CPPFlipWeightsKernel(const CPPFlipWeightsKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CPPFlipWeightsKernel &operator=(const CPPFlipWeightsKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CPPFlipWeightsKernel(CPPFlipWeightsKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CPPFlipWeightsKernel &operator=(CPPFlipWeightsKernel &&) = default;
+    /** Default destructor */
+    ~CPPFlipWeightsKernel() = default;
+
+    /** Set the input and output of the kernel.
+     *
+     * @param[in]  input  The input tensor to flip. Data types supported: QASYMM8/F16/F32
+     * @param[out] output The output tensor. Data types supported: Same as @p input
+     */
+    void configure(const ITensor *input, ITensor *output);
+
+    // Inherited methods overridden:
+    void run(const Window &window, const ThreadInfo &info) override;
+
+    /** Function to perform flipping.
+     *
+     * @param[in] window_input Input region on which to execute the kernel.
+     * @param[in] window       Output region on which to execute the kernel.
+     */
+    template <typename T>
+    void flip_weights(const Window &window_input, const Window &window);
+
+    /** Common signature for all the specialised Flip functions
+     *
+     * @param[in] window_input Input region on which to execute the kernel.
+     * @param[in] window       Output region on which to execute the kernel.
+     */
+    using FlipWeightsFunction = void (CPPFlipWeightsKernel::*)(const Window &window_input, const Window &window);
+
+private:
+    const ITensor      *_input;
+    ITensor            *_output;
+    FlipWeightsFunction _func;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CPP_FLIP_WEIGHTS_KERNEL_H__ */
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index c742ebc50e..7ee24e2736 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -827,22 +827,20 @@ TensorShape deconvolution_output_shape(const std::pair
 const std::pair<unsigned int, unsigned int> deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height, unsigned int kernel_width, unsigned int kernel_height,
-                                                                            unsigned int padx, unsigned int pady, unsigned int inner_border_right, unsigned int inner_border_top,
+                                                                            unsigned int padx, unsigned int pady,
                                                                             unsigned int stride_x, unsigned int stride_y);
 
 /** Returns expected width and height of output scaled tensor depending on dimensions rounding mode.
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 804ff3c709..f68401c1b9 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -229,11 +229,20 @@ inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input
     return output_shape;
 }
 
-inline TensorShape compute_deconvolution_shape(const ITensorInfo &input, unsigned int sx, unsigned int sy, unsigned int inner_border_right, unsigned int inner_border_top, const PadStrideInfo &info)
+inline TensorShape compute_deconvolution_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy, unsigned int inner_border_right, unsigned int inner_border_top,
+                                               std::pair<unsigned int, unsigned int> &out_dims)
 {
-    TensorShape        scale_out_shape(input.tensor_shape());
-    const unsigned int out_x = input.dimension(0) + (input.dimension(0) - 1) * (sx - 1) + inner_border_right + 2 * info.pad().first;
-    const unsigned int out_y = input.dimension(1) + (input.dimension(1) - 1) * (sy - 1) + inner_border_top + 2 * info.pad().second;
+    // Find the upsampled dimensions
+    unsigned int out_x = (input.dimension(0) - 1) * sx + inner_border_right + 1;
+    unsigned int out_y = (input.dimension(1) - 1) * sy + inner_border_top + 1;
+
+    // Find the padding needed for the convolution with stride 1 in order to match output shape
+    unsigned int padx = out_dims.first - (out_x - weights.dimension(0) + 1);
+    unsigned int pady = out_dims.second - (out_y - weights.dimension(1) + 1);
+    out_x += padx;
+    out_y += pady;
+
+    TensorShape scale_out_shape(input.tensor_shape());
     scale_out_shape.set(0, out_x);
     scale_out_shape.set(1, out_y);
diff --git a/arm_compute/graph/nodes/DeconvolutionLayerNode.h b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
index 73210a299e..19501482c6 100644
--- a/arm_compute/graph/nodes/DeconvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
@@ -55,14 +55,12 @@ public:
      * @param[in] input_descriptor   Input descriptor
      * @param[in] weights_descriptor Weights descriptor
      * @param[in] info               Convolution operation attributes
-     * @param[in] inner_border       Inner border (right, top)
      *
      * @return Output descriptor
      */
     static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
                                                       const TensorDescriptor &weights_descriptor,
-                                                      const PadStrideInfo    &info,
-                                                      const Size2D           &inner_border);
+                                                      const PadStrideInfo    &info);
 
     // Inherited overridden methods:
     NodeType type() const override;
diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
index 4dce1e1801..6716cd6fdd 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
@@ -27,6 +27,8 @@
 #include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
 #include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h"
 
+#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
+
 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/IFunction.h"
@@ -62,6 +64,14 @@ class CLDeconvolutionLayer : public IFunction
 public:
     /** Constructor */
     CLDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDeconvolutionLayer(const CLDeconvolutionLayer &) = delete;
+    /** Default move constructor */
+    CLDeconvolutionLayer(CLDeconvolutionLayer &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLDeconvolutionLayer &operator=(const CLDeconvolutionLayer &) = delete;
+    /** Default move assignment operator */
+    CLDeconvolutionLayer &operator=(CLDeconvolutionLayer &&) = default;
     /** Set the input, weights, biases and output tensors.
      *
      * @param[in,out] input     Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
@@ -74,7 +84,7 @@ public:
      * @param[in]     weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
      *
      */
-    void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
+    void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
                    unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info = WeightsInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
      *
@@ -100,7 +110,10 @@ private:
     CLMemoryGroup                _memory_group;
     CLDeconvolutionLayerUpsample _scale_f;
     CLConvolutionLayer           _conv_f;
+    CPPFlipWeightsKernel         _flip_weights;
     CLTensor                     _scaled_output;
+    ICLTensor                   *_weights;
+    CLTensor                     _weights_flipped;
     bool                         _is_prepared;
 };
 }
diff --git a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
index 3e527168c1..0cca555621 100644
--- a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
@@ -28,6 +28,7 @@
 #include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
 
+#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/IFunction.h"
 #include "arm_compute/runtime/IMemoryManager.h"
@@ -111,12 +112,14 @@ public:
     void prepare() override;
 
 private:
-    MemoryGroup        _memory_group;
-    NEConvolutionLayer _conv_f;
-    CPPUpsample        _upsample_f;
-    Tensor             _scaled_output;
-    ITensor           *_input;
-    PadStrideInfo      _info;
+    MemoryGroup          _memory_group;
+    NEConvolutionLayer   _conv_f;
+    CPPUpsample          _upsample_f;
+    CPPFlipWeightsKernel _flip_weights;
+    Tensor               _scaled_output;
+    Tensor               _weights_flipped;
+    ITensor             *_input;
+    PadStrideInfo        _info;
     std::pair<unsigned int, unsigned int> _inner_border;
     bool                 _is_prepared;
 };
--
cgit v1.2.1
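
Postscript (not part of the patch): the shape arithmetic added to compute_deconvolution_shape() can be checked by hand. The sketch below is a minimal standalone C++ program restating those formulas with illustrative values; the variable names and the (in - 1) * stride + kernel - 2 * pad expression for the requested deconvolution output size are assumptions made for the example, not code taken from the library.

    #include <cassert>
    #include <iostream>
    #include <utility>

    // Stand-alone restatement of the patched logic: upsample the input by the
    // stride, then pad so that a stride-1 convolution with the flipped weights
    // produces the requested deconvolution output size.
    int main()
    {
        const unsigned int in_x = 4, in_y = 4;   // input spatial size
        const unsigned int wk_x = 3, wk_y = 3;   // kernel size
        const unsigned int sx = 2, sy = 2;       // deconvolution strides
        const unsigned int pad_x = 0, pad_y = 0; // deconvolution padding
        const unsigned int inner_border_right = 0, inner_border_top = 0;

        // Requested deconvolution output (assumed standard transposed-conv formula).
        const std::pair<unsigned int, unsigned int> out_dims((in_x - 1) * sx + wk_x - 2 * pad_x,
                                                             (in_y - 1) * sy + wk_y - 2 * pad_y);

        // Upsampled dimensions, as computed in the patch.
        unsigned int out_x = (in_x - 1) * sx + inner_border_right + 1;
        unsigned int out_y = (in_y - 1) * sy + inner_border_top + 1;

        // Padding needed so a stride-1 convolution matches out_dims, then grow
        // the scaled tensor by that amount (also as in the patch).
        const unsigned int padx = out_dims.first - (out_x - wk_x + 1);
        const unsigned int pady = out_dims.second - (out_y - wk_y + 1);
        out_x += padx;
        out_y += pady;

        // 4x4 input, 3x3 kernel, stride 2 -> 11x11 scaled tensor -> 9x9 output.
        assert(out_x - wk_x + 1 == out_dims.first);
        assert(out_y - wk_y + 1 == out_dims.second);
        std::cout << "scaled: " << out_x << "x" << out_y
                  << "  output: " << out_dims.first << "x" << out_dims.second << std::endl;
        return 0;
    }

For the same values, the removed formula (in + (in - 1) * (stride - 1) + inner_border + 2 * pad) gives a 7x7 scaled tensor, which a 3x3 stride-1 convolution turns into 5x5 instead of the expected 9x9; that mismatch is the wrong output calculation this patch corrects.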