author      Michalis Spyrou <michalis.spyrou@arm.com>    2018-10-03 14:18:19 +0100
committer   Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:55:45 +0000
commit      afbc5ffb0b567ae93fa2765066bd136d72be88ff (patch)
tree        328005d70d5526609a9d84173a317fd1f10b4ed2 /arm_compute
parent      67d94d29c154a376d12e582421323c1d250da20c (diff)
download    ComputeLibrary-afbc5ffb0b567ae93fa2765066bd136d72be88ff.tar.gz
COMPMID-1621 Deconvolution wrong output calculation
Change-Id: Ida71312bcf6dbd854f2ab1efc65f74910c79e152
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/151510
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Diffstat (limited to 'arm_compute')
-rw-r--r--  arm_compute/core/CPP/CPPKernels.h                          |  1
-rw-r--r--  arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h        | 85
-rw-r--r--  arm_compute/core/Utils.h                                   | 20
-rw-r--r--  arm_compute/core/utils/misc/ShapeCalculator.h              | 17
-rw-r--r--  arm_compute/graph/nodes/DeconvolutionLayerNode.h           |  4
-rw-r--r--  arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h    | 15
-rw-r--r--  arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h  | 15
7 files changed, 132 insertions(+), 25 deletions(-)
diff --git a/arm_compute/core/CPP/CPPKernels.h b/arm_compute/core/CPP/CPPKernels.h
index a0c5707a79..bf24a94aa7 100644
--- a/arm_compute/core/CPP/CPPKernels.h
+++ b/arm_compute/core/CPP/CPPKernels.h
@@ -27,6 +27,7 @@
/* Header regrouping all the CPP kernels */
#include "arm_compute/core/CPP/kernels/CPPCornerCandidatesKernel.h"
#include "arm_compute/core/CPP/kernels/CPPDetectionWindowNonMaximaSuppressionKernel.h"
+#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
#include "arm_compute/core/CPP/kernels/CPPPermuteKernel.h"
#include "arm_compute/core/CPP/kernels/CPPSortEuclideanDistanceKernel.h"
#include "arm_compute/core/CPP/kernels/CPPUpsampleKernel.h"
diff --git a/arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h b/arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h
new file mode 100644
index 0000000000..801934159d
--- /dev/null
+++ b/arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CPP_FLIP_WEIGHTS_KERNEL_H__
+#define __ARM_COMPUTE_CPP_FLIP_WEIGHTS_KERNEL_H__
+
+#include "arm_compute/core/CPP/ICPPKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** CPP kernel to perform 180 degrees flipping on deconvolution weights. */
+class CPPFlipWeightsKernel : public ICPPKernel
+{
+public:
+ const char *name() const override
+ {
+ return "CPPFlipWeightsKernel";
+ }
+ /** Default constructor */
+ CPPFlipWeightsKernel();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CPPFlipWeightsKernel(const CPPFlipWeightsKernel &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CPPFlipWeightsKernel &operator=(const CPPFlipWeightsKernel &) = delete;
+ /** Allow instances of this class to be moved */
+ CPPFlipWeightsKernel(CPPFlipWeightsKernel &&) = default;
+ /** Allow instances of this class to be moved */
+ CPPFlipWeightsKernel &operator=(CPPFlipWeightsKernel &&) = default;
+ /** Default destructor */
+ ~CPPFlipWeightsKernel() = default;
+
+ /** Set the input and output of the kernel.
+ *
+ * @param[in] input The input tensor to flip. Data types supported: QASYMM8/F16/F32
+ * @param[out] output The output tensor. Data types supported: Same as @p input
+ */
+ void configure(const ITensor *input, ITensor *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+ /** Function to perform flipping.
+ *
+ * @param[in] window_input Input region on which to execute the kernel.
+ * @param[in] window Output region on which to execute the kernel.
+ */
+ template <typename T>
+ void flip_weights(const Window &window_input, const Window &window);
+
+ /** Common signature for all the specialised Flip functions
+ *
+ * @param[in] window_input Input region on which to execute the kernel.
+ * @param[in] window Output region on which to execute the kernel.
+ */
+ using FlipWeightsFunction = void (CPPFlipWeightsKernel::*)(const Window &window_input, const Window &window);
+
+private:
+ const ITensor *_input;
+ ITensor *_output;
+ FlipWeightsFunction _func;
+};
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_CPP_FLIP_WEIGHTS_KERNEL_H__ */
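For reference, the operation this kernel encapsulates is a 180-degree rotation of each 2-D filter plane (width and height indices reversed, channel and batch dimensions untouched), which is what lets a deconvolution be computed as upsample-then-convolve. A minimal standalone sketch of that flip, using plain arrays instead of the library's ITensor/Window machinery (function name and layout are illustrative, not part of the library API):

    #include <cstddef>
    #include <vector>

    // Standalone illustration: rotate each WxH filter plane by 180 degrees.
    // Layout assumed here is [width, height, channels, num_kernels], width innermost.
    template <typename T>
    std::vector<T> flip_weights_180(const std::vector<T> &src, std::size_t w, std::size_t h, std::size_t c, std::size_t n)
    {
        std::vector<T> dst(src.size());
        for (std::size_t k = 0; k < n; ++k)
        {
            for (std::size_t z = 0; z < c; ++z)
            {
                const std::size_t plane = (k * c + z) * h * w;
                for (std::size_t y = 0; y < h; ++y)
                {
                    for (std::size_t x = 0; x < w; ++x)
                    {
                        // (x, y) in the source lands at (w - 1 - x, h - 1 - y) in the destination
                        dst[plane + (h - 1 - y) * w + (w - 1 - x)] = src[plane + y * w + x];
                    }
                }
            }
        }
        return dst;
    }

The kernel itself dispatches to a type-specialised flip_weights<T> through the FlipWeightsFunction pointer set up in configure().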
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index c742ebc50e..7ee24e2736 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -827,22 +827,20 @@ TensorShape deconvolution_output_shape(const std::pair<unsigned int, unsigned in
/** Returns expected width and height of the deconvolution's output tensor.
*
- * @param[in] in_width Width of input tensor (Number of columns)
- * @param[in] in_height Height of input tensor (Number of rows)
- * @param[in] kernel_width Kernel width.
- * @param[in] kernel_height Kernel height.
- * @param[in] padx X axis padding.
- * @param[in] pady Y axis padding.
- * @param[in] inner_border_right The number of zeros added to right edge of the input.
- * @param[in] inner_border_top The number of zeros added to top edge of the input.
- * @param[in] stride_x X axis input stride.
- * @param[in] stride_y Y axis input stride.
+ * @param[in] in_width Width of input tensor (Number of columns)
+ * @param[in] in_height Height of input tensor (Number of rows)
+ * @param[in] kernel_width Kernel width.
+ * @param[in] kernel_height Kernel height.
+ * @param[in] padx X axis padding.
+ * @param[in] pady Y axis padding.
+ * @param[in] stride_x X axis input stride.
+ * @param[in] stride_y Y axis input stride.
*
* @return A pair with the new width in the first position and the new height in the second.
*/
const std::pair<unsigned int, unsigned int> deconvolution_output_dimensions(unsigned int in_width, unsigned int in_height,
unsigned int kernel_width, unsigned int kernel_height,
- unsigned int padx, unsigned int pady, unsigned int inner_border_right, unsigned int inner_border_top,
+ unsigned int padx, unsigned int pady,
unsigned int stride_x, unsigned int stride_y);
/** Returns expected width and height of output scaled tensor depending on dimensions rounding mode.
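The body of deconvolution_output_dimensions is not part of this header diff, but the relation such a helper expresses is the usual transposed-convolution output size, out = (in - 1) * stride + kernel - 2 * pad. A standalone sketch of that arithmetic (illustrative helper name, not the library function itself):

    #include <utility>

    // Standalone sketch: expected deconvolution output size per dimension,
    // out = (in - 1) * stride + kernel - 2 * pad
    inline std::pair<unsigned int, unsigned int> deconv_output_dims_sketch(unsigned int in_width, unsigned int in_height,
                                                                           unsigned int kernel_width, unsigned int kernel_height,
                                                                           unsigned int padx, unsigned int pady,
                                                                           unsigned int stride_x, unsigned int stride_y)
    {
        const unsigned int w = (in_width - 1) * stride_x + kernel_width - 2 * padx;
        const unsigned int h = (in_height - 1) * stride_y + kernel_height - 2 * pady;
        return std::make_pair(w, h);
    }

For a 4x4 input, 3x3 kernel, stride 2 and no padding this gives 9x9, the target used by the shape calculator change below.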
diff --git a/arm_compute/core/utils/misc/ShapeCalculator.h b/arm_compute/core/utils/misc/ShapeCalculator.h
index 804ff3c709..f68401c1b9 100644
--- a/arm_compute/core/utils/misc/ShapeCalculator.h
+++ b/arm_compute/core/utils/misc/ShapeCalculator.h
@@ -229,11 +229,20 @@ inline TensorShape compute_depthwise_convolution_shape(const ITensorInfo &input,
return output_shape;
}
-inline TensorShape compute_deconvolution_shape(const ITensorInfo &input, unsigned int sx, unsigned int sy, unsigned int inner_border_right, unsigned int inner_border_top, const PadStrideInfo &info)
+inline TensorShape compute_deconvolution_shape(const ITensorInfo &input, const ITensorInfo &weights, unsigned int sx, unsigned int sy, unsigned int inner_border_right, unsigned int inner_border_top,
+ std::pair<unsigned int, unsigned int> &out_dims)
{
- TensorShape scale_out_shape(input.tensor_shape());
- const unsigned int out_x = input.dimension(0) + (input.dimension(0) - 1) * (sx - 1) + inner_border_right + 2 * info.pad().first;
- const unsigned int out_y = input.dimension(1) + (input.dimension(1) - 1) * (sy - 1) + inner_border_top + 2 * info.pad().second;
+ // Find the upsampled dimensions
+ unsigned int out_x = (input.dimension(0) - 1) * sx + inner_border_right + 1;
+ unsigned int out_y = (input.dimension(1) - 1) * sy + inner_border_top + 1;
+
+ // Find the padding needed for the convolution with stride 1 in order to match output shape
+ unsigned int padx = out_dims.first - (out_x - weights.dimension(0) + 1);
+ unsigned int pady = out_dims.second - (out_y - weights.dimension(1) + 1);
+ out_x += padx;
+ out_y += pady;
+
+ TensorShape scale_out_shape(input.tensor_shape());
scale_out_shape.set(0, out_x);
scale_out_shape.set(1, out_y);
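In other words, the intermediate (scaled) tensor shape is now derived in two steps: upsample the input by the stride (plus any inner border), then add whatever padding a stride-1 convolution with the given kernel needs to reach the requested output dimensions. The same arithmetic restated as a standalone sketch with plain integers (names are illustrative):

    #include <utility>

    // Standalone sketch of the new compute_deconvolution_shape arithmetic.
    inline std::pair<unsigned int, unsigned int> scaled_shape_sketch(unsigned int in_w, unsigned int in_h,
                                                                     unsigned int kernel_w, unsigned int kernel_h,
                                                                     unsigned int sx, unsigned int sy,
                                                                     unsigned int inner_border_right, unsigned int inner_border_top,
                                                                     std::pair<unsigned int, unsigned int> out_dims)
    {
        // Upsampled dimensions
        unsigned int out_x = (in_w - 1) * sx + inner_border_right + 1;
        unsigned int out_y = (in_h - 1) * sy + inner_border_top + 1;

        // Padding needed so that a stride-1 convolution matches the requested output shape
        const unsigned int padx = out_dims.first - (out_x - kernel_w + 1);
        const unsigned int pady = out_dims.second - (out_y - kernel_h + 1);

        return std::make_pair(out_x + padx, out_y + pady);
    }

For example, a 4x4 input with a 3x3 kernel and stride 2 (no padding) targets a 9x9 output: the upsampled shape is 7x7, padx and pady work out to 4, and the resulting 11x11 scaled tensor convolved at stride 1 indeed yields 9x9.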
diff --git a/arm_compute/graph/nodes/DeconvolutionLayerNode.h b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
index 73210a299e..19501482c6 100644
--- a/arm_compute/graph/nodes/DeconvolutionLayerNode.h
+++ b/arm_compute/graph/nodes/DeconvolutionLayerNode.h
@@ -55,14 +55,12 @@ public:
* @param[in] input_descriptor Input descriptor
* @param[in] weights_descriptor Weights descriptor
* @param[in] info Convolution operation attributes
- * @param[in] inner_border Inner border (right, top)
*
* @return Output descriptor
*/
static TensorDescriptor compute_output_descriptor(const TensorDescriptor &input_descriptor,
const TensorDescriptor &weights_descriptor,
- const PadStrideInfo &info,
- const Size2D &inner_border);
+ const PadStrideInfo &info);
// Inherited overridden methods:
NodeType type() const override;
diff --git a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
index 4dce1e1801..6716cd6fdd 100644
--- a/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h
@@ -27,6 +27,8 @@
#include "arm_compute/runtime/CL/functions/CLConvolutionLayer.h"
#include "arm_compute/runtime/CL/functions/CLDeconvolutionLayerUpsample.h"
+#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
+
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/IFunction.h"
@@ -62,6 +64,14 @@ class CLDeconvolutionLayer : public IFunction
public:
/** Constructor */
CLDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDeconvolutionLayer(const CLDeconvolutionLayer &) = delete;
+ /** Default move constructor */
+ CLDeconvolutionLayer(CLDeconvolutionLayer &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLDeconvolutionLayer &operator=(const CLDeconvolutionLayer &) = delete;
+ /** Default move assignment operator */
+ CLDeconvolutionLayer &operator=(CLDeconvolutionLayer &&) = default;
/** Set the input, weights, biases and output tensors.
*
* @param[in,out] input Input tensor. 3 lower dimensions represent a single input, and an optional 4th dimension for batch of inputs. Data types supported: QASYMM8/F16/F32.
@@ -74,7 +84,7 @@ public:
* @param[in] weights_info (Optional) Weights information needed for @ref CLConvolutionLayer, specifies if the weights tensor has been reshaped with @ref CLWeightsReshapeKernel.
*
*/
- void configure(ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
+ void configure(ICLTensor *input, ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &info,
unsigned int inner_border_right, unsigned int inner_border_top, const WeightsInfo &weights_info = WeightsInfo());
/** Static function to check if given info will lead to a valid configuration of @ref CLDeconvolutionLayer
*
@@ -100,7 +110,10 @@ private:
CLMemoryGroup _memory_group;
CLDeconvolutionLayerUpsample _scale_f;
CLConvolutionLayer _conv_f;
+ CPPFlipWeightsKernel _flip_weights;
CLTensor _scaled_output;
+ ICLTensor *_weights;
+ CLTensor _weights_flipped;
bool _is_prepared;
};
}
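A minimal sketch of how the updated function might be driven (shapes, fill steps and the bias tensor are illustrative; this is an outline under the assumption of a standard CL runtime setup, not a complete test):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLDeconvolutionLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // Illustrative shapes: 4x4x1 input, 3x3 kernel, stride 2, no padding -> 9x9 output
        CLTensor input, weights, bias, output;
        input.allocator()->init(TensorInfo(TensorShape(4U, 4U, 1U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 1U, 1U), 1, DataType::F32));
        bias.allocator()->init(TensorInfo(TensorShape(1U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(9U, 9U, 1U), 1, DataType::F32));

        CLDeconvolutionLayer deconv;
        deconv.configure(&input, &weights, &bias, &output, PadStrideInfo(2, 2, 0, 0), 0U, 0U);

        input.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        output.allocator()->allocate();

        // Fill input/weights/bias here, then:
        deconv.run();
        CLScheduler::get().sync();
        return 0;
    }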
diff --git a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
index 3e527168c1..0cca555621 100644
--- a/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h
@@ -28,6 +28,7 @@
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h"
+#include "arm_compute/core/CPP/kernels/CPPFlipWeightsKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
@@ -111,12 +112,14 @@ public:
void prepare() override;
private:
- MemoryGroup _memory_group;
- NEConvolutionLayer _conv_f;
- CPPUpsample _upsample_f;
- Tensor _scaled_output;
- ITensor *_input;
- PadStrideInfo _info;
+ MemoryGroup _memory_group;
+ NEConvolutionLayer _conv_f;
+ CPPUpsample _upsample_f;
+ CPPFlipWeightsKernel _flip_weights;
+ Tensor _scaled_output;
+ Tensor _weights_flipped;
+ ITensor *_input;
+ PadStrideInfo _info;
std::pair<unsigned int, unsigned int> _inner_border;
bool _is_prepared;
};