From f5f34bb068565bf9435ba5561aae1c9280db8bbe Mon Sep 17 00:00:00 2001
From: Pablo Tello
Date: Tue, 22 Aug 2017 13:34:13 +0100
Subject: COMPMID-470: Neon Deconvolution.

Implemented by up-sampling the input with zero insertions
between the input samples and convolving the deconvolution
kernels on the up-sampled result.

The up-sampling is performed by the function NEDeconvolutionLayerUpsample.
The convolution is done by NEDirectConvolutionLayer.

Change-Id: I25f7ba7c6b99cd9310797972ede40aeff4a54900
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/85319
Tested-by: Kaizen
Reviewed-by: Anthony Barbier
---
 .../NEON/functions/NEDeconvolutionLayer.cpp        | 114 ++++++++++++++++++
 .../functions/NEDeconvolutionLayerUpsample.cpp     | 121 +++++++++++++++++++
 2 files changed, 235 insertions(+)
 create mode 100644 src/runtime/NEON/functions/NEDeconvolutionLayer.cpp
 create mode 100644 src/runtime/NEON/functions/NEDeconvolutionLayerUpsample.cpp
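
As a point of reference, the zero-insertion scheme described above can be sketched in a few
lines of standalone C++. This is only a 1-D illustration of the idea, not the library
implementation; the input samples, kernel taps and stride are arbitrary example values.

// 1-D illustration: deconvolution as zero-insertion upsampling followed by convolution.
#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    const std::vector<float> input  = { 1.f, 2.f, 3.f }; // input samples (example values)
    const std::vector<float> kernel = { 1.f, 1.f, 1.f }; // 3-tap kernel (example values)
    const unsigned int       stride = 2;                 // deconvolution stride

    // Up-sample: insert (stride - 1) zeros between neighbouring input samples.
    std::vector<float> upsampled;
    for(std::size_t i = 0; i < input.size(); ++i)
    {
        upsampled.push_back(input[i]);
        if(i + 1 < input.size())
        {
            upsampled.insert(upsampled.end(), stride - 1, 0.f);
        }
    }

    // Convolve the up-sampled signal, padding each border with p' = k - p - 1 zeros
    // (here p = 0 and k = 3, so p' = 2), which is the padding rule used by the patch.
    const int k   = static_cast<int>(kernel.size());
    const int pad = k - 1;
    std::vector<float> output;
    for(int o = -pad; o + k <= static_cast<int>(upsampled.size()) + pad; ++o)
    {
        float acc = 0.f;
        for(int j = 0; j < k; ++j)
        {
            const int idx = o + j;
            if(idx >= 0 && idx < static_cast<int>(upsampled.size()))
            {
                acc += upsampled[static_cast<std::size_t>(idx)] * kernel[static_cast<std::size_t>(j)];
            }
        }
        output.push_back(acc);
    }

    // Output length matches the deconvolution formula (in - 1) * stride + k = (3 - 1) * 2 + 3 = 7.
    for(float v : output)
    {
        std::printf("%g ", v);
    }
    std::printf("\n");
    return 0;
}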
+ */ +#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h" + +#include "arm_compute/core/Helpers.h" +#include "arm_compute/core/PixelValue.h" +#include "arm_compute/core/Utils.h" +#include "arm_compute/core/Validate.h" + +using namespace arm_compute; + +NEDeconvolutionLayer::NEDeconvolutionLayer(std::shared_ptr memory_manager) // NOLINT + : _memory_group(std::move(memory_manager)), + _scale_f(), + _conv_f(), + _scaled_output() +{ +} + +void NEDeconvolutionLayer::configure(ITensor *input, const ITensor *weights, const ITensor *bias, ITensor *output, const PadStrideInfo &info, + unsigned int ax, unsigned int ay, float upscalex, float upscaley) +{ + ARM_COMPUTE_ERROR_ON_NULLPTR(output); + ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32); + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) != weights->info()->dimension(1)); + ARM_COMPUTE_ERROR_ON(weights->info()->dimension(0) < 1); + + auto out_dims = deconvolution_output_dimensions(input->info()->dimension(0), input->info()->dimension(1), weights->info()->dimension(0), weights->info()->dimension(1), + info.pad().first, info.pad().second, ax, ay, upscalex, upscaley, info.round()); + + const TensorShape output_shape = deconvolution_output_shape(out_dims, input->info()->tensor_shape(), weights->info()->tensor_shape()); + + // Output auto initialization if not yet initialized + auto_init_if_empty(*output->info(), output_shape, 1, input->info()->data_type(), input->info()->fixed_point_position()); + + ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, weights, bias); + ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, weights, bias); + + ARM_COMPUTE_ERROR_ON_MSG(output->info()->dimension(Window::DimX) != output_shape.x(), "Output's width is invalid."); + ARM_COMPUTE_ERROR_ON_MSG(output->info()->dimension(Window::DimY) != output_shape.y(), "Output's height is invalid."); + ARM_COMPUTE_ERROR_ON_MSG(output->info()->dimension(Window::DimZ) != output_shape.z(), "Output's depth is invalid."); + + _memory_group.manage(&_scaled_output); + + // configure scale function + //Init and allocate intermmidiate tensor for output, same size as input but the first two axis are the same as the output tensor + TensorShape scale_out_shape(input->info()->tensor_shape()); + scale_out_shape.set(0, output->info()->dimension(0)); + scale_out_shape.set(1, output->info()->dimension(1)); + TensorInfo scale_out_info(scale_out_shape, 1, input->info()->data_type(), input->info()->fixed_point_position()); + _scaled_output.allocator()->init(scale_out_info); + const unsigned int kernel_size = weights->info()->dimension(0); + // Padding for the upsampled image is calculated with the equiation: p' = k - p - 1, where k is kernel size and p is the input padding + ARM_COMPUTE_ERROR_ON(info.pad().first > (kernel_size - 1)); + const unsigned int tr_px = kernel_size - info.pad().first - 1; + const unsigned int tr_py = kernel_size - info.pad().second - 1; + const unsigned int tr_stride = 1; + const PadStrideInfo transposed_info(tr_stride, tr_stride, tr_px, tr_py); + _scale_f.configure(input, &_scaled_output, std::make_pair(ax, ay), std::make_pair(info.stride().first - 1u, info.stride().second - 1u), transposed_info); + // setup the function to convolve the upscaled output + switch(kernel_size) + { + case 1: + { + _conv_f.configure(&_scaled_output, weights, bias, output, PadStrideInfo(1, 1, 0, 0, DimensionRoundingType::CEIL)); + break; + } + case 3: + { + _conv_f.configure(&_scaled_output, weights, bias, output, PadStrideInfo(1, 1, 1, 
+        case 3:
+        {
+            _conv_f.configure(&_scaled_output, weights, bias, output, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL));
+            break;
+        }
+        case 5:
+        {
+            _conv_f.configure(&_scaled_output, weights, bias, output, PadStrideInfo(1, 1, 2, 2, DimensionRoundingType::CEIL));
+            break;
+        }
+        default:
+        {
+            ARM_COMPUTE_ERROR("Not supported");
+            break;
+        }
+    }
+    _scaled_output.allocator()->allocate();
+}
+
+void NEDeconvolutionLayer::run()
+{
+    _memory_group.acquire();
+    _scale_f.run();
+    _conv_f.run();
+    _memory_group.release();
+}
diff --git a/src/runtime/NEON/functions/NEDeconvolutionLayerUpsample.cpp b/src/runtime/NEON/functions/NEDeconvolutionLayerUpsample.cpp
new file mode 100644
index 0000000000..63f17bcb5a
--- /dev/null
+++ b/src/runtime/NEON/functions/NEDeconvolutionLayerUpsample.cpp
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2016, 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayerUpsample.h"
+
+#include "arm_compute/core/Coordinates.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/kernels/NEDeconvolutionLayerUpsampleKernel.h"
+#include "arm_compute/core/PixelValue.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "support/ToolchainSupport.h"
+
+#include <cmath>
+#include <cstddef>
+#include <utility>
+
+using namespace arm_compute;
+
+namespace
+{
+inline void precompute_offsets(ITensor *offsets, float wr, size_t input_element_size, const std::pair<unsigned int, unsigned int> &a,
+                               const std::pair<unsigned int, unsigned int> &iz, const PadStrideInfo &info)
+{
+    ARM_COMPUTE_ERROR_ON(nullptr == offsets);
+    Window    win;
+    const int padx          = info.pad().first;
+    const int pady          = info.pad().second;
+    const int ax            = a.first;
+    const int ay            = a.second;
+    const int offset_width  = offsets->info()->dimension(0);
+    const int offset_height = offsets->info()->dimension(1);
+    // The values of ax and ay denote the number of ZEROS to be added on the top and right inner border of the image.
+    // The step value along the X/Y axis depends on the number of zeros to be inserted between samples (number of zeros + 1).
+    // Pre-compute the X offsets; Y's stride is unknown at this point, so we can't pre-compute Y's offsets.
+    for(int yi = ay; yi < (offset_height - pady); yi += (1 + iz.second))
+    {
+        for(int xi = padx; xi < (offset_width - ax); xi += (1 + iz.first))
+        {
+            int         *ptr   = reinterpret_cast<int *>(offsets->ptr_to_element(Coordinates(xi, yi)));
+            const size_t in_xi = (xi + 0.5f) * wr;
+            *reinterpret_cast<int32_t *>(ptr) = in_xi * input_element_size;
+        }
+    }
+}
+} // namespace
+
+NEDeconvolutionLayerUpsample::NEDeconvolutionLayerUpsample(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
+    : _memory_group(std::move(memory_manager)),
+      _offsets(),
+      _border_handler(),
+      _upsample()
+{
+}
+
+void NEDeconvolutionLayerUpsample::configure(ITensor *input, ITensor *output, const std::pair<unsigned int, unsigned int> &a,
+                                             const std::pair<unsigned int, unsigned int> &iz, const PadStrideInfo &info)
+{
+    ARM_COMPUTE_ERROR_ON(nullptr == input);
+    ARM_COMPUTE_ERROR_ON(nullptr == output);
+
+    for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
+    {
+        ARM_COMPUTE_ERROR_ON(input->info()->dimension(i) != output->info()->dimension(i));
+    }
+
+    // Get the tensor shape
+    const TensorShape shape(output->info()->dimension(0), output->info()->dimension(1));
+
+    // Compute the ratio between source width/height and destination width/height
+    const auto wr = static_cast<float>(input->info()->dimension(0)) / static_cast<float>(output->info()->dimension(0));
+    const auto hr = static_cast<float>(input->info()->dimension(1)) / static_cast<float>(output->info()->dimension(1));
+    ARM_COMPUTE_UNUSED(hr);
+    // Get the element size of the input image
+    const size_t input_element_size = input->info()->element_size();
+
+    TensorInfo tensor_info_offsets(shape, Format::S32);
+    _offsets.allocator()->init(tensor_info_offsets);
+
+    _upsample.configure(input, &_offsets, output);
+
+    // Allocate once the configure methods have been called
+    _offsets.allocator()->allocate();
+    // Pre-compute offsets for nearest interpolation
+    std::fill_n(reinterpret_cast<int32_t *>(_offsets.buffer()), _offsets.info()->total_size() / sizeof(int32_t), -1 * input_element_size);
+    precompute_offsets(&_offsets, wr, input_element_size, a, iz, info);
+
+    _border_handler.configure(input, _upsample.border_size(), BorderMode::CONSTANT, PixelValue(0));
+}
+
+void NEDeconvolutionLayerUpsample::run()
+{
+    NEScheduler::get().schedule(&_border_handler, Window::DimZ);
+    _memory_group.acquire();
+    NEScheduler::get().schedule(&_upsample, Window::DimY);
+    _memory_group.release();
+}
-- 
cgit v1.2.1
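
For context, the sketch below shows roughly how the new function would be driven from user code,
using the public runtime API exercised by the patch (Tensor, TensorInfo, PadStrideInfo, configure/run).
The tensor shapes, the stride/padding, and the ax/ay/upscale arguments are illustrative assumptions
only, not values taken from the change.

// Illustrative call sequence for NEDeconvolutionLayer (shapes and parameters are example values).
#include "arm_compute/runtime/NEON/functions/NEDeconvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    Tensor input{}, weights{}, bias{}, output{};

    // Example shapes: 4x4 single-channel input, one 3x3 kernel (the patch supports kernel sizes 1, 3 and 5).
    input.allocator()->init(TensorInfo(TensorShape(4U, 4U, 1U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 1U, 1U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(1U), 1, DataType::F32));
    // The output info is auto-initialised by configure() from the computed deconvolution dimensions.

    NEDeconvolutionLayer deconv(nullptr);
    // Stride 2 with no padding; ax/ay (inner border zeros) and the upscale factors are example values.
    deconv.configure(&input, &weights, &bias, &output, PadStrideInfo(2, 2, 0, 0), 0U, 0U, 1.f, 1.f);

    // Allocate backing memory once configuration is done.
    input.allocator()->allocate();
    weights.allocator()->allocate();
    bias.allocator()->allocate();
    output.allocator()->allocate();

    // ... fill input, weights and bias ...

    deconv.run();
    return 0;
}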