author     Georgios Pinitas <georgios.pinitas@arm.com>  2018-02-13 12:15:13 +0000
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:47:18 +0000
commit     284cfe2e3a44e5b20978e561c96c94d2193e93a1 (patch)
tree       204cb044578d66c89b3a60d0a3c8c7920c8a768e /src
parent     f29975848a384fc127cf5401683fc246bab0d903 (diff)
download   ComputeLibrary-284cfe2e3a44e5b20978e561c96c94d2193e93a1.tar.gz
COMPMID-903: Implements NEPermute for NHWC conversions
Change-Id: I4083e8d16bb23933634f229a1408dfd0e8f2922a
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/120069
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/core/NEON/kernels/NEPermuteKernel.cpp          215
-rw-r--r--  src/runtime/NEON/functions/NEConvolutionLayer.cpp   41
-rw-r--r--  src/runtime/NEON/functions/NEPermute.cpp             41
3 files changed, 286 insertions(+), 11 deletions(-)
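
For context, a minimal sketch of how the new NEPermute function might be driven (the shape and allocation order are illustrative, not taken from this patch):

#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

int main()
{
    // A 16-channel 13x13 feature map; ACL shapes are stored innermost-first,
    // so this reads as [W, H, C] = [13, 13, 16]
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(13U, 13U, 16U), 1, DataType::F32));

    // PermutationVector(2, 0, 1) requests the CHW -> HWC rearrangement handled
    // by the new kernel; configure() infers and initializes dst's shape
    NEPermute permute;
    permute.configure(&src, &dst, PermutationVector(2U, 0U, 1U));

    src.allocator()->allocate();
    dst.allocator()->allocate();

    permute.run();
    return 0;
}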
diff --git a/src/core/NEON/kernels/NEPermuteKernel.cpp b/src/core/NEON/kernels/NEPermuteKernel.cpp
new file mode 100644
index 0000000000..f5f276f1cc
--- /dev/null
+++ b/src/core/NEON/kernels/NEPermuteKernel.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEPermuteKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+
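+// Including the convolution reorder shims inside an anonymous namespace keeps
+// their symbols local to this translation unit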
+namespace
+{
+#include "arm_compute/core/NEON/kernels/convolution/common/shims.hpp"
+} // namespace
+
+#include <cstddef>
+#include <cstdint>
+
+using namespace arm_compute;
+
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::S8, DataType::QS8, DataType::QASYMM8,
+ DataType::U16, DataType::S16, DataType::QS16,
+ DataType::U32, DataType::S32,
+ DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() < 3, "Invalid input size!");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+        (perm.num_dimensions() != 3) || ((perm[0] != 2 || perm[1] != 0 || perm[2] != 1) && (perm[0] != 1 || perm[1] != 2 || perm[2] != 0)),
+        "Only the [2, 0, 1] and [1, 2, 0] permutations are supported");
+
+ const TensorShape output_shape = misc::shape_calculator::compute_permutation_output_shape(*input, perm);
+
+ // Validate configured output
+ if(output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+ }
+
+ return Status{};
+}
+} // namespace
+
+template <typename T>
+void NEPermuteKernel::run_permute(const Window &window)
+{
+ // Input window
+ Window window_in = window;
+ window_in.set(Window::DimX, Window::Dimension(window.x().start(), window.x().end(), window.x().end() - window.x().start()));
+ window_in.set(Window::DimY, Window::Dimension(window.y().start(), window.y().end(), window.y().end() - window.y().start()));
+ window_in.set(Window::DimZ, Window::Dimension(window.z().start(), window.z().end(), window.z().end() - window.z().start()));
+ window_in.set(3, Window::Dimension(window[3].start(), window[3].end(), window[3].end() - window[3].start()));
+
+ // Output window
+ Window window_out(window);
+ const Window::Dimension zero_window = Window::Dimension(0, 0, 0);
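+    // Note that <= is intentional below: the dimension just past the 3-D
+    // permutation (the batch dimension) is zeroed as well, since the reorder
+    // shims iterate over batches internally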
+ for(size_t d = 0; d <= _perm.num_dimensions(); ++d)
+ {
+ window_out.set(d, zero_window);
+ }
+
+ // Create iterators
+ Iterator in(_input, window_in);
+ Iterator out(_output, window_out);
+
+ // CHW -> HWC
+ if((_perm.num_dimensions() == 3) && (_perm[0] == 2) && (_perm[1] == 0) && (_perm[2] == 1))
+ {
+ const int in_row_stride = _input->info()->strides_in_bytes().y() / sizeof(T);
+ const int in_channel_stride = _input->info()->strides_in_bytes().z() / sizeof(T);
+ const int in_batch_stride = _input->info()->strides_in_bytes()[3] / sizeof(T);
+
+ const int out_channel_stride = _output->info()->strides_in_bytes().x() / sizeof(T);
+ const int out_col_stride = _output->info()->strides_in_bytes().y() / sizeof(T);
+ const int out_row_stride = _output->info()->strides_in_bytes().z() / sizeof(T);
+ const int out_batch_stride = _output->info()->strides_in_bytes()[3] / sizeof(T);
+
+ const int n_cols = _input->info()->tensor_shape().x();
+ const int n_rows = window_in.y().step();
+ const int n_channels = _input->info()->tensor_shape().z();
+ const int n_batches = _input->info()->tensor_shape()[3];
+
+ execute_window_loop(window_in, [&](const Coordinates & id)
+ {
+ const int idx = id[0] * out_col_stride + id[1] * out_row_stride + id[2] * out_channel_stride;
+ reorder::nchw_to_nhwc(reinterpret_cast<const T *>(in.ptr()), reinterpret_cast<T *>(out.ptr()) + idx,
+ n_batches, n_channels, n_rows, n_cols,
+ in_batch_stride, in_channel_stride, in_row_stride,
+ out_batch_stride, out_row_stride, out_col_stride);
+ },
+ in, out);
+ }
+ // HWC -> CHW
+ else if((_perm.num_dimensions() == 3) && (_perm[0] == 1) && (_perm[1] == 2) && (_perm[2] == 0))
+ {
+ const int in_col_stride = _input->info()->strides_in_bytes().y() / sizeof(T);
+ const int in_row_stride = _input->info()->strides_in_bytes().z() / sizeof(T);
+ const int in_batch_stride = _input->info()->strides_in_bytes()[3] / sizeof(T);
+
+ const int out_col_stride = _output->info()->strides_in_bytes().x() / sizeof(T);
+ const int out_row_stride = _output->info()->strides_in_bytes().y() / sizeof(T);
+ const int out_channel_stride = _output->info()->strides_in_bytes().z() / sizeof(T);
+ const int out_batch_stride = _output->info()->strides_in_bytes()[3] / sizeof(T);
+
+ const int n_channels = _input->info()->tensor_shape().x();
+ const int n_cols = window_in.y().step();
+ const int n_rows = _input->info()->tensor_shape().z();
+ const int n_batches = _input->info()->tensor_shape()[3];
+
+ execute_window_loop(window_in, [&](const Coordinates & id)
+ {
+ const int idx = id[0] * out_channel_stride + id[1] * out_col_stride + id[2] * out_row_stride;
+ reorder::nhwc_to_nchw(reinterpret_cast<const T *>(in.ptr()), reinterpret_cast<T *>(out.ptr()) + idx,
+ n_batches, n_rows, n_cols, n_channels,
+ in_batch_stride, in_row_stride, in_col_stride,
+ out_batch_stride, out_channel_stride, out_row_stride);
+ },
+ in, out);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Unsupported permutation vector");
+ }
+}
+
+NEPermuteKernel::NEPermuteKernel()
+ : _func(), _input(nullptr), _output(nullptr), _perm()
+{
+}
+
+void NEPermuteKernel::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+ const TensorShape output_shape = misc::shape_calculator::compute_permutation_output_shape(*input->info(), perm);
+    // Auto-initialize the output if it has not been initialized yet
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
+
+ // Perform validation step
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), perm));
+
+ _input = input;
+ _output = output;
+ _perm = perm;
+
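+    // A permutation only moves elements around, so dispatch on element size
+    // and reinterpret each data type of that width as an unsigned integer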
+ switch(input->info()->element_size())
+ {
+ case 1:
+ _func = &NEPermuteKernel::run_permute<uint8_t>;
+ break;
+ case 2:
+ _func = &NEPermuteKernel::run_permute<uint16_t>;
+ break;
+ case 4:
+ _func = &NEPermuteKernel::run_permute<uint32_t>;
+ break;
+ default:
+ ARM_COMPUTE_ERROR("Element size not supported");
+ break;
+ }
+
+ // Configure kernel window
+ Window win = calculate_max_window(*input->info(), Steps());
+
+    // NEPermute doesn't need padding, so update_window_and_padding() can be skipped
+ Coordinates coord;
+ coord.set_num_dimensions(output->info()->num_dimensions());
+ output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
+
+ ICPPKernel::configure(win);
+}
+
+Status NEPermuteKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, perm));
+ return Status{};
+}
+
+void NEPermuteKernel::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);
+
+ if(_func != nullptr)
+ {
+ (this->*_func)(window);
+ }
+}
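
The heavy lifting above is delegated to reorder::nchw_to_nhwc and reorder::nhwc_to_nchw from the convolution shims, which are not part of this patch; the input window is collapsed so that each thread hands its whole slice to a single shim call. As a reference for the stride arithmetic, a plain scalar loop performing the same NCHW -> NHWC reorder might look like this (a hypothetical helper, not the NEON-optimised shim):

// Scalar reference for an NCHW -> NHWC reorder with explicit strides.
// Strides are in elements; the input's column stride and the output's
// channel stride are implicitly 1 (the innermost dimensions).
template <typename T>
void nchw_to_nhwc_ref(const T *in, T *out,
                      int n_batches, int n_channels, int n_rows, int n_cols,
                      int in_batch_stride, int in_channel_stride, int in_row_stride,
                      int out_batch_stride, int out_row_stride, int out_col_stride)
{
    for(int n = 0; n < n_batches; ++n)
        for(int c = 0; c < n_channels; ++c)
            for(int i = 0; i < n_rows; ++i)
                for(int j = 0; j < n_cols; ++j)
                {
                    out[n * out_batch_stride + i * out_row_stride + j * out_col_stride + c] =
                        in[n * in_batch_stride + c * in_channel_stride + i * in_row_stride + j];
                }
}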
diff --git a/src/runtime/NEON/functions/NEConvolutionLayer.cpp b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
index c6f99782ec..f790f6a95f 100644
--- a/src/runtime/NEON/functions/NEConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEConvolutionLayer.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/NEON/kernels/arm32/NEGEMMAArch32Kernel.h"
#include "arm_compute/core/NEON/kernels/arm64/NEGEMMAArch64Kernel.h"
+#include "arm_compute/core/NEON/kernels/arm64/NEGEMMAArch64NativeKernel.h"
#include "arm_compute/core/PixelValue.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
@@ -255,6 +256,25 @@ void NEConvolutionLayer::configure_mm(const ITensor *input, const ITensor *weigh
}
}
+void NEConvolutionLayer::configure_asm_mm(const struct CPUInfo &ci, int M, int N, int K)
+{
+ ARM_COMPUTE_UNUSED(ci);
+ ARM_COMPUTE_UNUSED(M);
+ ARM_COMPUTE_UNUSED(N);
+ ARM_COMPUTE_UNUSED(K);
+#if defined(__arm__) || defined(__aarch64__)
+#if defined(__arm__)
+ GemmInterleaved<sgemm_8x6, float, float> gemm(&ci, M, N, K, false, false);
+#elif defined(__aarch64__)
+ GemmInterleaved<sgemm_12x8, float, float> gemm(&ci, M, N, K, false, false);
+#endif /* defined(__arm__) */
+
+ constexpr size_t alignment = 4096;
+ _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
+ _memory_group.manage(&_workspace);
+#endif /* defined(__arm__) || defined(__aarch64__) */
+}
+
void NEConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info)
{
// Perform validate step
@@ -384,7 +404,6 @@ void NEConvolutionLayer::configure(const ITensor *input, const ITensor *weights,
_input_im2col_kernel.configure(input, &_input_im2col_reshaped, Size2D(kernel_width, kernel_height), conv_info, _append_bias);
// Configure matrix multiply
-#if defined(__arm__) || defined(__aarch64__)
if(_mm_optimised_kernel != nullptr)
{
struct CPUInfo ci = NEScheduler::get().cpu_info();
@@ -393,15 +412,16 @@ void NEConvolutionLayer::configure(const ITensor *input, const ITensor *weights,
const int N = _gemm_output.info()->tensor_shape().x();
const int K = _input_im2col_reshaped.info()->tensor_shape().x();
-#if defined(__arm__)
- GemmInterleaved<sgemm_8x6, float, float> gemm(&ci, M, N, K, false, false);
-#elif defined(__aarch64__)
- GemmInterleaved<sgemm_12x8, float, float> gemm(&ci, M, N, K, false, false);
-#endif /* defined(__arm__) || defined(__aarch64__) */
-
- constexpr size_t alignment = 4096;
- _workspace.allocator()->init(TensorInfo(TensorShape{ (gemm.get_working_size() + alignment - 1) * NEScheduler::get().num_threads() }, 1, DataType::U8));
- _memory_group.manage(&_workspace);
+#if defined(__aarch64__)
+ if((N <= 128) && (K <= 128))
+ {
+ _mm_optimised_kernel = support::cpp14::make_unique<NEGEMMAArch64NativeKernel>();
+ }
+ else
+#endif /* defined(__aarch64__) */
+ {
+ configure_asm_mm(ci, M, N, K);
+ }
// Configure matrix multiplication kernel
_mm_optimised_kernel->configure(&_input_im2col_reshaped, weights, &_gemm_output, &_workspace);
@@ -409,7 +429,6 @@ void NEConvolutionLayer::configure(const ITensor *input, const ITensor *weights,
_workspace.allocator()->allocate();
}
else
-#endif /* defined(__arm__) || defined(__aarch64__) */
{
if(_is_interleaved_transposed)
{
diff --git a/src/runtime/NEON/functions/NEPermute.cpp b/src/runtime/NEON/functions/NEPermute.cpp
new file mode 100644
index 0000000000..92abd03e2a
--- /dev/null
+++ b/src/runtime/NEON/functions/NEPermute.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEPermute.h"
+
+#include "arm_compute/core/NEON/kernels/NEPermuteKernel.h"
+#include "support/ToolchainSupport.h"
+
+using namespace arm_compute;
+
+void NEPermute::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
+{
+ auto k = arm_compute::support::cpp14::make_unique<NEPermuteKernel>();
+ k->configure(input, output, perm);
+ _kernel = std::move(k);
+}
+
+Status NEPermute::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
+{
+ return NEPermuteKernel::validate(input, output, perm);
+}
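
Since the kernel exposes a static validate(), support for a given permutation can be checked up front without allocating tensors. A sketch, assuming the usual ACL convention that an empty output TensorInfo skips the output checks:

#include "arm_compute/core/Error.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEPermute.h"

using namespace arm_compute;

bool chw_to_hwc_supported()
{
    // 3-D F32 input; the default-constructed output has total_size() == 0,
    // so the output shape and type checks are skipped
    const TensorInfo input(TensorShape(13U, 13U, 16U), 1, DataType::F32);
    const TensorInfo output;

    const Status status = NEPermute::validate(&input, &output, PermutationVector(2U, 0U, 1U));
    return status.error_code() == ErrorCode::OK;
}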