aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorPablo Tello <pablo.tello@arm.com>2017-12-15 09:48:59 +0000
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:42:33 +0000
commit02541fb21eca5574fcce012973774a6f213877ee (patch)
treefb3b2652cfab65119f9814f4167a10b030e5f307 /src
parentd064389293e4a71781984b2b24f3d44964812949 (diff)
downloadComputeLibrary-02541fb21eca5574fcce012973774a6f213877ee.tar.gz
COMPMID-719: NEWinogradLayer reordering using NEPermute.
Input reordering from NCHW to NHWC Output reordering from NHWC to NCHW Weights reordering from [Ofm x Ifm x Height x Width] to [Height x Width x Ifm x Ofm] Change-Id: I85aabedb1f9c13700bc4919eb3130f4d4bd0b465 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/113631 Reviewed-by: Anthony Barbier <anthony.barbier@arm.com> Tested-by: Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'src')
-rw-r--r--src/core/CPP/kernels/CPPPermuteKernel.cpp37
-rw-r--r--src/core/NEON/kernels/NEWinogradLayerKernel.cpp19
-rw-r--r--src/runtime/NEON/functions/NEWinogradLayer.cpp69
3 files changed, 87 insertions, 38 deletions
diff --git a/src/core/CPP/kernels/CPPPermuteKernel.cpp b/src/core/CPP/kernels/CPPPermuteKernel.cpp
index 4b137b01d4..80b0abaabc 100644
--- a/src/core/CPP/kernels/CPPPermuteKernel.cpp
+++ b/src/core/CPP/kernels/CPPPermuteKernel.cpp
@@ -51,13 +51,18 @@ Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, c
DataType::U32, DataType::S32,
DataType::F16, DataType::F32);
ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() < 3, "Invalid input size!");
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(perm.num_dimensions() != 3 && ((perm[0] != 2 && perm[1] != 0 && perm[2] != 1) || (perm[0] != 1 && perm[1] != 2 && perm[2] != 0)),
- "Only [2, 0, 1] and [1, 2, 0] permutation is supported");
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(
+ (perm.num_dimensions() != 3 && ((perm[0] != 2 && perm[1] != 0 && perm[2] != 1) || (perm[0] != 1 && perm[1] != 2 && perm[2] != 0))) && (perm.num_dimensions() != 4 && ((perm[0] != 2 && perm[1] != 0
+ && perm[2] != 1)
+ || (perm[0] != 1 && perm[1] != 2 && perm[2] != 0))),
+ "Only [2, 0, 1],[1, 2, 0] and [3, 2, 0, 1] permutation is supported");
+
+ const TensorShape output_shape = get_output_shape(input, perm);
// Validate configured output
if(output->total_size() != 0)
{
- ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), get_output_shape(input, perm));
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
}
@@ -72,11 +77,13 @@ void CPPPermuteKernel::run_permute(const Window &window)
const int output_stride_x = _output->info()->strides_in_bytes().x();
const int output_stride_y = _output->info()->strides_in_bytes().y();
const int output_stride_z = _output->info()->strides_in_bytes().z();
+ const int output_stride_w = _output->info()->strides_in_bytes()[3];
Window window_out(window);
window_out.set(Window::DimX, Window::Dimension(0, 0, 0));
window_out.set(Window::DimY, Window::Dimension(0, 0, 0));
window_out.set(Window::DimZ, Window::Dimension(0, 0, 0));
+ window_out.set(3, Window::Dimension(0, 0, 0));
// Create iterators
Iterator in(_input, window);
@@ -87,21 +94,35 @@ void CPPPermuteKernel::run_permute(const Window &window)
{
execute_window_loop(window, [&](const Coordinates & id)
{
- const int idx = id.y() * output_stride_z + id.x() * output_stride_y + id.z() * output_stride_x;
+ const int idx = id[3] * output_stride_w + id.y() * output_stride_z + id.x() * output_stride_y + id.z() * output_stride_x;
*(reinterpret_cast<T *>(out.ptr() + idx)) = *(reinterpret_cast<const T *>(in.ptr()));
},
in, out);
}
// Run [1, 2, 0] permute
- else
+ else if(_perm[0] == 1 && _perm[1] == 2 && _perm[2] == 0)
{
execute_window_loop(window, [&](const Coordinates & id)
{
- const int idx = id.x() * output_stride_z + id.z() * output_stride_y + id.y() * output_stride_x;
+ const int idx = id[3] * output_stride_w + id.x() * output_stride_z + id.z() * output_stride_y + id.y() * output_stride_x;
*(reinterpret_cast<T *>(out.ptr() + idx)) = *(reinterpret_cast<const T *>(in.ptr()));
},
in, out);
}
+ // Run [3, 2, 0, 1] permute
+ else if(_perm[0] == 3 && _perm[1] == 2 && _perm[2] == 0 && _perm[3] == 1)
+ {
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const int idx = id[3] * output_stride_x + id[2] * output_stride_y + id[0] * output_stride_z + id[1] * output_stride_w;
+ *(reinterpret_cast<T *>(out.ptr() + idx)) = *(reinterpret_cast<const T *>(in.ptr()));
+ },
+ in, out);
+ }
+ else
+ {
+ ARM_COMPUTE_ERROR("Not supported.");
+ }
}
CPPPermuteKernel::CPPPermuteKernel()
@@ -112,9 +133,9 @@ CPPPermuteKernel::CPPPermuteKernel()
void CPPPermuteKernel::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
+ const TensorShape output_shape = get_output_shape(input->info(), perm);
 // Output auto initialization if not yet initialized
- auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(get_output_shape(input->info(), perm)));
+ auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
// Perform validation step
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), perm));
diff --git a/src/core/NEON/kernels/NEWinogradLayerKernel.cpp b/src/core/NEON/kernels/NEWinogradLayerKernel.cpp
index fe633368c0..eaf77e6253 100644
--- a/src/core/NEON/kernels/NEWinogradLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEWinogradLayerKernel.cpp
@@ -108,30 +108,25 @@ size_t NEWinogradLayerKernel::get_kernel_transform_working_size(const KernelShap
}
NEWinogradLayerKernel::NEWinogradLayerKernel()
- : _convolver(nullptr), _output(nullptr)
+ : _convolver(nullptr)
{
}
-void NEWinogradLayerKernel::configure(ITensor *output, Winograd3x3F32 *convolver)
+void NEWinogradLayerKernel::configure(Winograd3x3F32 *convolver)
{
- ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON_NULLPTR(convolver);
_convolver = convolver;
- Window win = calculate_max_window(*output->info());
+ Window win;
+ win.set(Window::DimX, Window::Dimension(0, 15, 1));
INEKernel::configure(win);
}
void NEWinogradLayerKernel::run(const Window &window, const ThreadInfo &info)
{
- ARM_COMPUTE_UNUSED(window);
ARM_COMPUTE_UNUSED(info);
ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
- ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
- ARM_COMPUTE_ERROR_ON(info.num_threads < 1);
- const size_t tid = info.thread_id;
- const size_t num_threads = std::min(info.num_threads, 16);
- const size_t num_gemms_per_thread = 16 / num_threads;
- const size_t first_gemm = tid * num_gemms_per_thread;
- const size_t last_gemm = (tid == (num_threads - 1)) ? 15 : first_gemm + num_gemms_per_thread - 1;
+ const size_t first_gemm = window.x().start();
+ const size_t last_gemm = window.x().end();
_convolver->_pimpl->convolver.execute(first_gemm, last_gemm);
}
} // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEWinogradLayer.cpp b/src/runtime/NEON/functions/NEWinogradLayer.cpp
index 3251de4ae4..800153e8b1 100644
--- a/src/runtime/NEON/functions/NEWinogradLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradLayer.cpp
@@ -43,7 +43,8 @@ inline Tensor4DShape internal_get_input_shape(const arm_compute::ITensor *input)
namespace arm_compute
{
NEWinogradLayer::NEWinogradLayer(std::shared_ptr<IMemoryManager> memory_manager)
- : _memory_group(std::move(memory_manager)), _winograd_kernel(), _weights_workspace(), _workspace(), _kernel_storage(), _input(), _weights(), _output(), _reshaped_kernel(false), _conv()
+ : _memory_group(std::move(memory_manager)), _winograd_kernel(), _permute_input(), _permute_weights(), _permute_output(), _workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(),
+ _weights_hwio(), _input(), _weights(), _output(), _reshaped_kernel(false), _conv()
{
} /* arm_compute */
@@ -71,9 +72,8 @@ void NEWinogradLayer::configure(const ITensor *input, const ITensor *weights, co
ARM_COMPUTE_ERROR_ON_MSG(stride_y != 1 || stride_x != 1, "Winograd layer only supports unit strides.");
// Get convolved dimensions
- auto padding = PADDING_VALID;
- const int in_channels = input->info()->dimension(2);
-
+ auto padding = PADDING_VALID;
+ const int in_channels = input->info()->dimension(2);
const int out_channels = output->info()->dimension(2);
const int weights_width = weights->info()->dimension(0);
const int weights_height = weights->info()->dimension(1);
@@ -88,25 +88,45 @@ void NEWinogradLayer::configure(const ITensor *input, const ITensor *weights, co
_memory_group.manage(&_kernel_storage);
// Get workbench size and allocate memory
+
constexpr size_t wspace_alignment = 64;
const size_t ws_size = NEWinogradLayerKernel::get_working_space_size(in_shape, kernel_shape, padding);
_workspace.allocator()->init(TensorInfo(TensorShape{ (ws_size + wspace_alignment - 1) }, 1, DataType::U8));
_memory_group.manage(&_workspace);
-
- // Workspace for weights transform
- const size_t weights_transform_size = NEWinogradLayerKernel::get_kernel_transform_working_size(kernel_shape);
- _weights_workspace.allocator()->init(TensorInfo(TensorShape{ (weights_transform_size + wspace_alignment - 1) }, 1, DataType::U8));
- _memory_group.manage(&_weights_workspace);
-
+ _memory_group.manage(&_input_nhwc);
_kernel_storage.allocator()->allocate();
_workspace.allocator()->allocate();
- _weights_workspace.allocator()->allocate();
// Create Winograd operator object
_conv = support::cpp14::make_unique<Winograd3x3F32>(kernel_shape, in_shape, padding, _kernel_storage.buffer());
 // Configure the kernel, padding not needed so it's safe to call configure after allocate
- _winograd_kernel.configure(output, _conv.get());
+ _winograd_kernel.configure(_conv.get());
+
+ // Re-order a weight tensor from [Output feature map x Input feature map x Height x Width] to [Height x Width x Input feature map x Output feature map]
+ switch(weights->info()->num_dimensions())
+ {
+ case 3:
+ {
+ _permute_weights.configure(weights, &_weights_hwio, PermutationVector(2U, 0U, 1U));
+ break;
+ }
+ case 4:
+ {
+ _permute_weights.configure(weights, &_weights_hwio, PermutationVector(3U, 2U, 0U, 1U));
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Not supported.");
+ break;
+ }
+ }
+ // configure the kernel to transform the input tensor from NCHW -> NHWC
+ _permute_input.configure(input, &_input_nhwc, PermutationVector(2U, 0U, 1U));
+
+ _weights_hwio.allocator()->allocate();
+ _input_nhwc.allocator()->allocate();
}
void NEWinogradLayer::run()
@@ -115,29 +135,42 @@ void NEWinogradLayer::run()
_memory_group.acquire();
if(!_reshaped_kernel)
{
- _conv->transform_weights(reinterpret_cast<const float *>(_weights->buffer()), reinterpret_cast<float *>(_weights_workspace.buffer()));
_reshaped_kernel = true;
+ _permute_weights.run();
+ _conv->transform_weights(reinterpret_cast<const float *>(_weights_hwio.buffer()), nullptr);
}
const Tensor4DShape in_shape(internal_get_input_shape(_input));
auto padding = PADDING_VALID;
//Bring channels to the front as Winograd code expects the tensor to be in the format NHWC
- _conv->nchw2nhwc(in_shape, padding, _workspace.buffer(), reinterpret_cast<const float *>(_input->buffer()));
+ _permute_input.run();
//Get ptrs into the workspace
std::pair<void *, void *> nhwc_ptrs = _conv->get_nhwc_ptrs(in_shape, padding, _workspace.buffer());
 //Setup matrices ptrs and transform the input tensor to the appropriate form before running GEMM.
- _conv->reshape_input(in_shape, padding, nhwc_ptrs.second, _workspace.buffer());
+ _conv->reshape_input(in_shape, padding, reinterpret_cast<float *>(_input_nhwc.buffer()), _workspace.buffer());
//Run 16 GEMMs in multiple threads, each kernel runs one or more GEMMs
- NEScheduler::get().schedule(&_winograd_kernel, Window::DimY);
+ NEScheduler::get().schedule(&_winograd_kernel, Window::DimX);
//Transform the output to the appropriate form
_conv->reshape_output(in_shape, padding, nhwc_ptrs.first);
- //Transform back to NCHW
- _conv->nhwc2nchw(in_shape, padding, _workspace.buffer(), reinterpret_cast<float *>(_output->buffer()));
+ const unsigned int out_width = _output->info()->dimension(0);
+ const unsigned int out_height = _output->info()->dimension(1);
+ const unsigned int out_channels = _output->info()->dimension(2);
+ const unsigned int out_batches = _output->info()->dimension(3);
+
+ // We create a temporary tensor with the results in the workspace so that we can run a function to reorder from NHWC -> NCHW
+ Tensor output_nhwc;
+ TensorInfo info(TensorShape(out_channels, out_width, out_height, out_batches), 1, _output->info()->data_type());
+ output_nhwc.allocator()->init(info);
+ output_nhwc.allocator()->import_memory(Memory(static_cast<uint8_t *>(nhwc_ptrs.first)));
+
+ // Reorder the convoluted output to ACL's ordering NCHW
+ _permute_output.configure(&output_nhwc, _output, PermutationVector(1U, 2U, 0U));
+ _permute_output.run();
_memory_group.release();
#else /* __aarch64__ */