From ca62c6f53eb7244e6fed9f7e932608aa2496d9eb Mon Sep 17 00:00:00 2001
From: Manuel Bottini
Date: Tue, 23 Mar 2021 11:50:34 +0000
Subject: Mixed data-layout testing on high-priority operators

Change the data layout after configure() in the validation tests for:
- Scale
- Pooling
- FullyConnected
- DepthwiseConvolution
- DirectConvolution
- FFTConvolution
- WinogradConvolution
- GEMMConvolution (indirect GEMM included)

Extend the fixtures to support mixed data layouts

Fix issues exposed by the new mixed data layout tests

Resolves: COMPMID-4162

Change-Id: I2f2eb2075f7e24ab3872249d88cadb57b82c5dde
Signed-off-by: Manuel Bottini
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5326
Comments-Addressed: Arm Jenkins
Tested-by: Arm Jenkins
Reviewed-by: Georgios Pinitas
---
 .../NEON/functions/NEWinogradConvolutionLayer.h    |   1 +
 .../kernels/NEDirectConvolutionLayerKernel.cpp     |  11 +-
 .../NEON/kernels/NEDirectConvolutionLayerKernel.h  |   1 +
 src/core/cpu/kernels/pooling/neon/fp16.cpp         |   4 +-
 src/core/cpu/kernels/pooling/neon/fp32.cpp         |   4 +-
 src/core/cpu/kernels/pooling/neon/list.h           |   4 +-
 src/core/cpu/kernels/pooling/neon/nchw/all.cpp     |   2 +-
 src/core/cpu/kernels/scale/sve/qasymm8.cpp         |   7 +-
 src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp  |   7 +-
 .../CL/functions/CLDepthwiseConvolutionLayer.cpp   |   6 +-
 .../NEON/functions/NEWinogradConvolutionLayer.cpp  |  24 +-
 tests/datasets/ScaleValidationDataset.h            |  19 +-
 tests/validation/CL/ConvolutionLayer.cpp           |  55 ++++-
 tests/validation/CL/DepthwiseConvolutionLayer.cpp  |  36 +++
 tests/validation/CL/DirectConvolutionLayer.cpp     |  94 +++++++-
 tests/validation/CL/FFT.cpp                        |  10 +
 tests/validation/CL/FullyConnectedLayer.cpp        |  53 ++++-
 tests/validation/CL/PoolingLayer.cpp               |  41 ++++
 tests/validation/CL/Scale.cpp                      |  11 +
 tests/validation/CL/Winograd.cpp                   |  76 ++++++-
 tests/validation/NEON/ConvolutionLayer.cpp         |  71 ++++++
 .../validation/NEON/DepthwiseConvolutionLayer.cpp  |  80 +++++--
 tests/validation/NEON/DirectConvolutionLayer.cpp   |  10 +
 tests/validation/NEON/FFT.cpp                      |  11 +-
 tests/validation/NEON/FullyConnectedLayer.cpp      |  55 ++++-
 tests/validation/NEON/PoolingLayer.cpp             |  53 ++++-
 tests/validation/NEON/Scale.cpp                    |  22 ++
 .../validation/fixtures/ConvolutionLayerFixture.h  |  38 +++-
 .../fixtures/DepthwiseConvolutionLayerFixture.h    |  47 +++-
 .../fixtures/DirectConvolutionLayerFixture.h       |  39 +++-
 tests/validation/fixtures/FFTFixture.h             |  37 +++-
 .../fixtures/FullyConnectedLayerFixture.h          |  39 +++-
 tests/validation/fixtures/PoolingLayerFixture.h    |  40 +++-
 tests/validation/fixtures/ScaleFixture.h           |  42 +++-
 .../fixtures/WinogradConvolutionLayerFixture.h     | 241 ++++++++++-----------
 35 files changed, 1007 insertions(+), 284 deletions(-)

diff --git a/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
index befc373646..3367b10a96 100644
--- a/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
@@ -135,6 +135,7 @@ private:
     ITensor *_output;
     bool _is_prepared;
     bool _is_activationlayer_enabled;
+    DataLayout _data_layout;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H */
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
index 09f99748bf..98b76c7db3 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
@@ -1217,7 +1217,7 @@ void NEDirectConvolutionLayerKernel::convolve_nhwc(const Window &window)
NEDirectConvolutionLayerKernel::NEDirectConvolutionLayerKernel() : _input(nullptr), _weights(nullptr), _output(nullptr), _conv_info(), _border_size(0), _kernel_size(0), _num_weight_elems_read_per_row(0), _num_elems_read_per_iteration(0), - _num_elems_written_per_iteration(0) + _num_elems_written_per_iteration(0), _data_layout() { } @@ -1234,13 +1234,14 @@ void NEDirectConvolutionLayerKernel::configure(const ITensor *input, const ITens _weights = weights; _output = output; _conv_info = conv_info; - _kernel_size = weights->info()->dimension(get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::WIDTH)); + _data_layout = _input->info()->data_layout(); + _kernel_size = weights->info()->dimension(get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH)); const unsigned int conv_pad_left = conv_info.pad_left(); const unsigned int conv_pad_top = conv_info.pad_top(); const unsigned int conv_pad_right = conv_info.pad_right(); const unsigned int conv_pad_bottom = conv_info.pad_bottom(); - if(_input->info()->data_layout() == DataLayout::NCHW) + if(_data_layout == DataLayout::NCHW) { _border_size = BorderSize(conv_pad_top, conv_pad_right, conv_pad_bottom, conv_pad_left); } @@ -1294,9 +1295,9 @@ void NEDirectConvolutionLayerKernel::run(const Window &window, const ThreadInfo ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window); ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr); - const int kernel_size = _weights->info()->dimension(get_data_layout_dimension_index(_weights->info()->data_layout(), DataLayoutDimension::WIDTH)); + const int kernel_size = _weights->info()->dimension(get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH)); - if(_input->info()->data_layout() == DataLayout::NCHW) + if(_data_layout == DataLayout::NCHW) { switch(kernel_size) { diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h index 258def77a3..259eb683f6 100644 --- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h +++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h @@ -103,6 +103,7 @@ private: unsigned int _num_weight_elems_read_per_row; unsigned int _num_elems_read_per_iteration; unsigned int _num_elems_written_per_iteration; + DataLayout _data_layout; }; } // namespace arm_compute #endif /*ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYERKERNEL_H */ diff --git a/src/core/cpu/kernels/pooling/neon/fp16.cpp b/src/core/cpu/kernels/pooling/neon/fp16.cpp index 314be3704e..1ecceafe86 100644 --- a/src/core/cpu/kernels/pooling/neon/fp16.cpp +++ b/src/core/cpu/kernels/pooling/neon/fp16.cpp @@ -93,7 +93,7 @@ void pooling2_f16_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *ds // Store result vst1q_f16(reinterpret_cast(out.ptr()) + x_off, vres); - const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y); + const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NHWC); const uint32_t offset_x0 = (uint32_t)offset_base / sizeof(float16_t) + x_off; const uint32_t offset_x1 = (uint32_t)offset_x0 + in_stride_y / sizeof(float16_t) - pad_right; const uint32_t offset_x2 = (uint32_t)offset_x0 + in_stride_z / sizeof(float16_t) - pad_right * src->info()->tensor_shape()[1]; @@ -132,7 +132,7 @@ void pooling2_f16_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *ds // Store result *(reinterpret_cast(out.ptr()) + x_off) = res; - const uint32_t offset_base = 
offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y); + const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NHWC); const uint32_t offset_x0 = (uint32_t)offset_base / sizeof(float16_t) + x_off; const uint32_t offset_x1 = (uint32_t)offset_x0 + in_stride_y / sizeof(float16_t) - pad_right; const uint32_t offset_x2 = (uint32_t)offset_x0 + in_stride_z / sizeof(float16_t) - pad_right * src->info()->tensor_shape()[1]; diff --git a/src/core/cpu/kernels/pooling/neon/fp32.cpp b/src/core/cpu/kernels/pooling/neon/fp32.cpp index e319047d76..a2bd4a6bb3 100644 --- a/src/core/cpu/kernels/pooling/neon/fp32.cpp +++ b/src/core/cpu/kernels/pooling/neon/fp32.cpp @@ -95,7 +95,7 @@ void pooling2_f32_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *ds // Store result vst1q_f32(reinterpret_cast(out.ptr()) + x_off, vres); - const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y); + const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NHWC); const uint32_t offset_x0 = (uint32_t)offset_base / sizeof(float) + x_off; const uint32_t offset_x1 = (uint32_t)offset_x0 + in_stride_y / sizeof(float) - pad_right; const uint32_t offset_x2 = (uint32_t)offset_x0 + in_stride_z / sizeof(float) - pad_right * src->info()->tensor_shape()[1]; @@ -124,7 +124,7 @@ void pooling2_f32_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *ds // Store result *(reinterpret_cast(out.ptr()) + x_off) = res; - const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y); + const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NHWC); const uint32_t offset_x0 = (uint32_t)offset_base / sizeof(float) + x_off; const uint32_t offset_x1 = (uint32_t)offset_x0 + in_stride_y / sizeof(float) - pad_right; const uint32_t offset_x2 = (uint32_t)offset_x0 + in_stride_z / sizeof(float) - pad_right * src->info()->tensor_shape()[1]; diff --git a/src/core/cpu/kernels/pooling/neon/list.h b/src/core/cpu/kernels/pooling/neon/list.h index 3435ee6724..bec1536f61 100644 --- a/src/core/cpu/kernels/pooling/neon/list.h +++ b/src/core/cpu/kernels/pooling/neon/list.h @@ -59,7 +59,7 @@ DECLARE_POOLING_KERNEL(poolingMxN_fp32_neon_nchw); #undef DECLARE_POOLING_KERNEL template -inline uint32_t offset_no_padding(uint32_t padded_offset, const Coordinates &id, const ITensorInfo &info, int pool_stride_x, int pool_stride_y) +inline uint32_t offset_no_padding(uint32_t padded_offset, const Coordinates &id, const ITensorInfo &info, int pool_stride_x, int pool_stride_y, DataLayout data_layout) { const int pad_left = info.padding().left; const int pad_right = info.padding().right; @@ -70,7 +70,7 @@ inline uint32_t offset_no_padding(uint32_t padded_offset, const Coordinates &id, const int pad_horiz = pad_left + pad_right; const int pad_vert = pad_top + pad_bottom; - if(info.data_layout() == DataLayout::NCHW) + if(data_layout == DataLayout::NCHW) { const uint32_t offset_base = padded_offset - sizeof(T) * pad_horiz * id.y() * pool_stride_y /* subtract padding elems per row */ diff --git a/src/core/cpu/kernels/pooling/neon/nchw/all.cpp b/src/core/cpu/kernels/pooling/neon/nchw/all.cpp index 47ac7b4f7f..80eac684aa 100644 --- a/src/core/cpu/kernels/pooling/neon/nchw/all.cpp +++ b/src/core/cpu/kernels/pooling/neon/nchw/all.cpp @@ -150,7 +150,7 @@ void 
pooling2_nchw_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *d *(reinterpret_cast(out.ptr())) = static_cast(vget_lane_f32(max_data, 0)); // Calculate max data indice, which will be used in max unpool. - const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y); + const uint32_t offset_base = offset_no_padding(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NCHW); const uint32_t offset_top = (uint32_t)(offset_base / sizeof(T)); const uint32_t offset_bottom = offset_top + in_stride_y / sizeof(T) - pad_right - pad_left; const uint32x2_t voffset_top = { offset_top, offset_top + 1u }; diff --git a/src/core/cpu/kernels/scale/sve/qasymm8.cpp b/src/core/cpu/kernels/scale/sve/qasymm8.cpp index c475ad615c..c041f14b22 100644 --- a/src/core/cpu/kernels/scale/sve/qasymm8.cpp +++ b/src/core/cpu/kernels/scale/sve/qasymm8.cpp @@ -89,10 +89,9 @@ void qasymm8_sve_scale_bilinear(const ITensor *src, ITensor *dst, const ITensor BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window) { - // Get data layout and width/height indices - const DataLayout data_layout = src->info()->data_layout(); - const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); + // Data layout is NHWC + const int idx_width = 1; + const int idx_height = 2; // Compute the ratio between source height and destination height const auto hr = scale_utils::calculate_resize_ratio(src->info()->dimension(idx_height), dst->info()->dimension(idx_height), align_corners); diff --git a/src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp b/src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp index b39b75abba..9df4301fe3 100644 --- a/src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp +++ b/src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp @@ -89,10 +89,9 @@ void qasymm8_signed_sve_scale_bilinear(const ITensor *src, ITensor *dst, const I BorderMode border_mode, PixelValue constant_border_value, float sampling_offset, bool align_corners, const Window &window) { - // Get data layout and width/height indices - const DataLayout data_layout = src->info()->data_layout(); - const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); + // Data layout is NHWC + const int idx_width = 1; + const int idx_height = 2; // Compute the ratio between source height and destination height const auto hr = scale_utils::calculate_resize_ratio(src->info()->dimension(idx_height), dst->info()->dimension(idx_height), align_corners); diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp index 8d2c81bc15..5ed8aa98c9 100644 --- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp +++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -324,7 +324,7 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::prepare() { _output_multipliers.map(); _output_shifts.map(); - const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL); + const unsigned int idx_ofms = _needs_permute ? 2 : 0; quantization::compute_quantized_multipliers_and_shifts(_input->info(), _original_weights->info(), _output->info(), @@ -529,7 +529,7 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::prepar { _output_multipliers.map(); _output_shifts.map(); - const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL); + const unsigned int idx_ofms = _needs_permute ? 2 : 0; quantization::compute_quantized_multipliers_and_shifts(_input->info(), _original_weights->info(), _output->info(), diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp index dc3bbbe562..941cb21e5e 100644 --- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp +++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp @@ -303,7 +303,7 @@ arm_gemm::Activation arm_gemm_activation_from_acl_activation(const ActivationLay NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(const std::shared_ptr &memory_manager) : _memory_group(memory_manager), _gemm_function(memory_manager), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr), _activationlayer_function(), _permute_input(), _permute_weights(), _permute_output(), _input_transformed(), _output_transformed(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(), - _weights_hwio(), _input(), _weights(), _output(), _is_prepared(false), _is_activationlayer_enabled(false) + _weights_hwio(), _input(), _weights(), _output(), _is_prepared(false), _is_activationlayer_enabled(false), _data_layout() { } @@ -314,10 +314,10 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor * ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? 
biases->info() : nullptr, output->info(), conv_info)); // Get indices for the width and height - const DataLayout data_layout = input->info()->data_layout(); - const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH); - const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT); - const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL); + _data_layout = input->info()->data_layout(); + const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH); + const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT); + const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL); const Size2D input_dims = Size2D(input->info()->dimension(width_idx), input->info()->dimension(height_idx)); const Size2D kernel_size = Size2D(weights->info()->dimension(width_idx), weights->info()->dimension(height_idx)); @@ -537,7 +537,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor * const unsigned int max_num_threads = NEScheduler::get().num_threads(); // Configure the kernel to transform the input tensor from NCHW -> NHWC - if(data_layout == DataLayout::NCHW) + if(_data_layout == DataLayout::NCHW) { _memory_group.manage(&_input_nhwc); _permute_input.configure(input, &_input_nhwc, PermutationVector(2U, 0U, 1U)); @@ -554,7 +554,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor * TensorInfo input_workspace_info(TensorShape(input_workspace_size), 1, _input->info()->data_type()); _input_workspace.allocator()->init(input_workspace_info); _input_workspace.allocator()->allocate(); - if(data_layout == DataLayout::NCHW) + if(_data_layout == DataLayout::NCHW) { _input_nhwc.allocator()->allocate(); } @@ -570,7 +570,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor * // Configure output transform function // The biases tensor has not been allocated at this point in time, the output transform will add the biases to the final result in the run() method - if(data_layout == DataLayout::NCHW) + if(_data_layout == DataLayout::NCHW) { _memory_group.manage(&_output_nhwc); output_to_use = &_output_nhwc; @@ -595,7 +595,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor * _output_transformed.allocator()->allocate(); // Reorder the convoluted output to ACL's ordering NCHW - if(data_layout == DataLayout::NCHW) + if(_data_layout == DataLayout::NCHW) { _permute_output.configure(&_output_nhwc, _output, PermutationVector(1U, 2U, 0U)); _output_nhwc.allocator()->allocate(); @@ -615,13 +615,11 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor * void NEWinogradConvolutionLayer::run() { - const DataLayout data_layout = _input->info()->data_layout(); - prepare(); MemoryGroupResourceScope scope_mg(_memory_group); - if(data_layout == DataLayout::NCHW) + if(_data_layout == DataLayout::NCHW) { //Bring channels to the front as Winograd code expects the tensor to be in the format NHWC _permute_input.run(); @@ -636,7 +634,7 @@ void NEWinogradConvolutionLayer::run() // Transform output tensor to the spatial domain NEScheduler::get().schedule(_transform_output_kernel.get(), Window::DimX); - if(data_layout == DataLayout::NCHW) + if(_data_layout == DataLayout::NCHW) { // Reorder the convoluted output to ACL's ordering 
NCHW _permute_output.run(); diff --git a/tests/datasets/ScaleValidationDataset.h b/tests/datasets/ScaleValidationDataset.h index 881be0fc26..c0073f93f5 100644 --- a/tests/datasets/ScaleValidationDataset.h +++ b/tests/datasets/ScaleValidationDataset.h @@ -145,12 +145,10 @@ framework::dataset::make("AlignCorners", { true })); * - 3D shapes with 0, 1 vector iterations * - 4D shapes with 0 vector iterations */ -#define SCALE_SHAPE_DATASET(element_per_iteration) \ - concat(concat(concat(concat(concat(ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 0>(), \ - ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 1>()), \ - ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 2>()), \ - ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 0>()), \ - ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 1>()), \ +#define SCALE_SHAPE_DATASET(element_per_iteration) \ + concat(concat(concat(ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 0>(), \ + ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 2>()), \ + ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 1>()), \ ScaleShapesBaseDataSet<3, 3, (element_per_iteration), 0>()) // To prevent long precommit time for OpenCL, shape set for OpenCL is separated into below two parts. @@ -166,11 +164,10 @@ framework::dataset::make("AlignCorners", { true })); * - 3D shapes with 0 vector iterations (1 vector iteration is covered by SCALE_PRECOMMIT_SHAPE_DATASET) * - 4D shapes with 0 vector iterations */ -#define SCALE_NIGHTLY_SHAPE_DATASET(element_per_iteration) \ - concat(concat(concat(concat(ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 0>(), \ - ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 1>()), \ - ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 2>()), \ - ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 0>()), \ +#define SCALE_NIGHTLY_SHAPE_DATASET(element_per_iteration) \ + concat(concat(concat(ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 0>(), \ + ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 1>()), \ + ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 0>()), \ ScaleShapesBaseDataSet<3, 3, (element_per_iteration), 0>()) /** Generating dataset for non-quantized data tyeps with the given shapes */ diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp index b66cfd97e7..31eed7646c 100644 --- a/tests/validation/CL/ConvolutionLayer.cpp +++ b/tests/validation/CL/ConvolutionLayer.cpp @@ -186,6 +186,8 @@ TEST_SUITE_END() // ConvolutionLayer TEST_SUITE(GEMMConvolutionLayer) template using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture; +template +using CLGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP16) @@ -214,12 +216,30 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture, framework // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), + framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), + framework::dataset::make("Bias", TensorShape(2U))), + framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), + framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), + framework::dataset::make("Dilation", Size2D(1, 1))), + 
framework::dataset::make("ReshapeWeights", { true })), + framework::dataset::make("DataType",DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float template using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture; template +using CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture; +template using CLGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture; const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo", @@ -267,9 +287,25 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), + framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), + framework::dataset::make("Bias", TensorShape(2U))), + framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), + framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), + framework::dataset::make("Dilation", Size2D(1, 1))), + framework::dataset::make("ReshapeWeights", { true })), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + QuantizationData), + QuantizedActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) - FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(), framework::dataset::make("ReshapeWeights", { true })), @@ -281,6 +317,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture, // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), + framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), + framework::dataset::make("Bias", TensorShape(2U))), + framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), + framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), + framework::dataset::make("Dilation", Size2D(1, 1))), + framework::dataset::make("ReshapeWeights", { true })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + QuantizationData), + QuantizedActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE(QSYMM8_PER_CHANNEL) diff --git a/tests/validation/CL/DepthwiseConvolutionLayer.cpp b/tests/validation/CL/DepthwiseConvolutionLayer.cpp index b2009c26ad..c88f7c1624 100644 
--- a/tests/validation/CL/DepthwiseConvolutionLayer.cpp +++ b/tests/validation/CL/DepthwiseConvolutionLayer.cpp @@ -154,6 +154,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi template using CLDepthwiseConvolutionLayerFixture = DepthwiseConvolutionLayerValidationFixture; +template +using CLDepthwiseConvolutionLayerMixedDataLayoutFixture = DepthwiseConvolutionLayerValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP16) @@ -347,6 +349,16 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerFixture, { validate(CLAccessor(_target), _reference, tolerance_f32); } +FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, CLDepthwiseConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(), + framework::dataset::make("DepthMultiplier", { 2 })), + framework::dataset::make("DataType", + DataType::F32)), + framework::dataset::make("DataLayout", DataLayout::NHWC)), + framework::dataset::make("ActivationInfo", ActivationLayerInfo()))) +{ + validate(CLAccessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(), large_depth_multipliers), framework::dataset::make("DataType", @@ -430,6 +442,8 @@ TEST_SUITE_END() // Float template using CLDepthwiseConvolutionLayerQuantizedFixture = DepthwiseConvolutionLayerValidationQuantizedFixture; template +using CLDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture = DepthwiseConvolutionLayerValidationQuantizedFixture; +template using CLDepthwiseConvolutionLayerQuantizedPerChannelFixture = DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture; TEST_SUITE(Quantized) @@ -517,6 +531,17 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture { validate(CLAccessor(_target), _reference, tolerance_qasymm8); } +FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, CLDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(), + framework::dataset::make("DepthMultiplier", { 2 })), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10), QuantizationInfo(2.2f, 10) })), + framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + framework::dataset::make("ActivationInfo", ActivationLayerInfo()))) +{ + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(), large_depth_multipliers), @@ -545,6 +570,17 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture { validate(CLAccessor(_target), _reference, tolerance_qasymm8); } +FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, CLDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(), + framework::dataset::make("DepthMultiplier", { 2 })), + 
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.3f, 10), QuantizationInfo(2.2f, 10) })), + framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 4) })), + framework::dataset::make("DataLayout", { DataLayout::NCHW })), + framework::dataset::make("ActivationInfo", ActivationLayerInfo()))) +{ + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} TEST_SUITE(Dilation) FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(), diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp index c01234020f..946de7f943 100644 --- a/tests/validation/CL/DirectConvolutionLayer.cpp +++ b/tests/validation/CL/DirectConvolutionLayer.cpp @@ -207,10 +207,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( template using CLDirectConvolutionLayerFixture = DirectConvolutionValidationFixture; template +using CLDirectConvolutionLayerMixedDataLayoutFixture = DirectConvolutionValidationFixture; +template using CLDirectConvolutionValidationWithTensorShapesFixture = DirectConvolutionValidationWithTensorShapesFixture; template using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture; template +using CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture = DirectConvolutionValidationQuantizedFixture; +template using CLDirectConvolutionValidationWithTensorShapesQuantizedFixture = DirectConvolutionValidationWithTensorShapesQuantizedFixture; TEST_SUITE(NHWC) @@ -271,7 +275,24 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerFixture, framewo { validate(CLAccessor(_target), _reference, tolerance_fp32); } - +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U), + TensorShape(9U, 5U, 6U, 4U), + TensorShape(3U, 5U, 7U, 2U), + TensorShape(32U, 37U, 3U) } ), + framework::dataset::make("StrideX", { 1 })), + framework::dataset::make("StrideY", { 2 })), + framework::dataset::make("PadX", { 1 })), + framework::dataset::make("PadY", { 3 })), + framework::dataset::make("KernelSize", { 3 })), + framework::dataset::make("NumKernels", { 3 })), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )), + framework::dataset::make("DataLayout", DataLayout::NHWC))) +{ + validate(CLAccessor(_target), _reference, tolerance_fp32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(zip(zip(zip(zip(zip(zip( framework::dataset::make("InputShape", { TensorShape(800U, 800U, 3U) } ), @@ -287,7 +308,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerFixture, framewo { validate(CLAccessor(_target), _reference, tolerance_fp32); } - TEST_SUITE_END() // FP32 TEST_SUITE(Quantized) @@ -311,7 +331,25 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U), + TensorShape(9U, 5U, 6U, 4U), 
+ TensorShape(3U, 5U, 7U, 2U), + TensorShape(32U, 37U, 3U) } ), + framework::dataset::make("StrideX", { 1 })), + framework::dataset::make("StrideY", { 2 })), + framework::dataset::make("PadX", { 1 })), + framework::dataset::make("PadY", { 1 })), + framework::dataset::make("KernelSize", { 3 })), + framework::dataset::make("NumKernels", { 3 })), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("QuantizationInfo", QuantizationInfo(1.1f / 255, 10))), + framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )), + framework::dataset::make("DataLayout", DataLayout::NHWC))) +{ + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(zip(zip(zip(zip(zip(zip( framework::dataset::make("InputShape", { TensorShape(800U, 800U, 3U) } ), @@ -330,7 +368,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(zip(zip(zip(zip(zip(zip( @@ -351,7 +388,25 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(zip(zip(zip(zip(zip(zip( + framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U), + TensorShape(9U, 5U, 6U, 4U), + TensorShape(3U, 5U, 7U, 2U), + TensorShape(32U, 37U, 3U) } ), + framework::dataset::make("StrideX", { 1 })), + framework::dataset::make("StrideY", { 1 })), + framework::dataset::make("PadX", { 1 })), + framework::dataset::make("PadY", { 1 })), + framework::dataset::make("KernelSize", { 3 })), + framework::dataset::make("NumKernels", { 3 })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255, 10))), + framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )), + framework::dataset::make("DataLayout", DataLayout::NHWC))) +{ + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(zip(zip(zip(zip(zip(zip( framework::dataset::make("InputShape", { TensorShape(800U, 800U, 3U) } ), @@ -401,6 +456,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerFixture, framewo { validate(CLAccessor(_target), _reference, tolerance_fp32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit, framework::dataset::make("DataType", + DataType::F32)), + ActivationFunctionsDataset), + framework::dataset::make("DataLayout", { DataLayout::NCHW }))) +{ + validate(CLAccessor(_target), _reference, tolerance_fp32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_nightly, framework::dataset::make("DataType", DataType::F32)), ActivationFunctionsDataset), framework::dataset::make("DataLayout", { DataLayout::NCHW }))) @@ -428,9 +490,17 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ }); TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, 
combine(combine(combine(combine(data_precommit, + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10)})), + QuantizedActivationFunctionsDataset), + framework::dataset::make("DataLayout", { DataLayout::NCHW }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data_precommit, - framework::dataset::make("DataType", - DataType::QASYMM8)), + framework::dataset::make("DataType", DataType::QASYMM8)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, 10) })), QuantizedActivationFunctionsDataset), framework::dataset::make("DataLayout", { DataLayout::NCHW }))) @@ -494,7 +564,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(data_precommit, framework::dataset::make("DataType", + DataType::QASYMM8_SIGNED)), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.1f, -10) })), + QuantizedActivationFunctionsDataset), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} FIXTURE_DATA_TEST_CASE(RunSmall9x9, CLDirectConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, combine(combine(combine(combine(data_precommit_9x9, framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), diff --git a/tests/validation/CL/FFT.cpp b/tests/validation/CL/FFT.cpp index fb2f1f53e2..99a83abe5c 100644 --- a/tests/validation/CL/FFT.cpp +++ b/tests/validation/CL/FFT.cpp @@ -175,6 +175,8 @@ TEST_SUITE(FFTConvolutionLayer) template using CLFFTConvolutionLayerFixture = FFTConvolutionValidationFixture; +template +using CLFFTConvolutionLayerMixedDataLayoutFixture = FFTConvolutionValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP32) @@ -186,6 +188,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture, framework: // Validate output validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLFFTConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsSmallDataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num_f32); +} TEST_SUITE_END() // FP32 TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(), diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp index 78195a556b..066f0b689e 100644 --- a/tests/validation/CL/FullyConnectedLayer.cpp +++ b/tests/validation/CL/FullyConnectedLayer.cpp @@ -138,6 +138,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( template using CLFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture; +template +using CLFullyConnectedLayerMixedDataLayoutFixture = FullyConnectedLayerValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP16) @@ -167,6 +169,18 @@ 
FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerFixture, framework: // Validate output validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLFullyConnectedLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(9U, 5U, 7U)), + framework::dataset::make("Weights", TensorShape(315U, 271U))), + framework::dataset::make("Biases", TensorShape(271U))), + framework::dataset::make("Output", TensorShape(271U))), + FullyConnectedParameters), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)))) +{ + // Validate output + validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLFullyConnectedLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeFullyConnectedLayerDataset(), FullyConnectedParameters), framework::dataset::make("DataType", DataType::F32)), ActivationFunctionsDataset)) @@ -179,6 +193,8 @@ TEST_SUITE_END() template using CLFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture; +template +using CLFullyConnectedLayerQuantizedMixedDataLayoutFixture = FullyConnectedLayerValidationQuantizedFixture; TEST_SUITE(Quantized) TEST_SUITE(QASYMM8) @@ -189,6 +205,20 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerQuantizedFixture, // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLFullyConnectedLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(9U, 5U, 7U)), + framework::dataset::make("Weights", TensorShape(315U, 271U))), + framework::dataset::make("Biases", TensorShape(271U))), + framework::dataset::make("Output", TensorShape(271U))), + FullyConnectedParameters), + framework::dataset::make("DataType", DataType::QASYMM8)), + QuantizationData), + framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLFullyConnectedLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeFullyConnectedLayerDataset(), FullyConnectedParameters), framework::dataset::make("DataType", DataType::QASYMM8)), QuantizationData), ActivationFunctionsQuantizedDataset)) @@ -205,11 +235,24 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerQuantizedFixture, // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } -TEST_SUITE_END() /* QASYMM8_SIGNED */ -TEST_SUITE_END() /* Quantized */ - -TEST_SUITE_END() -TEST_SUITE_END() +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLFullyConnectedLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(9U, 5U, 7U)), + framework::dataset::make("Weights", TensorShape(315U, 271U))), + framework::dataset::make("Biases", TensorShape(271U))), + framework::dataset::make("Output", TensorShape(271U))), + FullyConnectedParameters), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + 
QuantizationData), + framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} +TEST_SUITE_END() // QASYMM8_SIGNED +TEST_SUITE_END() // Quantized +TEST_SUITE_END() // FullyConnectedLayer +TEST_SUITE_END() // CL } // namespace validation } // namespace test } // namespace arm_compute diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp index de5c9f2e8d..f42c187f8f 100644 --- a/tests/validation/CL/PoolingLayer.cpp +++ b/tests/validation/CL/PoolingLayer.cpp @@ -131,6 +131,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( template using CLPoolingLayerFixture = PoolingLayerValidationFixture; +template +using CLPoolingLayerMixedDataLayoutFixture = PoolingLayerValidationFixture; template using CLSpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture; @@ -156,6 +158,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture, framework::Datase // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), + combine(combine(combine(combine(datasets::PoolingTypes(), + framework::dataset::make("PoolingSize", { Size2D(2, 2) })), + framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) })), + framework::dataset::make("ExcludePadding", { false })), + framework::dataset::make("DataType", DataType::F32))), + pool_data_layout_dataset)) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP, framework::dataset::make("DataType", DataType::F32))), @@ -210,6 +223,8 @@ TEST_SUITE(Quantized) template using CLPoolingLayerQuantizedFixture = PoolingLayerValidationQuantizedFixture; +template +using CLPoolingLayerQuantizedMixedDataLayoutFixture = PoolingLayerValidationQuantizedFixture; TEST_SUITE(QASYMM8) FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), @@ -222,6 +237,19 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture, framew // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), + combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), + framework::dataset::make("PoolingSize", { Size2D(2, 2) })), + framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })), + framework::dataset::make("ExcludePadding", { true })), + framework::dataset::make("DataType", DataType::QASYMM8))), + framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })), + framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10) })), + framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8); +} TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) @@ -235,6 +263,19 @@ FIXTURE_DATA_TEST_CASE(RunSmall, 
CLPoolingLayerQuantizedFixture, framewo // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8_s); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), + combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), + framework::dataset::make("PoolingSize", { Size2D(2, 2) })), + framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })), + framework::dataset::make("ExcludePadding", { true })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))), + framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })), + framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })), + framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_qasymm8_s); +} TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE_END() // Quantized TEST_SUITE_END() // PoolingLayer diff --git a/tests/validation/CL/Scale.cpp b/tests/validation/CL/Scale.cpp index 523b49deb7..2b34f1f353 100644 --- a/tests/validation/CL/Scale.cpp +++ b/tests/validation/CL/Scale.cpp @@ -210,6 +210,8 @@ TEST_SUITE_END() // Validate template using CLScaleFixture = ScaleValidationFixture; +template +using CLScaleMixedDataLayoutFixture = ScaleValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP32) @@ -223,6 +225,15 @@ FIXTURE_DATA_TEST_CASE(Run, CLScaleFixture, framework::DatasetMode::ALL, // Validate output validate(CLAccessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32, tolerance_f32_absolute); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLScaleMixedDataLayoutFixture, framework::DatasetMode::ALL, ASSEMBLE_DATASET(f32_shape, ScaleSamplingPolicySet)) +{ + //Create valid region + TensorInfo src_info(_shape, 1, _data_type); + const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED)); + + // Validate output + validate(CLAccessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32, tolerance_f32_absolute); +} FIXTURE_DATA_TEST_CASE(RunAlignCorners, CLScaleFixture, framework::DatasetMode::ALL, ASSEMBLE_DATASET(f32_shape, ScaleAlignCornersSamplingPolicySet)) { //Create valid region diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp index 7a987ba65f..7ccc850be5 100644 --- a/tests/validation/CL/Winograd.cpp +++ b/tests/validation/CL/Winograd.cpp @@ -228,6 +228,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( } using CLWinogradInputTransformFixtureFP32 = WinogradInputTransformValidationFixture; +using CLWinogradInputTransformMixedDataLayoutFixtureFP32 = WinogradInputTransformValidationFixture; using CLWinogradInputTransformFixtureFP16 = WinogradInputTransformValidationFixture; TEST_SUITE(NCHW) @@ -238,7 +239,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixtureFP32, framework: { validate(CLAccessor(_target), _reference, tolerance_f32); } - +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradInputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT, combine(combine( + datasets::SmallWinogradInputTransformDataset2x2_3x3(), + framework::dataset::make("DataLayout", { DataLayout::NCHW })), + framework::dataset::make("DataType", { DataType::F32 }))) +{ + 
validate(CLAccessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNCHW, framework::dataset::make("DataLayout", { DataLayout::NCHW })), framework::dataset::make("DataType", { DataType::F32 }))) @@ -287,7 +294,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixtureFP32, framework: { validate(CLAccessor(_target), _reference, tolerance_f32); } - +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradInputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT, combine(combine( + datasets::SmallWinogradInputTransformDataset4x4_3x3(), + framework::dataset::make("DataLayout", { DataLayout::NHWC })), + framework::dataset::make("DataType", { DataType::F32 }))) +{ + validate(CLAccessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNHWC_FP32, framework::dataset::make("DataLayout", { DataLayout::NHWC })), framework::dataset::make("DataType", { DataType::F32 }))) @@ -335,6 +348,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( using CLWinogradFilterTransform = CLSynthetizeFunctionWithZeroConstantBorder; using CLWinogradFilterTransformFixtureFP32 = WinogradFilterTransformValidationFixture; +using CLWinogradFilterTransformMixedDataLayoutFixtureFP32 = WinogradFilterTransformValidationFixture; using CLWinogradFilterTransformFixtureFP16 = WinogradFilterTransformValidationFixture; TEST_SUITE(NCHW) @@ -347,7 +361,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixtureFP32, framework // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } - +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradFilterTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(datasets::Small3x3Shapes(), + framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })), + framework::dataset::make("DataLayout", { DataLayout::NCHW })), + framework::dataset::make("DataType", { DataType::F32 }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradFilterTransformDatasetNCHW, framework::dataset::make("DataLayout", { DataLayout::NCHW })), @@ -407,7 +429,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixtureFP32, framework // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } - +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradFilterTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(datasets::Small3x3Shapes(), + framework::dataset::make("OutputTile", { Size2D(4U, 4U) })), + framework::dataset::make("DataLayout", { DataLayout::NHWC })), + framework::dataset::make("DataType", { DataType::F32 }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradFilterTransformDatasetNHWC_F32, framework::dataset::make("DataLayout", { DataLayout::NHWC })), @@ -474,6 +504,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( using CLWinogradOutputTransform = 
CLSynthetizeFunctionWithZeroConstantBorder; using CLWinogradOutputTransformFixtureFP32 = WinogradOutputTransformValidationFixture; +using CLWinogradOutputTransformMixedDataLayoutFixtureFP32 = WinogradOutputTransformValidationFixture; using CLWinogradOutputTransformFixtureFP16 = WinogradOutputTransformValidationFixture; TEST_SUITE(NCHW) @@ -505,7 +536,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixtureFP32, framework // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } - +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradOutputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::ALL, + combine(combine(combine(framework::dataset::make("Input", TensorShape(13U, 6U, 16U)), + framework::dataset::make("WinogradInfo", WinogradInfo(Size2D(2U, 2U),Size2D(3U, 3U), Size2D(7U, 6U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW))), + framework::dataset::make("DataType", { DataType::F32 })), + framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) )) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradOutputTransformDatasetNCHW, framework::dataset::make("DataType", { DataType::F32 })), @@ -546,7 +585,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixtureFP32, framework // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); } - +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradOutputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::ALL, + combine(combine(combine(framework::dataset::make("Input", TensorShape(1U, 4U, 64U)), + framework::dataset::make("WinogradInfo", WinogradInfo(Size2D(2U, 2U), Size2D(7U, 7U), Size2D(9U, 9U), PadStrideInfo(1, 1, 0, 0), DataLayout::NHWC))), + framework::dataset::make("DataType", { DataType::F32 })), + framework::dataset::make("ActivationInfo",{ ActivationLayerInfo() }) )) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradOutputTransformDatasetNHWC_F32, framework::dataset::make("DataType", { DataType::F32 })), @@ -604,6 +651,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip( TEST_SUITE(FP32) using CLWinogradConvolutionLayerFastMathFixture = WinogradConvolutionLayerFastMathValidationFixture; +using CLWinogradConvolutionLayerFastMathMixedDataLayoutFixture = WinogradConvolutionLayerFastMathValidationFixture; TEST_SUITE(Conv3x3) FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(), @@ -614,7 +662,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, fram // Validate output validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32); } - +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradConvolutionLayerFastMathMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(8U, 8U, 32U)), + framework::dataset::make("Weight", TensorShape(1U, 3U, 32U, 1U))), + framework::dataset::make("Bias", TensorShape(1U))), + framework::dataset::make("Output", TensorShape(8U, 6U, 1U))), + framework::dataset::make("PadStrideInfo", 
PadStrideInfo(1, 1, 0, 0))), + framework::dataset::make("Dilation", Size2D(1U, 1U))), + framework::dataset::make("DataType", { DataType::F32 })), + ActivationFunctionsSmallDataset), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(), framework::dataset::make("DataType", { DataType::F32 })), diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp index 6b152c9b68..b435744cdc 100644 --- a/tests/validation/NEON/ConvolutionLayer.cpp +++ b/tests/validation/NEON/ConvolutionLayer.cpp @@ -150,6 +150,8 @@ TEST_SUITE_END() // ConvolutionLayer TEST_SUITE(WinogradLayer) template using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture; +template +using NEWinogradConvolutionLayerMixedDataLayoutFixture = WinogradConvolutionLayerFastMathValidationFixture; template using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture; @@ -166,6 +168,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture, frame // Validate output validate(Accessor(_target), _reference, abs_tolerance_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEWinogradConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(8U, 8U, 32U)), + framework::dataset::make("Weight", TensorShape(1U, 3U, 32U, 1U))), + framework::dataset::make("Bias", TensorShape(1U))), + framework::dataset::make("Output", TensorShape(8U, 6U, 1U))), + framework::dataset::make("PadStrideInfo", PadStrideInfo(1, 1, 0, 0))), + framework::dataset::make("Dilation", Size2D(1U, 1U))), + framework::dataset::make("DataType", { DataType::F32 })), + ActivationFunctionsDataset), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(Accessor(_target), _reference, abs_tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(), framework::dataset::make("DataType", { DataType::F32 })), @@ -384,6 +401,8 @@ TEST_SUITE_END() // WinogradLayer TEST_SUITE(GEMMConvolutionLayer) template using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture; +template +using NEGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture; TEST_SUITE(Float) #if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) @@ -424,11 +443,29 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture, framework // Validate output validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32)); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), + framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), + framework::dataset::make("Bias", TensorShape(2U))), + framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), + 
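A note on the dataset algebra used by every case in this patch: framework::dataset::make("Name", { v0, v1 }) builds a named value set, and combine(A, B) takes the cartesian product of two sets, so each nested combine(...) chain expands every listed shape, data type, activation and layout into its own test configuration. A minimal sketch (variable names illustrative only):

    const auto types   = framework::dataset::make("DataType", { DataType::F32 });
    const auto layouts = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });
    const auto grid    = combine(types, layouts); // 1 type x 2 layouts = 2 configurations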
framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), + framework::dataset::make("Dilation", Size2D(1, 1))), + framework::dataset::make("ReshapeWeights", { true })), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsDataset)) +{ + // Validate output + validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32)); +} TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float template using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture; +template +using NEGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture; template using NEGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture; @@ -451,6 +488,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), + framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), + framework::dataset::make("Bias", TensorShape(2U))), + framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), + framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), + framework::dataset::make("Dilation", Size2D(1, 1))), + framework::dataset::make("ReshapeWeights", { true })), + framework::dataset::make("DataType", DataType::QASYMM8)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })), + QuantizedActivationFunctionsDataset)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} TEST_SUITE_END() // QASYMM8 TEST_SUITE(QASYMM8_SIGNED) @@ -464,6 +518,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture, // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedFixture, framework::DatasetMode::ALL, + combine(combine(combine(combine(combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(23U, 27U, 5U)), + framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))), + framework::dataset::make("Bias", TensorShape(2U))), + framework::dataset::make("Output", TensorShape(11U, 25U, 2U))), + framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))), + framework::dataset::make("Dilation", Size2D(1, 1))), + framework::dataset::make("ReshapeWeights", { true })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })), + QuantizedActivationFunctionsDataset)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8); +} TEST_SUITE_END() // QASYMM8_SIGNED TEST_SUITE(QSYMM8_PER_CHANNEL) diff --git a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp index 6bb40be036..a9c4edf5dd 100644 --- a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp +++ 
b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp @@ -242,19 +242,28 @@ DATA_TEST_CASE(ValidateGeneric, framework::DatasetMode::ALL, zip(zip(zip(zip(zip // *INDENT-ON* template using NEDepthwiseConvolutionLayerFixture = DepthwiseConvolutionLayerValidationFixture; +template +using NEDepthwiseConvolutionLayerMixedDataLayoutFixture = DepthwiseConvolutionLayerValidationFixture; TEST_SUITE(Float) TEST_SUITE(F32) TEST_SUITE(Generic) FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(), depth_multipliers), - framework::dataset::make("DataType", - DataType::F32)), + framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), ActivationFunctionsDataset)) { validate(Accessor(_target), _reference, tolerance_f32); } +FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, NEDepthwiseConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(), + framework::dataset::make("DepthMultiplier", { 2 })), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + framework::dataset::make("ActivationInfo", ActivationLayerInfo()))) +{ + validate(Accessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset(), large_depth_multipliers), framework::dataset::make("DataType", @@ -345,6 +354,15 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(), + framework::dataset::make("DepthMultiplier", 1)), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + framework::dataset::make("ActivationInfo", ActivationLayerInfo()))) +{ + validate(Accessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset5x5(), framework::dataset::make("DepthMultiplier", 1)), @@ -500,10 +518,10 @@ TEST_SUITE_END() // FP16 TEST_SUITE_END() // Float -template -using NEDepthwiseConvolutionLayerQuantizedFixtureOptimized = DepthwiseConvolutionLayerValidationQuantizedFixture; template using NEDepthwiseConvolutionLayerQuantizedFixture = DepthwiseConvolutionLayerValidationQuantizedFixture; +template +using NEDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture = DepthwiseConvolutionLayerValidationQuantizedFixture; using NEDepthwiseConvolutionLayerQuantizedSymmetricPerChannelFixture = DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture; TEST_SUITE(Quantized) @@ -520,7 +538,17 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture { validate(Accessor(_target), _reference, tolerance_qasymm8); } - +FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, NEDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(), + 
framework::dataset::make("DepthMultiplier", { 2 })), + framework::dataset::make("DataType", DataType::QASYMM8)), + input_qinfo_dataset), + framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 4) })), + framework::dataset::make("DataLayout", { DataLayout::NHWC })), + framework::dataset::make("ActivationInfo", ActivationLayerInfo()))) +{ + validate(Accessor(_target), _reference, tolerance_qasymm8); +} TEST_SUITE(Dilation) FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(), @@ -547,7 +575,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture TEST_SUITE_END() // Dilation TEST_SUITE_END() // Generic TEST_SUITE(W3x3) -FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::PRECOMMIT, +FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(), depth_multipliers), framework::dataset::make("DataType", DataType::QASYMM8)), input_qinfo_dataset), @@ -557,7 +585,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture { validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::NIGHTLY, +FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(), large_depth_multipliers), framework::dataset::make("DataType", DataType::QASYMM8)), @@ -571,7 +599,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture TEST_SUITE(Dilation) -FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::PRECOMMIT, +FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(), depth_multipliers), framework::dataset::make("DataType", DataType::QASYMM8)), input_qinfo_dataset), @@ -581,7 +609,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture { validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::NIGHTLY, +FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(), large_depth_multipliers), framework::dataset::make("DataType", DataType::QASYMM8)), @@ -596,11 +624,10 @@ TEST_SUITE_END() // Dilation TEST_SUITE_END() // W3x3 TEST_SUITE(Optimized) -FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::PRECOMMIT, +FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(), 
framework::dataset::make("DepthMultiplier", 1)), - framework::dataset::make("DataType", - DataType::QASYMM8)), + framework::dataset::make("DataType", DataType::QASYMM8)), input_qinfo_dataset), framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })), framework::dataset::make("DataLayout", { DataLayout::NHWC })), @@ -608,7 +635,18 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixt { validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::PRECOMMIT, +FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout3x3, NEDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, + combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(), + framework::dataset::make("DepthMultiplier", 1)), + framework::dataset::make("DataType", DataType::QASYMM8)), + input_qinfo_dataset), + framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })), + framework::dataset::make("DataLayout", { DataLayout::NHWC })), + framework::dataset::make("ActivationInfo", ActivationLayerInfo()))) +{ + validate(Accessor(_target), _reference, tolerance_qasymm8); +} +FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset5x5(), framework::dataset::make("DepthMultiplier", 1)), framework::dataset::make("DataType", @@ -620,7 +658,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixt { validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE_NEW(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::NIGHTLY, +FIXTURE_DATA_TEST_CASE_NEW(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeOptimizedDepthwiseConvolutionLayerDataset3x3(), framework::dataset::make("DepthMultiplier", 1)), framework::dataset::make("DataType", @@ -676,7 +714,7 @@ TEST_SUITE_END() // Dilation TEST_SUITE_END() // Generic TEST_SUITE(W3x3) -FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::PRECOMMIT, +FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(), depth_multipliers), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), input_qinfo_dataset), @@ -686,7 +724,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture { validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::NIGHTLY, +FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(), large_depth_multipliers), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), @@ -699,7 +737,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture } TEST_SUITE(Dilation) -FIXTURE_DATA_TEST_CASE_NEW(RunSmall, 
NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::PRECOMMIT, +FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(), depth_multipliers), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), input_qinfo_dataset), @@ -709,7 +747,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture { validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::NIGHTLY, +FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(), large_depth_multipliers), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), @@ -724,7 +762,7 @@ TEST_SUITE_END() // Dilation TEST_SUITE_END() // W3x3 TEST_SUITE(Optimized) -FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::PRECOMMIT, +FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(), framework::dataset::make("DepthMultiplier", 1)), framework::dataset::make("DataType", @@ -736,7 +774,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixt { validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::PRECOMMIT, +FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset5x5(), framework::dataset::make("DepthMultiplier", 1)), framework::dataset::make("DataType", @@ -748,7 +786,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixt { validate(Accessor(_target), _reference, tolerance_qasymm8); } -FIXTURE_DATA_TEST_CASE_NEW(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized, framework::DatasetMode::NIGHTLY, +FIXTURE_DATA_TEST_CASE_NEW(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeOptimizedDepthwiseConvolutionLayerDataset3x3(), framework::dataset::make("DepthMultiplier", 1)), framework::dataset::make("DataType", diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp index ffffe7e3d5..c3d6e94bfc 100644 --- a/tests/validation/NEON/DirectConvolutionLayer.cpp +++ b/tests/validation/NEON/DirectConvolutionLayer.cpp @@ -278,6 +278,8 @@ DATA_TEST_CASE(NoPaddingNHWCKernel, framework::DatasetMode::ALL, combine(combine template using NEDirectConvolutionLayerFixture = DirectConvolutionValidationFixture; +template +using NEDirectConvolutionLayerMixedDataLayoutFixture = DirectConvolutionValidationFixture; TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC @@ -309,6 +311,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture, framewo // Validate output validate(Accessor(_target), 
_reference, tolerance_fp32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEDirectConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit, + framework::dataset::make("DataType", DataType::F32)), + ActivationFunctionsDataset), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC }))) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_fp32); +} FIXTURE_DATA_TEST_CASE(RunSmall9x9, NEDirectConvolutionLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit9x9, framework::dataset::make("DataType", DataType::F32)), ActivationFunctionsDataset), diff --git a/tests/validation/NEON/FFT.cpp b/tests/validation/NEON/FFT.cpp index 7125158a21..f7ef0a314e 100644 --- a/tests/validation/NEON/FFT.cpp +++ b/tests/validation/NEON/FFT.cpp @@ -158,6 +158,8 @@ TEST_SUITE(FFTConvolutionLayer) template using NEFFTConvolutionLayerFixture = FFTConvolutionValidationFixture; +template +using NEFFTConvolutionLayerMixedDataLayoutFixture = FFTConvolutionValidationFixture; TEST_SUITE(Float) TEST_SUITE(FP32) @@ -169,10 +171,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFFTConvolutionLayerFixture, framework: // Validate output validate(Accessor(_target), _reference, tolerance_f32, tolerance_num); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFFTConvolutionLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), + ActivationFunctionsSmallDataset)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f32, tolerance_num); +} TEST_SUITE_END() // FP32 TEST_SUITE_END() // Float TEST_SUITE_END() // FFTConvolutionLayer - TEST_SUITE_END() // Neon } // namespace validation } // namespace test diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp index d8c2203802..4bb48bf42c 100644 --- a/tests/validation/NEON/FullyConnectedLayer.cpp +++ b/tests/validation/NEON/FullyConnectedLayer.cpp @@ -143,6 +143,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip( template using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture; +template +using NEFullyConnectedLayerMixedDataLayoutFixture = FullyConnectedLayerValidationFixture; TEST_SUITE(Float) #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC @@ -183,6 +185,18 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture, framework: // Validate output validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine( + framework::dataset::make("Input", TensorShape(9U, 5U, 7U)), + framework::dataset::make("Weights", TensorShape(315U, 271U))), + framework::dataset::make("Biases", TensorShape(271U))), + framework::dataset::make("Output", TensorShape(271U))), + FullyConnectedParameters), + framework::dataset::make("DataType", DataType::F32)), + framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)))) +{ + // Validate output + validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerFixture, 
framework::DatasetMode::PRECOMMIT, combine(combine(
                                               combine(datasets::FullyConnectedLayerWithActivationDataset(),
                                                       FullyConnectedParameters),
@@ -204,6 +218,8 @@ TEST_SUITE_END()
 template <typename T>
 using NEFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
+template <typename T>
+using NEFullyConnectedLayerQuantizedMixedDataLayoutFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
 TEST_SUITE(Quantized)
 TEST_SUITE(QASYMM8)
@@ -217,7 +233,20 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<uint8_t>,
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(combine(combine(
+                                                                           framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+                                                                           framework::dataset::make("Weights", TensorShape(315U, 271U))),
+                                                                       framework::dataset::make("Biases", TensorShape(271U))),
+                                                                   framework::dataset::make("Output", TensorShape(271U))),
+                                                               FullyConnectedParameters),
+                                                       framework::dataset::make("DataType", DataType::QASYMM8)),
+                                               QuantizationData),
+                               framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
 FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
                                               combine(datasets::FullyConnectedLayerWithActivationDataset(),
                                                       FullyConnectedParameters),
@@ -251,7 +280,20 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<int8_t>,
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
 }
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(combine(combine(
+                                                                           framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+                                                                           framework::dataset::make("Weights", TensorShape(315U, 271U))),
+                                                                       framework::dataset::make("Biases", TensorShape(271U))),
+                                                                   framework::dataset::make("Output", TensorShape(271U))),
+                                                               FullyConnectedParameters),
+                                                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+                                               QuantizationData),
+                               framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
 FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
                                               combine(datasets::FullyConnectedLayerWithActivationDataset(),
                                                       FullyConnectedParameters),
@@ -262,11 +304,10 @@ FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerQuantizedFixture<
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
 }
-TEST_SUITE_END()
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE_END() // Quantized
+TEST_SUITE_END() // FullyConnectedLayer
+TEST_SUITE_END() // NEON
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 24e552ed0c..9a6af49836 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -130,6 +130,8 @@ using NEPoolingLayerIndicesFixture = PoolingLayerIndicesValidationFixture<Tensor, Accessor, NEPoolingLayer, T>;
 template <typename T>
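The fixed shapes in the fully connected RunMixedDataLayout cases above are mutually consistent because a fully connected layer flattens its input before the matrix multiply; a quick check of the arithmetic (shapes taken from the cases above):

    // 9 x 5 x 7 input volume -> 315 flattened activations -> 271 outputs,
    // hence Weights(315U, 271U), Biases(271U) and Output(271U).
    static_assert(9 * 5 * 7 == 315, "FC input is flattened before the GEMM");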
using NEPoolingLayerFixture = PoolingLayerValidationFixture; +template +using NEPoolingLayerMixedDataLayoutFixture = PoolingLayerValidationFixture; template using NESpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture; @@ -165,6 +167,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixture, framework::Datase // Validate output validate(Accessor(_target), _reference, tolerance_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEPoolingLayerMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), + combine(combine(combine(combine(datasets::PoolingTypes(), + framework::dataset::make("PoolingSize", { Size2D(2, 2) })), + framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) })), + framework::dataset::make("ExcludePadding", { false })), + framework::dataset::make("DataType", DataType::F32))), + pool_data_layout_dataset)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_f32); +} FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP, framework::dataset::make("DataType", DataType::F32))), @@ -199,6 +212,8 @@ TEST_SUITE(Quantized) template using NEPoolingLayerQuantizedFixture = PoolingLayerValidationQuantizedFixture; +template +using NEPoolingLayerQuantizedMixedDataLayoutFixture = PoolingLayerValidationQuantizedFixture; TEST_SUITE(QASYMM8) FIXTURE_DATA_TEST_CASE(RunSmallNCHW, NEPoolingLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), @@ -221,24 +236,40 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerQuantizedFixture, framew // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); } -TEST_SUITE_END() // QASYMM8 -TEST_SUITE(QASYMM8_SIGNED) -FIXTURE_DATA_TEST_CASE(RunSmallNCHW, NEPoolingLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), - combine(PoolingLayerDatasetQASYMM8Small, - framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))), - framework::dataset::make("DataLayout", { DataLayout::NCHW })), - qasymm8_signed_in_qinfo_dataset), - qasymm8_signed_in_qinfo_dataset)) +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEPoolingLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), + combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), + framework::dataset::make("PoolingSize", { Size2D(2, 2) })), + framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })), + framework::dataset::make("ExcludePadding", { true })), + framework::dataset::make("DataType", DataType::QASYMM8))), + framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })), + framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10) })), + framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5) }))) { // Validate output - validate(Accessor(_target), _reference, tolerance_qasymm8_s); + validate(Accessor(_target), _reference, tolerance_qasymm8); } +TEST_SUITE_END() // QASYMM8 +TEST_SUITE(QASYMM8_SIGNED) FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerQuantizedFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQASYMM8Small, framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))), - 
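For reference, the pooling output shape implied by the parameters in these cases follows the usual formula; note that ExcludePadding only changes the denominator of average pooling, never the output shape:

    // out_dim = floor((in_dim + pad_before + pad_after - pool_size) / stride) + 1
    // e.g. in_dim = 7, pool_size = 2, stride = 2, no padding:
    //      floor((7 - 2) / 2) + 1 = 3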
framework::dataset::make("DataLayout", { DataLayout::NHWC })), + framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })), qasymm8_signed_in_qinfo_dataset), - qasymm8_signed_out_qinfo_dataset)) + qasymm8_signed_in_qinfo_dataset)) +{ + // Validate output + validate(Accessor(_target), _reference, tolerance_qasymm8_s); +} +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEPoolingLayerQuantizedMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(), + combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }), + framework::dataset::make("PoolingSize", { Size2D(2, 2) })), + framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })), + framework::dataset::make("ExcludePadding", { true })), + framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))), + framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })), + framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })), + framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) }))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8_s); diff --git a/tests/validation/NEON/Scale.cpp b/tests/validation/NEON/Scale.cpp index eab241cb88..64427ae34f 100644 --- a/tests/validation/NEON/Scale.cpp +++ b/tests/validation/NEON/Scale.cpp @@ -316,7 +316,11 @@ DATA_TEST_CASE(CheckNoPaddingInterpAREA, framework::DatasetMode::ALL, combine(co template using NEScaleFixture = ScaleValidationFixture; template +using NEScaleMixedDataLayoutFixture = ScaleValidationFixture; +template using NEScaleQuantizedFixture = ScaleValidationQuantizedFixture; +template +using NEScaleQuantizedMixedDataLayoutFixture = ScaleValidationQuantizedFixture; TEST_SUITE(Float) TEST_SUITE(FP32) @@ -330,6 +334,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEScaleFixture, framework::DatasetMode:: // Validate output validate(Accessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEScaleMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT, ASSEMBLE_DATASET(f32_shape, ScaleSamplingPolicySet)) +{ + //Create valid region + TensorInfo src_info(_shape, 1, _data_type); + ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED)); + + // Validate output + validate(Accessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32); +} FIXTURE_DATA_TEST_CASE(RunSmallAlignCorners, NEScaleFixture, framework::DatasetMode::ALL, ASSEMBLE_DATASET(f32_shape, ScaleAlignCornersSamplingPolicySet)) { //Create valid region @@ -422,6 +435,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEScaleQuantizedFixture, framework::Da // Validate output validate(Accessor(_target), _reference, valid_region, tolerance_u8); } +FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEScaleQuantizedMixedDataLayoutFixture, framework::DatasetMode::ALL, ASSEMBLE_QUANTIZED_DATASET(qasymm8_shape, ScaleSamplingPolicySet, QuantizationInfoSet)) +{ + //Create valid region + TensorInfo src_info(_shape, 1, _data_type); + ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED)); + + // Validate output + validate(Accessor(_target), _reference, valid_region, tolerance_u8); +} FIXTURE_DATA_TEST_CASE(RunSmallAlignCorners, NEScaleQuantizedFixture, 
framework::DatasetMode::ALL, ASSEMBLE_QUANTIZED_DATASET(qasymm8_shape, ScaleAlignCornersSamplingPolicySet, QuantizationInfoSet)) { diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h index a4db49fc8e..07790e84d9 100644 --- a/tests/validation/fixtures/ConvolutionLayerFixture.h +++ b/tests/validation/fixtures/ConvolutionLayerFixture.h @@ -69,8 +69,9 @@ public: public: template void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights, - DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info) + DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, bool mixed_layout = false) { + _mixed_layout = mixed_layout; _data_type = data_type; _weights_data_type = weights_data_type; _is_quantized = is_data_type_quantized_asymmetric(data_type); @@ -86,6 +87,21 @@ public: } protected: + + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(_data_layout); + dst.info()->set_data_layout(_data_layout); + } + void regularize_values(void *values, size_t size) { float *fvalues = static_cast(values); @@ -214,8 +230,15 @@ protected: fill(AccessorType(weights), 1); fill(AccessorType(bias), 2); - // Compute NEConvolutionLayer function - conv.run(); + if(_mixed_layout) + { + mix_layout(conv, src, dst); + } + else + { + // Compute Convolution function + conv.run(); + } return dst; } @@ -264,9 +287,10 @@ protected: QuantizationInfo _weight_quantization_info{}; bool _is_quantized = false; bool _is_bfloat16 = false; + bool _mixed_layout = false; }; -template +template class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture { public: @@ -276,11 +300,11 @@ public: { ConvolutionValidationGenericFixture::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, data_type, data_type, data_layout, - QuantizationInfo(), QuantizationInfo(), act_info); + QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout); } }; -template +template class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture { public: @@ -289,7 +313,7 @@ public: DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info) { ConvolutionValidationGenericFixture::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights, - data_type, data_type, data_layout, quantization_info, quantization_info, act_info); + data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout); } }; diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h index d9806b5c84..0aa43d82b4 100644 --- 
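Every fixture in this patch shares the pattern factored out as mix_layout() above: configure the function in one layout, flip the tensor infos to the opposite layout before run(), then restore the original layout so the comparison happens in the layout the reference was computed in. A minimal generic sketch of that pattern (helper name illustrative, not part of the patch):

    template <typename FunctionType, typename TensorType>
    void run_with_flipped_layout(FunctionType &fn, TensorType &src, TensorType &dst)
    {
        const DataLayout original = src.info()->data_layout();
        const DataLayout flipped  = (original == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;

        // The function was configured with 'original'; flipping afterwards
        // checks that run() does not depend on the infos' current layout.
        src.info()->set_data_layout(flipped);
        dst.info()->set_data_layout(flipped);

        fn.run();

        // Restore so the test suite compares values in the original layout.
        src.info()->set_data_layout(original);
        dst.info()->set_data_layout(original);
    }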
a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h @@ -59,8 +59,9 @@ public: void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation, unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type, QuantizationInfo input_quantization_info, QuantizationInfo weights_quantization_info, QuantizationInfo output_quantization_info, - DataLayout data_layout, ActivationLayerInfo act_info) + DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false) { + _mixed_layout = mixed_layout; _input_shape = in_shape; _input_data_type = input_data_type; _weights_data_type = weights_data_type; @@ -130,9 +131,16 @@ public: fill(AccessorType(_src), 0); fill(AccessorType(_weights), 1); fill(AccessorType(_biases), 2); - - // Compute function - _dwc.run(); + + if(_mixed_layout) + { + mix_layout(_dwc, _src, _target); + } + else + { + // Compute function + _dwc.run(); + } } void compute_reference() @@ -150,6 +158,21 @@ public: } protected: + + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(_data_layout); + dst.info()->set_data_layout(_data_layout); + } + template void fill(U &&tensor, int i) { @@ -214,9 +237,10 @@ protected: ActivationLayerInfo _act_info{}; unsigned int _depth_multiplier{}; Size2D _dilation{}; + bool _mixed_layout{false}; }; -template +template class DepthwiseConvolutionLayerValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture { public: @@ -226,7 +250,7 @@ public: { DepthwiseConvolutionLayerValidationGenericFixture::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type, data_type, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(), - data_layout, act_info); + data_layout, act_info, mixed_layout); } }; @@ -434,8 +458,15 @@ public: fill(AccessorType(_weights), 1); fill(AccessorType(_biases), 2); + // Test Multi DataLayout graph cases, when the data layout changes after configure + _src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + _target.info()->set_data_layout(_data_layout == DataLayout::NCHW ? 
DataLayout::NHWC : DataLayout::NCHW); + // Compute function _dwc.run(); + + // Reinstating original data layout for the test suite to properly check the values + _target.info()->set_data_layout(_data_layout); } void compute_reference() @@ -496,7 +527,7 @@ protected: unsigned int _n0{}; }; -template +template class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConvolutionLayerValidationGenericFixture { public: @@ -506,7 +537,7 @@ public: { DepthwiseConvolutionLayerValidationGenericFixture::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type, data_type, input_quantization_info, input_quantization_info, output_quantization_info, - data_layout, act_info); + data_layout, act_info, mixed_layout); } }; diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h index 8e4de77535..5ed0b9f9a3 100644 --- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h +++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h @@ -53,10 +53,11 @@ public: template void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels, - DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout) + DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false) { _quantization_info = quantization_info; _data_type = data_type; + _mixed_layout = mixed_layout; TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels); const TensorShape bias_shape(num_kernels); @@ -89,6 +90,22 @@ public: } protected: + + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + DataLayout data_layout = src.info()->data_layout(); + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? 
DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); + } + template void fill(U &&tensor, int i) { @@ -171,8 +188,15 @@ protected: fill(AccessorType(weights), 1); fill(AccessorType(bias), 2); - // Compute NEConvolutionLayer function - conv.run(); + if(_mixed_layout) + { + mix_layout(conv, src, dst); + } + else + { + // Compute Convolution function + conv.run(); + } return dst; } @@ -197,9 +221,10 @@ protected: SimpleTensor _reference{}; QuantizationInfo _quantization_info{}; DataType _data_type{}; + bool _mixed_layout {false}; }; -template +template class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture { public: @@ -208,11 +233,11 @@ public: DataLayout data_layout) { DirectConvolutionValidationGenericFixture::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(), - act_info, data_layout); + act_info, data_layout, mixed_layout); } }; -template +template class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture { public: @@ -221,7 +246,7 @@ public: ActivationLayerInfo act_info, DataLayout data_layout) { DirectConvolutionValidationGenericFixture::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info, - act_info, data_layout); + act_info, data_layout, mixed_layout); } }; diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h index 86a97272a0..199730d5d0 100644 --- a/tests/validation/fixtures/FFTFixture.h +++ b/tests/validation/fixtures/FFTFixture.h @@ -134,8 +134,9 @@ class FFTConvolutionValidationGenericFixture : public framework::Fixture public: template void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, - DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) + DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false) { + _mixed_layout = mixed_layout; _data_type = data_type; _data_layout = data_layout; @@ -144,6 +145,21 @@ public: } protected: + + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? 
DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(_data_layout); + dst.info()->set_data_layout(_data_layout); + } + template void fill(U &&tensor, int i) { @@ -209,10 +225,16 @@ protected: fill(AccessorType(src), 0); fill(AccessorType(weights), 1); fill(AccessorType(bias), 2); - - // Compute convolution function - conv.run(); - + + if(_mixed_layout) + { + mix_layout(conv, src, dst); + } + else + { + // Compute Convolution function + conv.run(); + } return dst; } @@ -239,9 +261,10 @@ protected: SimpleTensor _reference{}; DataType _data_type{}; DataLayout _data_layout{}; + bool _mixed_layout{false}; }; -template +template class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture { public: @@ -250,7 +273,7 @@ public: DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info) { FFTConvolutionValidationGenericFixture::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, - data_type, data_layout, act_info); + data_type, data_layout, act_info, mixed_layout); } }; } // namespace validation diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h index 3760cfb8b7..8f38aae187 100644 --- a/tests/validation/fixtures/FullyConnectedLayerFixture.h +++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h @@ -56,11 +56,12 @@ public: public: template void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights, - DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info) + DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info, bool mixed_layout = false) { ARM_COMPUTE_UNUSED(weights_shape); ARM_COMPUTE_UNUSED(bias_shape); + _mixed_layout = mixed_layout; _data_type = data_type; _bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type; _quantization_info = quantization_info; @@ -71,6 +72,22 @@ public: } protected: + + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout = src.info()->data_layout(); + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? 
DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); + } + template void fill(U &&tensor, int i) { @@ -189,8 +206,15 @@ protected: fill(AccessorType(weights), 1); } - // Compute NEFullyConnectedLayer function - fc.run(); + if(_mixed_layout) + { + mix_layout(fc, src, dst); + } + else + { + // Compute NEFullyConnectedLayer function + fc.run(); + } return dst; } @@ -214,11 +238,12 @@ protected: SimpleTensor _reference{}; DataType _data_type{}; DataType _bias_data_type{}; + bool _mixed_layout{false}; QuantizationInfo _quantization_info{}; ActivationLayerInfo _activation_info{}; }; -template +template class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationGenericFixture { public: @@ -228,11 +253,11 @@ public: { FullyConnectedLayerValidationGenericFixture::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, - QuantizationInfo(), activation_info); + QuantizationInfo(), activation_info, mixed_layout); } }; -template +template class FullyConnectedLayerValidationQuantizedFixture : public FullyConnectedLayerValidationGenericFixture { public: @@ -242,7 +267,7 @@ public: { FullyConnectedLayerValidationGenericFixture::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights, reshape_weights, data_type, - quantization_info, activation_info); + quantization_info, activation_info, mixed_layout); } }; } // namespace validation diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h index af078d4ce3..ee81ff5538 100644 --- a/tests/validation/fixtures/PoolingLayerFixture.h +++ b/tests/validation/fixtures/PoolingLayerFixture.h @@ -47,14 +47,31 @@ class PoolingLayerValidationGenericFixture : public framework::Fixture public: template void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, bool indices = false, - QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo()) + QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false) { + _mixed_layout = mixed_layout; _pool_info = pool_info; _target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); _reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices); } protected: + + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout = src.info()->data_layout(); + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? 
DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); + } + template void fill(U &&tensor) { @@ -110,9 +127,15 @@ protected: // Fill tensors fill(AccessorType(src)); - // Compute function - pool_layer.run(); - + if(_mixed_layout) + { + mix_layout(pool_layer, src, dst); + } + else + { + // Compute function + pool_layer.run(); + } return dst; } @@ -129,6 +152,7 @@ protected: TensorType _target{}; SimpleTensor _reference{}; PoolingLayerInfo _pool_info{}; + bool _mixed_layout{false}; TensorType _target_indices{}; SimpleTensor _ref_indices{}; }; @@ -144,7 +168,7 @@ public: } }; -template +template class PoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture { public: @@ -152,7 +176,7 @@ public: void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout) { PoolingLayerValidationGenericFixture::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding), - data_type, data_layout); + data_type, data_layout, false, mixed_layout); } }; @@ -168,7 +192,7 @@ public: } }; -template +template class PoolingLayerValidationQuantizedFixture : public PoolingLayerValidationGenericFixture { public: @@ -177,7 +201,7 @@ public: QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo()) { PoolingLayerValidationGenericFixture::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding), - data_type, data_layout, false, input_qinfo, output_qinfo); + data_type, data_layout, false, input_qinfo, output_qinfo, mixed_layout); } }; diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h index dd521470e6..9e0f620abe 100644 --- a/tests/validation/fixtures/ScaleFixture.h +++ b/tests/validation/fixtures/ScaleFixture.h @@ -46,7 +46,7 @@ class ScaleValidationGenericFixture : public framework::Fixture public: template void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy, - bool align_corners) + bool align_corners, bool mixed_layout) { _shape = shape; _policy = policy; @@ -55,6 +55,7 @@ public: _data_type = data_type; _quantization_info = quantization_info; _align_corners = align_corners; + _mixed_layout = mixed_layout; generate_scale(shape); @@ -67,6 +68,22 @@ public: } protected: + + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) + { + const DataLayout data_layout = src.info()->data_layout(); + // Test Multi DataLayout graph cases, when the data layout changes after configure + src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW); + dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? 
DataLayout::NHWC : DataLayout::NCHW); + + // Compute Convolution function + layer.run(); + + // Reinstating original data layout for the test suite to properly check the values + src.info()->set_data_layout(data_layout); + dst.info()->set_data_layout(data_layout); + } + void generate_scale(const TensorShape &shape) { static constexpr float _min_scale{ 0.25f }; @@ -155,9 +172,15 @@ protected: // Fill tensors fill(AccessorType(src)); - // Compute function - scale.run(); - + if(_mixed_layout) + { + mix_layout(scale, src, dst); + } + else + { + // Compute function + scale.run(); + } return dst; } @@ -182,11 +205,12 @@ protected: DataType _data_type{}; QuantizationInfo _quantization_info{}; bool _align_corners{ false }; + bool _mixed_layout{ false }; float _scale_x{ 1.f }; float _scale_y{ 1.f }; }; -template +template class ScaleValidationQuantizedFixture : public ScaleValidationGenericFixture { public: @@ -201,10 +225,11 @@ public: policy, border_mode, sampling_policy, - align_corners); + align_corners, + mixed_layout); } }; -template +template class ScaleValidationFixture : public ScaleValidationGenericFixture { public: @@ -218,7 +243,8 @@ public: policy, border_mode, sampling_policy, - align_corners); + align_corners, + mixed_layout); } }; } // namespace validation diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h index 03ec920c4e..f956963e14 100644 --- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h +++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h @@ -51,130 +51,38 @@ namespace validation { using namespace arm_compute::misc::shape_calculator; -template -class WinogradConvolutionLayerValidationFixture : public framework::Fixture +template +class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture { public: template void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, - DataType data_type, ActivationLayerInfo act_info) + DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout) + { ARM_COMPUTE_UNUSED(dilation); - - _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info); - _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info); + _mixed_layout = mixed_layout; + _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout); + _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info); } protected: - template - void fill(U &&tensor, int i, float min, float max) - { - switch(tensor.data_type()) - { - case DataType::F16: - { - arm_compute::utils::uniform_real_distribution_16bit distribution{ float(min), float(max) }; - library->fill(tensor, distribution, i); - break; - } - case DataType::F32: - { - std::uniform_real_distribution distribution(min, max); - library->fill(tensor, distribution, i); - break; - } - default: - { - ARM_COMPUTE_ERROR("Not supported"); - } - } - } - TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info, - DataType data_type, ActivationLayerInfo act_info) + void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst) { - // Create tensors - TensorType src = create_tensor(input_shape, data_type, 1); - TensorType 
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index 03ec920c4e..f956963e14 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -51,130 +51,38 @@ namespace validation
 {
 using namespace arm_compute::misc::shape_calculator;
 
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
-class WinogradConvolutionLayerValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false>
+class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
 {
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
-               DataType data_type, ActivationLayerInfo act_info)
+               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
+
     {
         ARM_COMPUTE_UNUSED(dilation);
-
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
+        _mixed_layout = mixed_layout;
+        _target       = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
+        _reference    = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
     }
 
 protected:
-    template <typename U>
-    void fill(U &&tensor, int i, float min, float max)
-    {
-        switch(tensor.data_type())
-        {
-            case DataType::F16:
-            {
-                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
-                library->fill(tensor, distribution, i);
-                break;
-            }
-            case DataType::F32:
-            {
-                std::uniform_real_distribution<float> distribution(min, max);
-                library->fill(tensor, distribution, i);
-                break;
-            }
-            default:
-            {
-                ARM_COMPUTE_ERROR("Not supported");
-            }
-        }
-    }
-
-    TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
-                              DataType data_type, ActivationLayerInfo act_info)
+    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
-        // Create tensors
-        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1);
-        TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
-        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1);
-        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1);
-
-        // Create and configure function
-        FunctionType conv;
-        ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
-        conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);
-
-        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-        // Allocate tensors
-        src.allocator()->allocate();
-        weights.allocator()->allocate();
-        dst.allocator()->allocate();
-        bias.allocator()->allocate();
-
-        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-        // Fill tensors
-        fill(AccessorType(src), 0, -1.f, 1.f);
-        fill(AccessorType(weights), 1, -1.f, 1.f);
-        fill(AccessorType(bias), 2, -1.f, 1.f);
-
-        // Compute Winograd Convolution function
-        conv.run();
-
-        return dst;
-    }
-
-    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
-                                      DataType data_type, ActivationLayerInfo act_info)
-    {
-        // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1 };
-        SimpleTensor<T> weights{ weights_shape, data_type, 1 };
-        SimpleTensor<T> bias{ bias_shape, data_type, 1 };
-
-        // Fill reference
-        fill(src, 0, -1.f, 1.f);
-        fill(weights, 1, -1.f, 1.f);
-        if(use_bias)
-        {
-            fill(bias, 2, -1.f, 1.f);
-        }
-        else
-        {
-            fill(bias, 2, 0.f, 0.f);
-        }
+        const DataLayout data_layout = src.info()->data_layout();
+        // Test Multi DataLayout graph cases, when the data layout changes after configure
+        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
 
-        SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
-
-        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
-    }
-
-    TensorType _target{};
-    SimpleTensor<T> _reference{};
-};
+        // Compute Convolution function
+        layer.run();
 
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true>
-class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
-{
-public:
-    template <typename...>
-    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
-               DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
-
-    {
-        ARM_COMPUTE_UNUSED(dilation);
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
+        // Reinstating original data layout for the test suite to properly check the values
+        src.info()->set_data_layout(data_layout);
+        dst.info()->set_data_layout(data_layout);
     }
 
-protected:
     template <typename U>
     void fill(U &&tensor, int i, float min, float max)
     {
@@ -242,9 +150,15 @@ protected:
         fill(AccessorType(weights), 1, -0.5f, 0.5f);
         fill(AccessorType(bias), 2, -0.5f, 0.5f);
 
-        // Compute Winograd Convolution function
-        conv.run();
-
+        if(_mixed_layout)
+        {
+            mix_layout(conv, src, dst);
+        }
+        else
+        {
+            // Compute function
+            conv.run();
+        }
         return dst;
     }
 
@@ -321,9 +235,10 @@ protected:
 
     TensorType _target{};
    SimpleTensor<T1> _reference{};
+    bool _mixed_layout{ false };
 };
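
Distilled out of the fixture, the property these changes test is that run() keeps using the data layout captured at configure() time even if the ITensorInfo layout is rewritten afterwards, as can happen in a graph that switches layouts between nodes. A minimal standalone sketch (the function and tensor names are illustrative; only the set_data_layout()/run() sequence mirrors the fixture's mix_layout()):

    // Flip the reported layout between configure() and run(), then restore it
    // so that host-side validation indexes elements in the original layout.
    void run_with_flipped_layout(arm_compute::NEWinogradConvolutionLayer &conv,
                                 arm_compute::Tensor &src, arm_compute::Tensor &dst)
    {
        using arm_compute::DataLayout;
        const DataLayout original = src.info()->data_layout();
        const DataLayout flipped  = (original == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;

        src.info()->set_data_layout(flipped);   // the layout changes after configure()
        dst.info()->set_data_layout(flipped);
        conv.run();                             // must still compute with the configured layout

        src.info()->set_data_layout(original);  // reinstate for validation
        dst.info()->set_data_layout(original);
    }
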
 
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
 class WinogradInputTransformValidationFixture : public framework::Fixture
 {
 public:
@@ -331,12 +246,30 @@
     void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
     {
         TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
-
+        _mixed_layout = mixed_layout;
         _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
         _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
     }
 
 protected:
+
+    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+    {
+        const DataLayout data_layout_src = src.info()->data_layout();
+        const DataLayout data_layout_dst = dst.info()->data_layout();
+
+        // Test Multi DataLayout graph cases, when the data layout changes after configure
+        src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+        dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+        // Compute Convolution function
+        layer.run();
+
+        // Reinstating original data layout for the test suite to properly check the values
+        src.info()->set_data_layout(data_layout_src);
+        dst.info()->set_data_layout(data_layout_dst);
+    }
+
     template <typename U>
     void fill(U &&tensor, int i, float min, float max)
     {
@@ -388,9 +321,15 @@ protected:
         // Fill tensors
         fill(AccessorType(src), 0, -1.f, 1.f);
 
-        // Compute Winograd input transform function
-        transf.run();
-
+        if(_mixed_layout)
+        {
+            mix_layout(transf, src, dst);
+        }
+        else
+        {
+            // Compute Winograd input transform function
+            transf.run();
+        }
         return dst;
     }
 
@@ -405,11 +344,12 @@ protected:
         return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
     }
 
+    bool _mixed_layout{ false };
     TensorType _target{};
     SimpleTensor<T> _reference{};
 };
 
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
 class WinogradFilterTransformValidationFixture : public framework::Fixture
 {
 public:
@@ -419,11 +359,30 @@
         WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
         TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
 
+        _mixed_layout = mixed_layout;
         _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
         _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
     }
 
 protected:
+
+    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+    {
+        const DataLayout data_layout_src = src.info()->data_layout();
+        const DataLayout data_layout_dst = dst.info()->data_layout();
+
+        // Test Multi DataLayout graph cases, when the data layout changes after configure
+        src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+        dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+        // Compute Convolution function
+        layer.run();
+
+        // Reinstating original data layout for the test suite to properly check the values
+        src.info()->set_data_layout(data_layout_src);
+        dst.info()->set_data_layout(data_layout_dst);
+    }
+
     template <typename U>
     void fill(U &&tensor, int i, float min, float max)
     {
@@ -476,8 +435,15 @@ protected:
         // Fill tensors
         fill(AccessorType(src), 0, -1.f, 1.f);
 
-        filter_transform.run();
-
+        if(_mixed_layout)
+        {
+            mix_layout(filter_transform, src, dst);
+        }
+        else
+        {
+            // Compute Winograd filter transform function
+            filter_transform.run();
+        }
         return dst;
     }
 
@@ -492,11 +458,12 @@ protected:
         return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
     }
 
+    bool _mixed_layout{ false };
     TensorType _target{};
     SimpleTensor<T> _reference{};
 };
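
The transform fixtures gain the same flag, so the CL and NEON suites can register MixedDataLayout cases for the individual Winograd stages as well; a representative instantiation might look like this (the alias name is illustrative, not lifted from the patch's test files):

    using CLWinogradInputTransformMixedLayoutFixtureFP32 =
        WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float, true>;
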
 
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
 class WinogradOutputTransformValidationFixture : public framework::Fixture
 {
 public:
@@ -508,6 +475,24 @@
     }
 
 protected:
+
+    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+    {
+        const DataLayout data_layout_src = src.info()->data_layout();
+        const DataLayout data_layout_dst = dst.info()->data_layout();
+
+        // Test Multi DataLayout graph cases, when the data layout changes after configure
+        src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+        dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+        // Compute Convolution function
+        layer.run();
+
+        // Reinstating original data layout for the test suite to properly check the values
+        src.info()->set_data_layout(data_layout_src);
+        dst.info()->set_data_layout(data_layout_dst);
+    }
+
     template <typename U>
     void fill(U &&tensor, int i, float min, float max)
     {
@@ -562,8 +547,15 @@ protected:
         fill(AccessorType(src), 0, -1.f, 1.f);
         fill(AccessorType(bias), 1, -1.f, 1.f);
 
-        output_transform.run();
-
+        if(_mixed_layout)
+        {
+            mix_layout(output_transform, src, dst);
+        }
+        else
+        {
+            // Compute Winograd output transform function
+            output_transform.run();
+        }
         return dst;
     }
 
@@ -585,10 +577,11 @@ protected:
         return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
     }
 
+    bool _mixed_layout{ false };
     TensorType _target{};
     SimpleTensor<T> _reference{};
 };
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
+#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
\ No newline at end of file
-- 
cgit v1.2.1
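
As a closing illustration, the full template signature of the fast-math fixture above is <TensorType, AccessorType, FunctionType, T, T1, use_bias, mixed_layout>, so a NEON suite could enable the mixed-layout path with an alias such as the following (a sketch; the alias name is not part of this patch):

    template <typename T>
    using NEWinogradConvolutionLayerMixedDataLayoutFixture =
        WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, true, true>;
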