author    Manuel Bottini <manuel.bottini@arm.com>    2021-03-23 11:50:34 +0000
committer Manuel Bottini <manuel.bottini@arm.com>    2021-04-06 11:28:16 +0000
commit    ca62c6f53eb7244e6fed9f7e932608aa2496d9eb (patch)
tree      e5c7630c40d9f009e9baef4e849c6c7cc6ca90a7
parent    4ed7b39dbbe8ccc6267a9eacefca51717c3b3e10 (diff)
download  ComputeLibrary-ca62c6f53eb7244e6fed9f7e932608aa2496d9eb.tar.gz
Mixed data-layout testing on high priority operators
Change data layouts after the configure in validation tests for:
- Scale
- Pooling
- FullyConnected
- DepthwiseConvolution
- DirectConvolution
- FFTConvolution
- WinogradConvolution
- GEMMConvolution (Indirect GEMM included)

Extending fixtures

Fixes for new mixed data layout tests

Resolves: COMPMID-4162

Change-Id: I2f2eb2075f7e24ab3872249d88cadb57b82c5dde
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5326
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
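What "mixed data layout" means here: each extended fixture configures the function under test in one layout, flips the tensors to the opposite layout before run(), and restores the original layout before validation, proving the operator only reads the layout at configure time. A minimal sketch of the pattern, assuming an illustrative helper named mix_layout (the real per-operator fixtures in this patch differ in detail):

    // Sketch: exercise layout caching by flipping layouts between configure() and run()
    template <typename FunctionType, typename TensorType>
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout = src.info()->data_layout();
        // Change the layouts *after* the function has been configured
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        layer.run();

        // Revert to the original layouts so validation compares like with like
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }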
-rw-r--r--  arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h  1
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp  11
-rw-r--r--  src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h  1
-rw-r--r--  src/core/cpu/kernels/pooling/neon/fp16.cpp  4
-rw-r--r--  src/core/cpu/kernels/pooling/neon/fp32.cpp  4
-rw-r--r--  src/core/cpu/kernels/pooling/neon/list.h  4
-rw-r--r--  src/core/cpu/kernels/pooling/neon/nchw/all.cpp  2
-rw-r--r--  src/core/cpu/kernels/scale/sve/qasymm8.cpp  7
-rw-r--r--  src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp  7
-rw-r--r--  src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp  6
-rw-r--r--  src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp  24
-rw-r--r--  tests/datasets/ScaleValidationDataset.h  19
-rw-r--r--  tests/validation/CL/ConvolutionLayer.cpp  55
-rw-r--r--  tests/validation/CL/DepthwiseConvolutionLayer.cpp  36
-rw-r--r--  tests/validation/CL/DirectConvolutionLayer.cpp  94
-rw-r--r--  tests/validation/CL/FFT.cpp  10
-rw-r--r--  tests/validation/CL/FullyConnectedLayer.cpp  53
-rw-r--r--  tests/validation/CL/PoolingLayer.cpp  41
-rw-r--r--  tests/validation/CL/Scale.cpp  11
-rw-r--r--  tests/validation/CL/Winograd.cpp  76
-rw-r--r--  tests/validation/NEON/ConvolutionLayer.cpp  71
-rw-r--r--  tests/validation/NEON/DepthwiseConvolutionLayer.cpp  80
-rw-r--r--  tests/validation/NEON/DirectConvolutionLayer.cpp  10
-rw-r--r--  tests/validation/NEON/FFT.cpp  11
-rw-r--r--  tests/validation/NEON/FullyConnectedLayer.cpp  55
-rw-r--r--  tests/validation/NEON/PoolingLayer.cpp  53
-rw-r--r--  tests/validation/NEON/Scale.cpp  22
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h  38
-rw-r--r--  tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h  47
-rw-r--r--  tests/validation/fixtures/DirectConvolutionLayerFixture.h  39
-rw-r--r--  tests/validation/fixtures/FFTFixture.h  37
-rw-r--r--  tests/validation/fixtures/FullyConnectedLayerFixture.h  39
-rw-r--r--  tests/validation/fixtures/PoolingLayerFixture.h  40
-rw-r--r--  tests/validation/fixtures/ScaleFixture.h  42
-rw-r--r--  tests/validation/fixtures/WinogradConvolutionLayerFixture.h  241
35 files changed, 1007 insertions, 284 deletions
diff --git a/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
index befc373646..3367b10a96 100644
--- a/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEWinogradConvolutionLayer.h
@@ -135,6 +135,7 @@ private:
ITensor *_output;
bool _is_prepared;
bool _is_activationlayer_enabled;
+ DataLayout _data_layout;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEWINOGRADCONVOLUTIONLAYER_H */
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
index 09f99748bf..98b76c7db3 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.cpp
@@ -1217,7 +1217,7 @@ void NEDirectConvolutionLayerKernel::convolve_nhwc(const Window &window)
NEDirectConvolutionLayerKernel::NEDirectConvolutionLayerKernel()
: _input(nullptr), _weights(nullptr), _output(nullptr), _conv_info(), _border_size(0), _kernel_size(0), _num_weight_elems_read_per_row(0), _num_elems_read_per_iteration(0),
- _num_elems_written_per_iteration(0)
+ _num_elems_written_per_iteration(0), _data_layout()
{
}
@@ -1234,13 +1234,14 @@ void NEDirectConvolutionLayerKernel::configure(const ITensor *input, const ITens
_weights = weights;
_output = output;
_conv_info = conv_info;
- _kernel_size = weights->info()->dimension(get_data_layout_dimension_index(weights->info()->data_layout(), DataLayoutDimension::WIDTH));
+ _data_layout = _input->info()->data_layout();
+ _kernel_size = weights->info()->dimension(get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH));
const unsigned int conv_pad_left = conv_info.pad_left();
const unsigned int conv_pad_top = conv_info.pad_top();
const unsigned int conv_pad_right = conv_info.pad_right();
const unsigned int conv_pad_bottom = conv_info.pad_bottom();
- if(_input->info()->data_layout() == DataLayout::NCHW)
+ if(_data_layout == DataLayout::NCHW)
{
_border_size = BorderSize(conv_pad_top, conv_pad_right, conv_pad_bottom, conv_pad_left);
}
@@ -1294,9 +1295,9 @@ void NEDirectConvolutionLayerKernel::run(const Window &window, const ThreadInfo
ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
ARM_COMPUTE_ERROR_ON(_input->buffer() == nullptr);
- const int kernel_size = _weights->info()->dimension(get_data_layout_dimension_index(_weights->info()->data_layout(), DataLayoutDimension::WIDTH));
+ const int kernel_size = _weights->info()->dimension(get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH));
- if(_input->info()->data_layout() == DataLayout::NCHW)
+ if(_data_layout == DataLayout::NCHW)
{
switch(kernel_size)
{
diff --git a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
index 258def77a3..259eb683f6 100644
--- a/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
+++ b/src/core/NEON/kernels/NEDirectConvolutionLayerKernel.h
@@ -103,6 +103,7 @@ private:
unsigned int _num_weight_elems_read_per_row;
unsigned int _num_elems_read_per_iteration;
unsigned int _num_elems_written_per_iteration;
+ DataLayout _data_layout;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_NEDIRECTCONVOLUTIONLAYERKERNEL_H */
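The two NEON changes above share one fix: capture the data layout once in configure() and let the cached member drive run(), so a layout flip between the two calls cannot reroute execution. Reduced to its essentials (an illustrative sketch, not the actual classes):

    class SomeLayer
    {
    public:
        void configure(const ITensor *input)
        {
            _data_layout = input->info()->data_layout(); // decided once, here
        }
        void run()
        {
            // Uses the cached member rather than re-reading the tensor's
            // layout, which tests may have mutated after configure()
            if(_data_layout == DataLayout::NCHW) { /* NCHW path */ }
            else                                 { /* NHWC path */ }
        }
    private:
        DataLayout _data_layout{};
    };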
diff --git a/src/core/cpu/kernels/pooling/neon/fp16.cpp b/src/core/cpu/kernels/pooling/neon/fp16.cpp
index 314be3704e..1ecceafe86 100644
--- a/src/core/cpu/kernels/pooling/neon/fp16.cpp
+++ b/src/core/cpu/kernels/pooling/neon/fp16.cpp
@@ -93,7 +93,7 @@ void pooling2_f16_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *ds
// Store result
vst1q_f16(reinterpret_cast<float16_t *>(out.ptr()) + x_off, vres);
- const uint32_t offset_base = offset_no_padding<float16_t>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y);
+ const uint32_t offset_base = offset_no_padding<float16_t>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NHWC);
const uint32_t offset_x0 = (uint32_t)offset_base / sizeof(float16_t) + x_off;
const uint32_t offset_x1 = (uint32_t)offset_x0 + in_stride_y / sizeof(float16_t) - pad_right;
const uint32_t offset_x2 = (uint32_t)offset_x0 + in_stride_z / sizeof(float16_t) - pad_right * src->info()->tensor_shape()[1];
@@ -132,7 +132,7 @@ void pooling2_f16_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *ds
// Store result
*(reinterpret_cast<float16_t *>(out.ptr()) + x_off) = res;
- const uint32_t offset_base = offset_no_padding<float16_t>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y);
+ const uint32_t offset_base = offset_no_padding<float16_t>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NHWC);
const uint32_t offset_x0 = (uint32_t)offset_base / sizeof(float16_t) + x_off;
const uint32_t offset_x1 = (uint32_t)offset_x0 + in_stride_y / sizeof(float16_t) - pad_right;
const uint32_t offset_x2 = (uint32_t)offset_x0 + in_stride_z / sizeof(float16_t) - pad_right * src->info()->tensor_shape()[1];
diff --git a/src/core/cpu/kernels/pooling/neon/fp32.cpp b/src/core/cpu/kernels/pooling/neon/fp32.cpp
index e319047d76..a2bd4a6bb3 100644
--- a/src/core/cpu/kernels/pooling/neon/fp32.cpp
+++ b/src/core/cpu/kernels/pooling/neon/fp32.cpp
@@ -95,7 +95,7 @@ void pooling2_f32_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *ds
// Store result
vst1q_f32(reinterpret_cast<float *>(out.ptr()) + x_off, vres);
- const uint32_t offset_base = offset_no_padding<float>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y);
+ const uint32_t offset_base = offset_no_padding<float>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NHWC);
const uint32_t offset_x0 = (uint32_t)offset_base / sizeof(float) + x_off;
const uint32_t offset_x1 = (uint32_t)offset_x0 + in_stride_y / sizeof(float) - pad_right;
const uint32_t offset_x2 = (uint32_t)offset_x0 + in_stride_z / sizeof(float) - pad_right * src->info()->tensor_shape()[1];
@@ -124,7 +124,7 @@ void pooling2_f32_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *ds
// Store result
*(reinterpret_cast<float *>(out.ptr()) + x_off) = res;
- const uint32_t offset_base = offset_no_padding<float>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y);
+ const uint32_t offset_base = offset_no_padding<float>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NHWC);
const uint32_t offset_x0 = (uint32_t)offset_base / sizeof(float) + x_off;
const uint32_t offset_x1 = (uint32_t)offset_x0 + in_stride_y / sizeof(float) - pad_right;
const uint32_t offset_x2 = (uint32_t)offset_x0 + in_stride_z / sizeof(float) - pad_right * src->info()->tensor_shape()[1];
diff --git a/src/core/cpu/kernels/pooling/neon/list.h b/src/core/cpu/kernels/pooling/neon/list.h
index 3435ee6724..bec1536f61 100644
--- a/src/core/cpu/kernels/pooling/neon/list.h
+++ b/src/core/cpu/kernels/pooling/neon/list.h
@@ -59,7 +59,7 @@ DECLARE_POOLING_KERNEL(poolingMxN_fp32_neon_nchw);
#undef DECLARE_POOLING_KERNEL
template <typename T>
-inline uint32_t offset_no_padding(uint32_t padded_offset, const Coordinates &id, const ITensorInfo &info, int pool_stride_x, int pool_stride_y)
+inline uint32_t offset_no_padding(uint32_t padded_offset, const Coordinates &id, const ITensorInfo &info, int pool_stride_x, int pool_stride_y, DataLayout data_layout)
{
const int pad_left = info.padding().left;
const int pad_right = info.padding().right;
@@ -70,7 +70,7 @@ inline uint32_t offset_no_padding(uint32_t padded_offset, const Coordinates &id,
const int pad_horiz = pad_left + pad_right;
const int pad_vert = pad_top + pad_bottom;
- if(info.data_layout() == DataLayout::NCHW)
+ if(data_layout == DataLayout::NCHW)
{
const uint32_t offset_base = padded_offset
- sizeof(T) * pad_horiz * id.y() * pool_stride_y /* subtract padding elems per row */
diff --git a/src/core/cpu/kernels/pooling/neon/nchw/all.cpp b/src/core/cpu/kernels/pooling/neon/nchw/all.cpp
index 47ac7b4f7f..80eac684aa 100644
--- a/src/core/cpu/kernels/pooling/neon/nchw/all.cpp
+++ b/src/core/cpu/kernels/pooling/neon/nchw/all.cpp
@@ -150,7 +150,7 @@ void pooling2_nchw_maxpool_indices(const ITensor *src, ITensor *dst0, ITensor *d
*(reinterpret_cast<T *>(out.ptr())) = static_cast<T>(vget_lane_f32(max_data, 0));
// Calculate max data indice, which will be used in max unpool.
- const uint32_t offset_base = offset_no_padding<T>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y);
+ const uint32_t offset_base = offset_no_padding<T>(in.offset(), id, *src->info(), pool_stride_x, pool_stride_y, DataLayout::NCHW);
const uint32_t offset_top = (uint32_t)(offset_base / sizeof(T));
const uint32_t offset_bottom = offset_top + in_stride_y / sizeof(T) - pad_right - pad_left;
const uint32x2_t voffset_top = { offset_top, offset_top + 1u };
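The pooling helper takes a different route than caching: offset_no_padding() now receives the layout as an explicit argument, and each layout-specific kernel passes the layout it is written for instead of trusting info.data_layout(), which the mixed-layout tests may have flipped. A call-site sketch mirroring the hunks above:

    // In an NHWC kernel the layout is a property of the kernel, not the tensor:
    const uint32_t offset_base = offset_no_padding<float>(in.offset(), id, *src->info(),
                                                          pool_stride_x, pool_stride_y,
                                                          DataLayout::NHWC);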
diff --git a/src/core/cpu/kernels/scale/sve/qasymm8.cpp b/src/core/cpu/kernels/scale/sve/qasymm8.cpp
index c475ad615c..c041f14b22 100644
--- a/src/core/cpu/kernels/scale/sve/qasymm8.cpp
+++ b/src/core/cpu/kernels/scale/sve/qasymm8.cpp
@@ -89,10 +89,9 @@ void qasymm8_sve_scale_bilinear(const ITensor *src, ITensor *dst, const ITensor
BorderMode border_mode, PixelValue constant_border_value, float sampling_offset,
bool align_corners, const Window &window)
{
- // Get data layout and width/height indices
- const DataLayout data_layout = src->info()->data_layout();
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ // Data layout is NHWC
+ const int idx_width = 1;
+ const int idx_height = 2;
// Compute the ratio between source height and destination height
const auto hr = scale_utils::calculate_resize_ratio(src->info()->dimension(idx_height), dst->info()->dimension(idx_height), align_corners);
diff --git a/src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp b/src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp
index b39b75abba..9df4301fe3 100644
--- a/src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp
+++ b/src/core/cpu/kernels/scale/sve/qasymm8_signed.cpp
@@ -89,10 +89,9 @@ void qasymm8_signed_sve_scale_bilinear(const ITensor *src, ITensor *dst, const I
BorderMode border_mode, PixelValue constant_border_value, float sampling_offset,
bool align_corners, const Window &window)
{
- // Get data layout and width/height indices
- const DataLayout data_layout = src->info()->data_layout();
- const int idx_width = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const int idx_height = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
+ // Data layout is NHWC
+ const int idx_width = 1;
+ const int idx_height = 2;
// Compute the ratio between source height and destination height
const auto hr = scale_utils::calculate_resize_ratio(src->info()->dimension(idx_height), dst->info()->dimension(idx_height), align_corners);
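Hard-coding the indices is safe because these SVE bilinear kernels only ever run on NHWC tensors, and Compute Library indexes dimensions from the innermost one. The fixed values are exactly what the removed helper would return for NHWC (shown for reference, together with the NCHW case that the depthwise change below relies on):

    // NHWC: C = 0, W = 1, H = 2, N = 3
    const int idx_width  = get_data_layout_dimension_index(DataLayout::NHWC, DataLayoutDimension::WIDTH);  // == 1
    const int idx_height = get_data_layout_dimension_index(DataLayout::NHWC, DataLayoutDimension::HEIGHT); // == 2
    // NCHW: W = 0, H = 1, C = 2, N = 3 -- hence "_needs_permute ? 2 : 0" for the channel
    // index in CLDepthwiseConvolutionLayer (NCHW output when permuting, NHWC otherwise)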
diff --git a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
index 8d2c81bc15..5ed8aa98c9 100644
--- a/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthwiseConvolutionLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -324,7 +324,7 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerGeneric::prepare()
{
_output_multipliers.map();
_output_shifts.map();
- const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL);
+ const unsigned int idx_ofms = _needs_permute ? 2 : 0;
quantization::compute_quantized_multipliers_and_shifts(_input->info(),
_original_weights->info(),
_output->info(),
@@ -529,7 +529,7 @@ void CLDepthwiseConvolutionLayer::CLDepthwiseConvolutionLayerInternal3x3::prepar
{
_output_multipliers.map();
_output_shifts.map();
- const unsigned int idx_ofms = get_data_layout_dimension_index(_output->info()->data_layout(), DataLayoutDimension::CHANNEL);
+ const unsigned int idx_ofms = _needs_permute ? 2 : 0;
quantization::compute_quantized_multipliers_and_shifts(_input->info(),
_original_weights->info(),
_output->info(),
diff --git a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
index dc3bbbe562..941cb21e5e 100644
--- a/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEWinogradConvolutionLayer.cpp
@@ -303,7 +303,7 @@ arm_gemm::Activation arm_gemm_activation_from_acl_activation(const ActivationLay
NEWinogradConvolutionLayer::NEWinogradConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager)
: _memory_group(memory_manager), _gemm_function(memory_manager), _transform_input_kernel(nullptr), _transform_output_kernel(nullptr), _transform_weights_kernel(nullptr), _activationlayer_function(),
_permute_input(), _permute_weights(), _permute_output(), _input_transformed(), _output_transformed(), _input_workspace(), _output_workspace(), _kernel_storage(), _input_nhwc(), _output_nhwc(),
- _weights_hwio(), _input(), _weights(), _output(), _is_prepared(false), _is_activationlayer_enabled(false)
+ _weights_hwio(), _input(), _weights(), _output(), _is_prepared(false), _is_activationlayer_enabled(false), _data_layout()
{
}
@@ -314,10 +314,10 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), weights->info(), (biases != nullptr) ? biases->info() : nullptr, output->info(), conv_info));
// Get indices for the width and height
- const DataLayout data_layout = input->info()->data_layout();
- const unsigned int width_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
- const unsigned int height_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
- const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ _data_layout = input->info()->data_layout();
+ const unsigned int width_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
+ const unsigned int height_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
+ const unsigned int channel_idx = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
const Size2D input_dims = Size2D(input->info()->dimension(width_idx), input->info()->dimension(height_idx));
const Size2D kernel_size = Size2D(weights->info()->dimension(width_idx), weights->info()->dimension(height_idx));
@@ -537,7 +537,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
const unsigned int max_num_threads = NEScheduler::get().num_threads();
// Configure the kernel to transform the input tensor from NCHW -> NHWC
- if(data_layout == DataLayout::NCHW)
+ if(_data_layout == DataLayout::NCHW)
{
_memory_group.manage(&_input_nhwc);
_permute_input.configure(input, &_input_nhwc, PermutationVector(2U, 0U, 1U));
@@ -554,7 +554,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
TensorInfo input_workspace_info(TensorShape(input_workspace_size), 1, _input->info()->data_type());
_input_workspace.allocator()->init(input_workspace_info);
_input_workspace.allocator()->allocate();
- if(data_layout == DataLayout::NCHW)
+ if(_data_layout == DataLayout::NCHW)
{
_input_nhwc.allocator()->allocate();
}
@@ -570,7 +570,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
// Configure output transform function
// The biases tensor has not been allocated at this point in time, the output transform will add the biases to the final result in the run() method
- if(data_layout == DataLayout::NCHW)
+ if(_data_layout == DataLayout::NCHW)
{
_memory_group.manage(&_output_nhwc);
output_to_use = &_output_nhwc;
@@ -595,7 +595,7 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
_output_transformed.allocator()->allocate();
// Reorder the convoluted output to ACL's ordering NCHW
- if(data_layout == DataLayout::NCHW)
+ if(_data_layout == DataLayout::NCHW)
{
_permute_output.configure(&_output_nhwc, _output, PermutationVector(1U, 2U, 0U));
_output_nhwc.allocator()->allocate();
@@ -615,13 +615,11 @@ void NEWinogradConvolutionLayer::configure(const ITensor *input, const ITensor *
void NEWinogradConvolutionLayer::run()
{
- const DataLayout data_layout = _input->info()->data_layout();
-
prepare();
MemoryGroupResourceScope scope_mg(_memory_group);
- if(data_layout == DataLayout::NCHW)
+ if(_data_layout == DataLayout::NCHW)
{
//Bring channels to the front as Winograd code expects the tensor to be in the format NHWC
_permute_input.run();
@@ -636,7 +634,7 @@ void NEWinogradConvolutionLayer::run()
// Transform output tensor to the spatial domain
NEScheduler::get().schedule(_transform_output_kernel.get(), Window::DimX);
- if(data_layout == DataLayout::NCHW)
+ if(_data_layout == DataLayout::NCHW)
{
// Reorder the convoluted output to ACL's ordering NCHW
_permute_output.run();
diff --git a/tests/datasets/ScaleValidationDataset.h b/tests/datasets/ScaleValidationDataset.h
index 881be0fc26..c0073f93f5 100644
--- a/tests/datasets/ScaleValidationDataset.h
+++ b/tests/datasets/ScaleValidationDataset.h
@@ -145,12 +145,10 @@ framework::dataset::make("AlignCorners", { true }));
* - 3D shapes with 0, 1 vector iterations
* - 4D shapes with 0 vector iterations
*/
-#define SCALE_SHAPE_DATASET(element_per_iteration) \
- concat(concat(concat(concat(concat(ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 0>(), \
- ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 1>()), \
- ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 2>()), \
- ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 0>()), \
- ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 1>()), \
+#define SCALE_SHAPE_DATASET(element_per_iteration) \
+ concat(concat(concat(ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 0>(), \
+ ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 2>()), \
+ ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 1>()), \
ScaleShapesBaseDataSet<3, 3, (element_per_iteration), 0>())
// To prevent long precommit time for OpenCL, shape set for OpenCL is separated into below two parts.
@@ -166,11 +164,10 @@ framework::dataset::make("AlignCorners", { true }));
* - 3D shapes with 0 vector iterations (1 vector iteration is covered by SCALE_PRECOMMIT_SHAPE_DATASET)
* - 4D shapes with 0 vector iterations
*/
-#define SCALE_NIGHTLY_SHAPE_DATASET(element_per_iteration) \
- concat(concat(concat(concat(ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 0>(), \
- ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 1>()), \
- ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 2>()), \
- ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 0>()), \
+#define SCALE_NIGHTLY_SHAPE_DATASET(element_per_iteration) \
+ concat(concat(concat(ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 0>(), \
+ ScaleShapesBaseDataSet<1, 1, (element_per_iteration), 1>()), \
+ ScaleShapesBaseDataSet<3, 1, (element_per_iteration), 0>()), \
ScaleShapesBaseDataSet<3, 3, (element_per_iteration), 0>())
/** Generating dataset for non-quantized data tyeps with the given shapes */
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index b66cfd97e7..31eed7646c 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -186,6 +186,8 @@ TEST_SUITE_END() // ConvolutionLayer
TEST_SUITE(GEMMConvolutionLayer)
template <typename T>
using CLGEMMConvolutionLayerFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
+template <typename T>
+using CLGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, true>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
@@ -214,12 +216,30 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerFixture<float>, framework
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+ framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+ framework::dataset::make("Bias", TensorShape(2U))),
+ framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+ framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+ framework::dataset::make("Dilation", Size2D(1, 1))),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType",DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ ActivationFunctionsSmallDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
template <typename T>
using CLGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T>;
template <typename T>
+using CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, true>;
+template <typename T>
using CLGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLGEMMConvolutionLayer, T, int8_t>;
const auto QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
@@ -267,9 +287,25 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<uint8_t>
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+ framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+ framework::dataset::make("Bias", TensorShape(2U))),
+ framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+ framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+ framework::dataset::make("Dilation", Size2D(1, 1))),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ QuantizationData),
+ QuantizedActivationFunctionsSmallDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
-
FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
combine(combine(combine(combine(combine(datasets::SmallConvolutionLayerDataset(),
framework::dataset::make("ReshapeWeights", { true })),
@@ -281,6 +317,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLGEMMConvolutionLayerQuantizedFixture<int8_t>,
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+ framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+ framework::dataset::make("Bias", TensorShape(2U))),
+ framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+ framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+ framework::dataset::make("Dilation", Size2D(1, 1))),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ QuantizationData),
+ QuantizedActivationFunctionsSmallDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM8_PER_CHANNEL)
diff --git a/tests/validation/CL/DepthwiseConvolutionLayer.cpp b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
index b2009c26ad..c88f7c1624 100644
--- a/tests/validation/CL/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/CL/DepthwiseConvolutionLayer.cpp
@@ -154,6 +154,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(zi
template <typename T>
using CLDepthwiseConvolutionLayerFixture = DepthwiseConvolutionLayerValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer, T>;
+template <typename T>
+using CLDepthwiseConvolutionLayerMixedDataLayoutFixture = DepthwiseConvolutionLayerValidationFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer, T, true>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
@@ -347,6 +349,16 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerFixture<float>,
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, CLDepthwiseConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DepthMultiplier", { 2 })),
+ framework::dataset::make("DataType",
+ DataType::F32)),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())))
+{
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
large_depth_multipliers),
framework::dataset::make("DataType",
@@ -430,6 +442,8 @@ TEST_SUITE_END() // Float
template <typename T>
using CLDepthwiseConvolutionLayerQuantizedFixture = DepthwiseConvolutionLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer, T>;
template <typename T>
+using CLDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture = DepthwiseConvolutionLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer, T, true>;
+template <typename T>
using CLDepthwiseConvolutionLayerQuantizedPerChannelFixture = DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture<CLTensor, CLAccessor, CLDepthwiseConvolutionLayer, T, int8_t>;
TEST_SUITE(Quantized)
@@ -517,6 +531,17 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, CLDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(),
+ framework::dataset::make("DepthMultiplier", { 2 })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.5f, 10), QuantizationInfo(2.2f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())))
+{
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
FIXTURE_DATA_TEST_CASE_NEW(RunLarge, CLDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(),
large_depth_multipliers),
@@ -545,6 +570,17 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, CLDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
+ framework::dataset::make("DepthMultiplier", { 2 })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("SrcQuantizationInfo", { QuantizationInfo(0.3f, 10), QuantizationInfo(2.2f, 10) })),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 4) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())))
+{
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
TEST_SUITE(Dilation)
FIXTURE_DATA_TEST_CASE_NEW(RunSmall, CLDepthwiseConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(),
diff --git a/tests/validation/CL/DirectConvolutionLayer.cpp b/tests/validation/CL/DirectConvolutionLayer.cpp
index c01234020f..946de7f943 100644
--- a/tests/validation/CL/DirectConvolutionLayer.cpp
+++ b/tests/validation/CL/DirectConvolutionLayer.cpp
@@ -207,10 +207,14 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
template <typename T>
using CLDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
template <typename T>
+using CLDirectConvolutionLayerMixedDataLayoutFixture = DirectConvolutionValidationFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T, true>;
+template <typename T>
using CLDirectConvolutionValidationWithTensorShapesFixture = DirectConvolutionValidationWithTensorShapesFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
template <typename T>
using CLDirectConvolutionLayerQuantizedFixture = DirectConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
template <typename T>
+using CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture = DirectConvolutionValidationQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T, true>;
+template <typename T>
using CLDirectConvolutionValidationWithTensorShapesQuantizedFixture = DirectConvolutionValidationWithTensorShapesQuantizedFixture<CLTensor, CLAccessor, CLDirectConvolutionLayer, T>;
TEST_SUITE(NHWC)
@@ -271,7 +275,24 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerFixture<float>, framewo
{
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
+ TensorShape(9U, 5U, 6U, 4U),
+ TensorShape(3U, 5U, 7U, 2U),
+ TensorShape(32U, 37U, 3U) } ),
+ framework::dataset::make("StrideX", { 1 })),
+ framework::dataset::make("StrideY", { 2 })),
+ framework::dataset::make("PadX", { 1 })),
+ framework::dataset::make("PadY", { 3 })),
+ framework::dataset::make("KernelSize", { 3 })),
+ framework::dataset::make("NumKernels", { 3 })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputShape", { TensorShape(800U, 800U, 3U) } ),
@@ -287,7 +308,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerFixture<float>, framewo
{
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
-
TEST_SUITE_END() // FP32
TEST_SUITE(Quantized)
@@ -311,7 +331,25 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<uint8_
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
+ TensorShape(9U, 5U, 6U, 4U),
+ TensorShape(3U, 5U, 7U, 2U),
+ TensorShape(32U, 37U, 3U) } ),
+ framework::dataset::make("StrideX", { 1 })),
+ framework::dataset::make("StrideY", { 2 })),
+ framework::dataset::make("PadX", { 1 })),
+ framework::dataset::make("PadY", { 1 })),
+ framework::dataset::make("KernelSize", { 3 })),
+ framework::dataset::make("NumKernels", { 3 })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", QuantizationInfo(1.1f / 255, 10))),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputShape", { TensorShape(800U, 800U, 3U) } ),
@@ -330,7 +368,6 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture<uint8_
}
TEST_SUITE_END() // QASYMM8
-//
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(
@@ -351,7 +388,25 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<int8_t
{
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(
+ framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
+ TensorShape(9U, 5U, 6U, 4U),
+ TensorShape(3U, 5U, 7U, 2U),
+ TensorShape(32U, 37U, 3U) } ),
+ framework::dataset::make("StrideX", { 1 })),
+ framework::dataset::make("StrideY", { 1 })),
+ framework::dataset::make("PadX", { 1 })),
+ framework::dataset::make("PadY", { 1 })),
+ framework::dataset::make("KernelSize", { 3 })),
+ framework::dataset::make("NumKernels", { 3 })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("QuantizationInfo", QuantizationInfo(2.f / 255, 10))),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU) )),
+ framework::dataset::make("DataLayout", DataLayout::NHWC)))
+{
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(zip(zip(zip(zip(zip(zip(
framework::dataset::make("InputShape", { TensorShape(800U, 800U, 3U) } ),
@@ -401,6 +456,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerFixture<float>, framewo
{
validate(CLAccessor(_target), _reference, tolerance_fp32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit, framework::dataset::make("DataType",
+ DataType::F32)),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ validate(CLAccessor(_target), _reference, tolerance_fp32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLDirectConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(data_nightly, framework::dataset::make("DataType", DataType::F32)),
ActivationFunctionsDataset),
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
@@ -428,9 +490,17 @@ const auto QuantizedActivationFunctionsDataset = framework::dataset::make("Activ
});
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data_precommit,
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10)})),
+ QuantizedActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(data_precommit,
- framework::dataset::make("DataType",
- DataType::QASYMM8)),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255, 10), QuantizationInfo(1.1f, 10) })),
QuantizedActivationFunctionsDataset),
framework::dataset::make("DataLayout", { DataLayout::NCHW })))
@@ -494,7 +564,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLDirectConvolutionLayerQuantizedFixture<int8_t
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLDirectConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(data_precommit, framework::dataset::make("DataType",
+ DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.1f, -10) })),
+ QuantizedActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
FIXTURE_DATA_TEST_CASE(RunSmall9x9, CLDirectConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(combine(data_precommit_9x9,
framework::dataset::make("DataType",
DataType::QASYMM8_SIGNED)),
diff --git a/tests/validation/CL/FFT.cpp b/tests/validation/CL/FFT.cpp
index fb2f1f53e2..99a83abe5c 100644
--- a/tests/validation/CL/FFT.cpp
+++ b/tests/validation/CL/FFT.cpp
@@ -175,6 +175,8 @@ TEST_SUITE(FFTConvolutionLayer)
template <typename T>
using CLFFTConvolutionLayerFixture = FFTConvolutionValidationFixture<CLTensor, CLAccessor, CLFFTConvolutionLayer, T>;
+template <typename T>
+using CLFFTConvolutionLayerMixedDataLayoutFixture = FFTConvolutionValidationFixture<CLTensor, CLAccessor, CLFFTConvolutionLayer, T, true>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
@@ -186,6 +188,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture<float>, framework:
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLFFTConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ ActivationFunctionsSmallDataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num_f32);
+}
TEST_SUITE_END() // FP32
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(),
diff --git a/tests/validation/CL/FullyConnectedLayer.cpp b/tests/validation/CL/FullyConnectedLayer.cpp
index 78195a556b..066f0b689e 100644
--- a/tests/validation/CL/FullyConnectedLayer.cpp
+++ b/tests/validation/CL/FullyConnectedLayer.cpp
@@ -138,6 +138,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
template <typename T>
using CLFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T>;
+template <typename T>
+using CLFullyConnectedLayerMixedDataLayoutFixture = FullyConnectedLayerValidationFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, true>;
TEST_SUITE(Float)
TEST_SUITE(FP16)
@@ -167,6 +169,18 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerFixture<float>, framework:
// Validate output
validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLFullyConnectedLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLFullyConnectedLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeFullyConnectedLayerDataset(), FullyConnectedParameters),
framework::dataset::make("DataType", DataType::F32)),
ActivationFunctionsDataset))
@@ -179,6 +193,8 @@ TEST_SUITE_END()
template <typename T>
using CLFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T>;
+template <typename T>
+using CLFullyConnectedLayerQuantizedMixedDataLayoutFixture = FullyConnectedLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLFullyConnectedLayer, T, true>;
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
@@ -189,6 +205,20 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerQuantizedFixture<uint8_t>,
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLFullyConnectedLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ QuantizationData),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLFullyConnectedLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(datasets::LargeFullyConnectedLayerDataset(), FullyConnectedParameters), framework::dataset::make("DataType", DataType::QASYMM8)), QuantizationData),
ActivationFunctionsQuantizedDataset))
@@ -205,11 +235,24 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLFullyConnectedLayerQuantizedFixture<int8_t>,
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
-TEST_SUITE_END() /* QASYMM8_SIGNED */
-TEST_SUITE_END() /* Quantized */
-
-TEST_SUITE_END()
-TEST_SUITE_END()
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLFullyConnectedLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ QuantizationData),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE_END() // Quantized
+TEST_SUITE_END() // FullyConnectedLayer
+TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
index de5c9f2e8d..f42c187f8f 100644
--- a/tests/validation/CL/PoolingLayer.cpp
+++ b/tests/validation/CL/PoolingLayer.cpp
@@ -131,6 +131,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
template <typename T>
using CLPoolingLayerFixture = PoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+template <typename T>
+using CLPoolingLayerMixedDataLayoutFixture = PoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T, true>;
template <typename T>
using CLSpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
@@ -156,6 +158,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::Datase
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ combine(combine(combine(combine(datasets::PoolingTypes(),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) })),
+ framework::dataset::make("ExcludePadding", { false })),
+ framework::dataset::make("DataType", DataType::F32))),
+ pool_data_layout_dataset))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
framework::dataset::make("DataType",
DataType::F32))),
@@ -210,6 +223,8 @@ TEST_SUITE(Quantized)
template <typename T>
using CLPoolingLayerQuantizedFixture = PoolingLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+template <typename T>
+using CLPoolingLayerQuantizedMixedDataLayoutFixture = PoolingLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLPoolingLayer, T, true>;
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
@@ -222,6 +237,19 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framew
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+ combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
+ framework::dataset::make("ExcludePadding", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8))),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
+ framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10) })),
+ framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
@@ -235,6 +263,19 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<int8_t>, framewo
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8_s);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+ combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
+ framework::dataset::make("ExcludePadding", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
+ framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })),
+ framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_qasymm8_s);
+}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // PoolingLayer
diff --git a/tests/validation/CL/Scale.cpp b/tests/validation/CL/Scale.cpp
index 523b49deb7..2b34f1f353 100644
--- a/tests/validation/CL/Scale.cpp
+++ b/tests/validation/CL/Scale.cpp
@@ -210,6 +210,8 @@ TEST_SUITE_END() // Validate
template <typename T>
using CLScaleFixture = ScaleValidationFixture<CLTensor, CLAccessor, CLScale, T>;
+template <typename T>
+using CLScaleMixedDataLayoutFixture = ScaleValidationFixture<CLTensor, CLAccessor, CLScale, T, true>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
@@ -223,6 +225,15 @@ FIXTURE_DATA_TEST_CASE(Run, CLScaleFixture<float>, framework::DatasetMode::ALL,
// Validate output
validate(CLAccessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32, tolerance_f32_absolute);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLScaleMixedDataLayoutFixture<float>, framework::DatasetMode::ALL, ASSEMBLE_DATASET(f32_shape, ScaleSamplingPolicySet))
+{
+ //Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ const ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
+
+ // Validate output
+ validate(CLAccessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32, tolerance_f32_absolute);
+}
FIXTURE_DATA_TEST_CASE(RunAlignCorners, CLScaleFixture<float>, framework::DatasetMode::ALL, ASSEMBLE_DATASET(f32_shape, ScaleAlignCornersSamplingPolicySet))
{
//Create valid region
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 7a987ba65f..7ccc850be5 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -228,6 +228,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
}
using CLWinogradInputTransformFixtureFP32 = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float>;
+using CLWinogradInputTransformMixedDataLayoutFixtureFP32 = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float, true>;
using CLWinogradInputTransformFixtureFP16 = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, half>;
TEST_SUITE(NCHW)
@@ -238,7 +239,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixtureFP32, framework:
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradInputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT, combine(combine(
+ datasets::SmallWinogradInputTransformDataset2x2_3x3(),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataType", { DataType::F32 })))
+{
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNCHW,
framework::dataset::make("DataLayout", { DataLayout::NCHW })),
framework::dataset::make("DataType", { DataType::F32 })))
@@ -287,7 +294,13 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixtureFP32, framework:
{
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradInputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT, combine(combine(
+ datasets::SmallWinogradInputTransformDataset4x4_3x3(),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+ framework::dataset::make("DataType", { DataType::F32 })))
+{
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixtureFP32, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNHWC_FP32,
framework::dataset::make("DataLayout", { DataLayout::NHWC })),
framework::dataset::make("DataType", { DataType::F32 })))
@@ -335,6 +348,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
using CLWinogradFilterTransform = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradFilterTransformKernel, 0>;
using CLWinogradFilterTransformFixtureFP32 = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float>;
+using CLWinogradFilterTransformMixedDataLayoutFixtureFP32 = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float, true>;
using CLWinogradFilterTransformFixtureFP16 = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, half>;
TEST_SUITE(NCHW)
@@ -347,7 +361,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixtureFP32, framework
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradFilterTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::Small3x3Shapes(),
+ framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+ framework::dataset::make("DataType", { DataType::F32 })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixtureFP32, framework::DatasetMode::NIGHTLY,
combine(combine(LargeWinogradFilterTransformDatasetNCHW,
framework::dataset::make("DataLayout", { DataLayout::NCHW })),
@@ -407,7 +429,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixtureFP32, framework
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradFilterTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(datasets::Small3x3Shapes(),
+ framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+ framework::dataset::make("DataType", { DataType::F32 })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixtureFP32, framework::DatasetMode::NIGHTLY,
combine(combine(LargeWinogradFilterTransformDatasetNHWC_F32,
framework::dataset::make("DataLayout", { DataLayout::NHWC })),
@@ -474,6 +504,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
using CLWinogradOutputTransform = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradOutputTransformKernel, 0>;
using CLWinogradOutputTransformFixtureFP32 = WinogradOutputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradOutputTransform, float>;
+using CLWinogradOutputTransformMixedDataLayoutFixtureFP32 = WinogradOutputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradOutputTransform, float, true>;
using CLWinogradOutputTransformFixtureFP16 = WinogradOutputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradOutputTransform, half>;
TEST_SUITE(NCHW)
@@ -505,7 +536,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixtureFP32, framework
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradOutputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::ALL,
+ combine(combine(combine(framework::dataset::make("Input", TensorShape(13U, 6U, 16U)),
+                                     framework::dataset::make("WinogradInfo", WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(7U, 6U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+                                     framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixtureFP32, framework::DatasetMode::NIGHTLY,
combine(combine(LargeWinogradOutputTransformDatasetNCHW,
framework::dataset::make("DataType", { DataType::F32 })),
@@ -546,7 +585,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixtureFP32, framework
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradOutputTransformMixedDataLayoutFixtureFP32, framework::DatasetMode::ALL,
+ combine(combine(combine(framework::dataset::make("Input", TensorShape(1U, 4U, 64U)),
+ framework::dataset::make("WinogradInfo", WinogradInfo(Size2D(2U, 2U), Size2D(7U, 7U), Size2D(9U, 9U), PadStrideInfo(1, 1, 0, 0), DataLayout::NHWC))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+                                     framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixtureFP32, framework::DatasetMode::NIGHTLY,
combine(combine(LargeWinogradOutputTransformDatasetNHWC_F32,
framework::dataset::make("DataType", { DataType::F32 })),
@@ -604,6 +651,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
TEST_SUITE(FP32)
using CLWinogradConvolutionLayerFastMathFixture = WinogradConvolutionLayerFastMathValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, float>;
+using CLWinogradConvolutionLayerFastMathMixedDataLayoutFixture = WinogradConvolutionLayerFastMathValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, float, float, true, true>;
TEST_SUITE(Conv3x3)
FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
@@ -614,7 +662,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, fram
// Validate output
validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLWinogradConvolutionLayerFastMathMixedDataLayoutFixture, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(8U, 8U, 32U)),
+ framework::dataset::make("Weight", TensorShape(1U, 3U, 32U, 1U))),
+ framework::dataset::make("Bias", TensorShape(1U))),
+ framework::dataset::make("Output", TensorShape(8U, 6U, 1U))),
+ framework::dataset::make("PadStrideInfo", PadStrideInfo(1, 1, 0, 0))),
+ framework::dataset::make("Dilation", Size2D(1U, 1U))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsSmallDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::NIGHTLY,
combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
framework::dataset::make("DataType", { DataType::F32 })),
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 6b152c9b68..b435744cdc 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -150,6 +150,8 @@ TEST_SUITE_END() // ConvolutionLayer
TEST_SUITE(WinogradLayer)
template <typename T>
using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
+template <typename T>
+using NEWinogradConvolutionLayerMixedDataLayoutFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, true, true>;
template <typename T>
using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, false>;
@@ -166,6 +168,21 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, frame
// Validate output
validate(Accessor(_target), _reference, abs_tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEWinogradConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(8U, 8U, 32U)),
+ framework::dataset::make("Weight", TensorShape(1U, 3U, 32U, 1U))),
+ framework::dataset::make("Bias", TensorShape(1U))),
+ framework::dataset::make("Output", TensorShape(8U, 6U, 1U))),
+ framework::dataset::make("PadStrideInfo", PadStrideInfo(1, 1, 0, 0))),
+ framework::dataset::make("Dilation", Size2D(1U, 1U))),
+ framework::dataset::make("DataType", { DataType::F32 })),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, abs_tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(),
framework::dataset::make("DataType", { DataType::F32 })),
@@ -384,6 +401,8 @@ TEST_SUITE_END() // WinogradLayer
TEST_SUITE(GEMMConvolutionLayer)
template <typename T>
using NEGEMMConvolutionLayerFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T>;
+template <typename T>
+using NEGEMMConvolutionLayerMixedDataLayoutFixture = ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T, true>;
TEST_SUITE(Float)
#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
@@ -424,11 +443,29 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerFixture<float>, framework
// Validate output
validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+ framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+ framework::dataset::make("Bias", TensorShape(2U))),
+ framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+ framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+ framework::dataset::make("Dilation", Size2D(1, 1))),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ ActivationFunctionsDataset))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, rel_tolerance_f32, 0.f, float(abs_tolerance_f32));
+}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
template <typename T>
using NEGEMMConvolutionLayerQuantizedFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T>;
+template <typename T>
+using NEGEMMConvolutionLayerQuantizedMixedDataLayoutFixture = ConvolutionValidationQuantizedFixture<Tensor, Accessor, NEConvolutionLayer, T, true>;
template <typename T>
using NEGEMMConvolutionLayerQuantizedPerChannelFixture = ConvolutionValidationQuantizedPerChannelFixture<Tensor, Accessor, NEConvolutionLayer, T, int8_t>;
@@ -451,6 +488,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<uint8_t>
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+ framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+ framework::dataset::make("Bias", TensorShape(2U))),
+ framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+ framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+ framework::dataset::make("Dilation", Size2D(1, 1))),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ QuantizedActivationFunctionsDataset))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
@@ -464,6 +518,23 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEGEMMConvolutionLayerQuantizedFixture<int8_t>,
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEGEMMConvolutionLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::ALL,
+ combine(combine(combine(combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(23U, 27U, 5U)),
+ framework::dataset::make("Weights", TensorShape(3U, 3U, 5U, 2U))),
+ framework::dataset::make("Bias", TensorShape(2U))),
+ framework::dataset::make("Output", TensorShape(11U, 25U, 2U))),
+ framework::dataset::make("PadStrideInfo", PadStrideInfo(2, 1, 0, 0))),
+ framework::dataset::make("Dilation", Size2D(1, 1))),
+ framework::dataset::make("ReshapeWeights", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+ QuantizedActivationFunctionsDataset))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM8_PER_CHANNEL)
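A note on the deeply nested dataset expressions in the cases above: combine(A, B) forms the Cartesian product of two datasets, and framework::dataset::make(...) names one axis of that product, so each RunMixedDataLayout case expands into one test per parameter combination. A toy sketch, with axis values invented purely for illustration and the usual test-framework usings assumed:

    // 2 layouts x 2 types = 4 parameter tuples; deeper nesting adds more axes.
    const auto layouts = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });
    const auto types   = framework::dataset::make("DataType", { DataType::F32, DataType::QASYMM8 });
    const auto grid    = combine(layouts, types);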
diff --git a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
index 6bb40be036..a9c4edf5dd 100644
--- a/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
+++ b/tests/validation/NEON/DepthwiseConvolutionLayer.cpp
@@ -242,19 +242,28 @@ DATA_TEST_CASE(ValidateGeneric, framework::DatasetMode::ALL, zip(zip(zip(zip(zip
// *INDENT-ON*
template <typename T>
using NEDepthwiseConvolutionLayerFixture = DepthwiseConvolutionLayerValidationFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer, T>;
+template <typename T>
+using NEDepthwiseConvolutionLayerMixedDataLayoutFixture = DepthwiseConvolutionLayerValidationFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer, T, true>;
TEST_SUITE(Float)
TEST_SUITE(F32)
TEST_SUITE(Generic)
FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
depth_multipliers),
- framework::dataset::make("DataType",
- DataType::F32)),
+ framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
ActivationFunctionsDataset))
{
validate(Accessor(_target), _reference, tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, NEDepthwiseConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
+ framework::dataset::make("DepthMultiplier", { 2 })),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())))
+{
+ validate(Accessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset(),
large_depth_multipliers),
framework::dataset::make("DataType",
@@ -345,6 +354,15 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerFixture<float
{
validate(Accessor(_target), _reference, tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout3x3, NEDepthwiseConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DepthMultiplier", 1)),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())))
+{
+ validate(Accessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset5x5(),
framework::dataset::make("DepthMultiplier", 1)),
@@ -501,9 +519,9 @@ TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float
template <typename T>
-using NEDepthwiseConvolutionLayerQuantizedFixtureOptimized = DepthwiseConvolutionLayerValidationQuantizedFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer, T>;
-template <typename T>
using NEDepthwiseConvolutionLayerQuantizedFixture = DepthwiseConvolutionLayerValidationQuantizedFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer, T>;
+template <typename T>
+using NEDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture = DepthwiseConvolutionLayerValidationQuantizedFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer, T, true>;
using NEDepthwiseConvolutionLayerQuantizedSymmetricPerChannelFixture = DepthwiseConvolutionLayerValidationQuantizedPerChannelFixture<Tensor, Accessor, NEDepthwiseConvolutionLayer, uint8_t, int8_t>;
TEST_SUITE(Quantized)
@@ -520,7 +538,17 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-
+FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout, NEDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset(),
+ framework::dataset::make("DepthMultiplier", { 2 })),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ input_qinfo_dataset),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 4) })),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())))
+{
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
TEST_SUITE(Dilation)
FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset(),
@@ -547,7 +575,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture
TEST_SUITE_END() // Dilation
TEST_SUITE_END() // Generic
TEST_SUITE(W3x3)
-FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<uint8_t>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(), depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8)),
input_qinfo_dataset),
@@ -557,7 +585,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<uint8_t>, framework::DatasetMode::NIGHTLY,
+FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
large_depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8)),
@@ -571,7 +599,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture
TEST_SUITE(Dilation)
-FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<uint8_t>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(), depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8)),
input_qinfo_dataset),
@@ -581,7 +609,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<uint8_t>, framework::DatasetMode::NIGHTLY,
+FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(),
large_depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8)),
@@ -596,11 +624,10 @@ TEST_SUITE_END() // Dilation
TEST_SUITE_END() // W3x3
TEST_SUITE(Optimized)
-FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<uint8_t>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(),
framework::dataset::make("DepthMultiplier", 1)),
- framework::dataset::make("DataType",
- DataType::QASYMM8)),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
input_qinfo_dataset),
framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
framework::dataset::make("DataLayout", { DataLayout::NHWC })),
@@ -608,7 +635,18 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixt
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<uint8_t>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE_NEW(RunMixedDataLayout3x3, NEDepthwiseConvolutionLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(),
+ framework::dataset::make("DepthMultiplier", 1)),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ input_qinfo_dataset),
+ framework::dataset::make("DstQuantizationInfo", { QuantizationInfo(0.5f, 10) })),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo())))
+{
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset5x5(),
framework::dataset::make("DepthMultiplier", 1)),
framework::dataset::make("DataType",
@@ -620,7 +658,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixt
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE_NEW(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<uint8_t>, framework::DatasetMode::NIGHTLY,
+FIXTURE_DATA_TEST_CASE_NEW(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(combine(combine(datasets::LargeOptimizedDepthwiseConvolutionLayerDataset3x3(),
framework::dataset::make("DepthMultiplier", 1)),
framework::dataset::make("DataType",
@@ -676,7 +714,7 @@ TEST_SUITE_END() // Dilation
TEST_SUITE_END() // Generic
TEST_SUITE(W3x3)
-FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<int8_t>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseConvolutionLayerDataset3x3(), depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
input_qinfo_dataset),
@@ -686,7 +724,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<int8_t>, framework::DatasetMode::NIGHTLY,
+FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseConvolutionLayerDataset3x3(),
large_depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
@@ -699,7 +737,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture
}
TEST_SUITE(Dilation)
-FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<int8_t>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallDepthwiseDilatedConvolutionLayerDataset3x3(), depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
input_qinfo_dataset),
@@ -709,7 +747,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall, NEDepthwiseConvolutionLayerQuantizedFixture
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<int8_t>, framework::DatasetMode::NIGHTLY,
+FIXTURE_DATA_TEST_CASE_NEW(RunLarge, NEDepthwiseConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(combine(combine(datasets::LargeDepthwiseDilatedConvolutionLayerDataset3x3(),
large_depth_multipliers),
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
@@ -724,7 +762,7 @@ TEST_SUITE_END() // Dilation
TEST_SUITE_END() // W3x3
TEST_SUITE(Optimized)
-FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<int8_t>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset3x3(),
framework::dataset::make("DepthMultiplier", 1)),
framework::dataset::make("DataType",
@@ -736,7 +774,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall3x3, NEDepthwiseConvolutionLayerQuantizedFixt
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<int8_t>, framework::DatasetMode::PRECOMMIT,
+FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
combine(combine(combine(combine(combine(combine(datasets::SmallOptimizedDepthwiseConvolutionLayerDataset5x5(),
framework::dataset::make("DepthMultiplier", 1)),
framework::dataset::make("DataType",
@@ -748,7 +786,7 @@ FIXTURE_DATA_TEST_CASE_NEW(RunSmall5x5, NEDepthwiseConvolutionLayerQuantizedFixt
{
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-FIXTURE_DATA_TEST_CASE_NEW(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixtureOptimized<int8_t>, framework::DatasetMode::NIGHTLY,
+FIXTURE_DATA_TEST_CASE_NEW(RunLarge3x3, NEDepthwiseConvolutionLayerQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY,
combine(combine(combine(combine(combine(combine(datasets::LargeOptimizedDepthwiseConvolutionLayerDataset3x3(),
framework::dataset::make("DepthMultiplier", 1)),
framework::dataset::make("DataType",
diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp
index ffffe7e3d5..c3d6e94bfc 100644
--- a/tests/validation/NEON/DirectConvolutionLayer.cpp
+++ b/tests/validation/NEON/DirectConvolutionLayer.cpp
@@ -278,6 +278,8 @@ DATA_TEST_CASE(NoPaddingNHWCKernel, framework::DatasetMode::ALL, combine(combine
template <typename T>
using NEDirectConvolutionLayerFixture = DirectConvolutionValidationFixture<Tensor, Accessor, NEDirectConvolutionLayer, T>;
+template <typename T>
+using NEDirectConvolutionLayerMixedDataLayoutFixture = DirectConvolutionValidationFixture<Tensor, Accessor, NEDirectConvolutionLayer, T, true>;
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -309,6 +311,14 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEDirectConvolutionLayerFixture<float>, framewo
// Validate output
validate(Accessor(_target), _reference, tolerance_fp32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEDirectConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit,
+ framework::dataset::make("DataType", DataType::F32)),
+ ActivationFunctionsDataset),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_fp32);
+}
FIXTURE_DATA_TEST_CASE(RunSmall9x9, NEDirectConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(data_precommit9x9, framework::dataset::make("DataType",
DataType::F32)),
ActivationFunctionsDataset),
diff --git a/tests/validation/NEON/FFT.cpp b/tests/validation/NEON/FFT.cpp
index 7125158a21..f7ef0a314e 100644
--- a/tests/validation/NEON/FFT.cpp
+++ b/tests/validation/NEON/FFT.cpp
@@ -158,6 +158,8 @@ TEST_SUITE(FFTConvolutionLayer)
template <typename T>
using NEFFTConvolutionLayerFixture = FFTConvolutionValidationFixture<Tensor, Accessor, NEFFTConvolutionLayer, T>;
+template <typename T>
+using NEFFTConvolutionLayerMixedDataLayoutFixture = FFTConvolutionValidationFixture<Tensor, Accessor, NEFFTConvolutionLayer, T, true>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
@@ -169,10 +171,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFFTConvolutionLayerFixture<float>, framework:
// Validate output
validate(Accessor(_target), _reference, tolerance_f32, tolerance_num);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFFTConvolutionLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+ ActivationFunctionsSmallDataset))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f32, tolerance_num);
+}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float
TEST_SUITE_END() // FFTConvolutionLayer
-
TEST_SUITE_END() // Neon
} // namespace validation
} // namespace test
diff --git a/tests/validation/NEON/FullyConnectedLayer.cpp b/tests/validation/NEON/FullyConnectedLayer.cpp
index d8c2203802..4bb48bf42c 100644
--- a/tests/validation/NEON/FullyConnectedLayer.cpp
+++ b/tests/validation/NEON/FullyConnectedLayer.cpp
@@ -143,6 +143,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
template <typename T>
using NEFullyConnectedLayerFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
+template <typename T>
+using NEFullyConnectedLayerMixedDataLayoutFixture = FullyConnectedLayerValidationFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
TEST_SUITE(Float)
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
@@ -183,6 +185,18 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerFixture<float>, framework:
// Validate output
validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, rel_tolerance_f32, 0, abs_tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(
combine(datasets::FullyConnectedLayerWithActivationDataset(),
FullyConnectedParameters),
@@ -204,6 +218,8 @@ TEST_SUITE_END()
template <typename T>
using NEFullyConnectedLayerQuantizedFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T>;
+template <typename T>
+using NEFullyConnectedLayerQuantizedMixedDataLayoutFixture = FullyConnectedLayerValidationQuantizedFixture<Tensor, Accessor, NEFullyConnectedLayer, T, true>;
TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
@@ -217,7 +233,20 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<uint8_t>,
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8)),
+ QuantizationData),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
combine(datasets::FullyConnectedLayerWithActivationDataset(),
FullyConnectedParameters),
@@ -251,7 +280,20 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEFullyConnectedLayerQuantizedFixture<int8_t>,
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}
-
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEFullyConnectedLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+ combine(combine(combine(combine(combine(combine(combine(
+ framework::dataset::make("Input", TensorShape(9U, 5U, 7U)),
+ framework::dataset::make("Weights", TensorShape(315U, 271U))),
+ framework::dataset::make("Biases", TensorShape(271U))),
+ framework::dataset::make("Output", TensorShape(271U))),
+ FullyConnectedParameters),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
+ QuantizationData),
+ framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))))
+{
+ // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
+}
FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(
combine(datasets::FullyConnectedLayerWithActivationDataset(),
FullyConnectedParameters),
@@ -262,11 +304,10 @@ FIXTURE_DATA_TEST_CASE(RunWithActivation, NEFullyConnectedLayerQuantizedFixture<
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
}
-TEST_SUITE_END()
-TEST_SUITE_END()
-
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // QASYMM8_SIGNED
+TEST_SUITE_END() // Quantized
+TEST_SUITE_END() // FullyConnectedLayer
+TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute
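The hard-coded shapes in the new fully connected cases are mutually consistent: the 3-D input TensorShape(9U, 5U, 7U) is flattened before the matrix multiply, and 9 * 5 * 7 = 315 matches the first dimension of the TensorShape(315U, 271U) weights, while 271 is shared by weights, biases and output. A quick sanity check of that arithmetic, as a sketch (the helper name is hypothetical):

    #include <cassert>
    void check_fc_shapes()
    {
        const arm_compute::TensorShape input(9U, 5U, 7U);
        const arm_compute::TensorShape weights(315U, 271U);
        assert(input.total_size() == weights.x()); // 9 * 5 * 7 == 315 flattened inputs
        assert(weights.y() == 271U);               // matches Biases/Output TensorShape(271U)
    }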
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
index 24e552ed0c..9a6af49836 100644
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ b/tests/validation/NEON/PoolingLayer.cpp
@@ -130,6 +130,8 @@ using NEPoolingLayerIndicesFixture = PoolingLayerIndicesValidationFixture<Tensor
template <typename T>
using NEPoolingLayerFixture = PoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, T>;
+template <typename T>
+using NEPoolingLayerMixedDataLayoutFixture = PoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, T, true>;
template <typename T>
using NESpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, T>;
@@ -165,6 +167,17 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixture<float>, framework::Datase
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEPoolingLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
+ combine(combine(combine(combine(datasets::PoolingTypes(),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) })),
+ framework::dataset::make("ExcludePadding", { false })),
+ framework::dataset::make("DataType", DataType::F32))),
+ pool_data_layout_dataset))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_f32);
+}
FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
framework::dataset::make("DataType",
DataType::F32))),
@@ -199,6 +212,8 @@ TEST_SUITE(Quantized)
template <typename T>
using NEPoolingLayerQuantizedFixture = PoolingLayerValidationQuantizedFixture<Tensor, Accessor, NEPoolingLayer, T>;
+template <typename T>
+using NEPoolingLayerQuantizedMixedDataLayoutFixture = PoolingLayerValidationQuantizedFixture<Tensor, Accessor, NEPoolingLayer, T, true>;
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmallNCHW, NEPoolingLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
@@ -221,24 +236,40 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerQuantizedFixture<uint8_t>, framew
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
}
-TEST_SUITE_END() // QASYMM8
-TEST_SUITE(QASYMM8_SIGNED)
-FIXTURE_DATA_TEST_CASE(RunSmallNCHW, NEPoolingLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
- combine(PoolingLayerDatasetQASYMM8Small,
- framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
- framework::dataset::make("DataLayout", { DataLayout::NCHW })),
- qasymm8_signed_in_qinfo_dataset),
- qasymm8_signed_in_qinfo_dataset))
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEPoolingLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+ combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
+ framework::dataset::make("ExcludePadding", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8))),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
+ framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10) })),
+ framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5) })))
{
// Validate output
- validate(Accessor(_target), _reference, tolerance_qasymm8_s);
+ validate(Accessor(_target), _reference, tolerance_qasymm8);
}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
combine(PoolingLayerDatasetQASYMM8Small,
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
- framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+ framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
qasymm8_signed_in_qinfo_dataset),
- qasymm8_signed_out_qinfo_dataset))
+ qasymm8_signed_in_qinfo_dataset))
+{
+ // Validate output
+ validate(Accessor(_target), _reference, tolerance_qasymm8_s);
+}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEPoolingLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+ combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
+ framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
+ framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
+ framework::dataset::make("ExcludePadding", { true })),
+ framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
+ framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
+ framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })),
+ framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_s);
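The QuantizationInfo(scale, offset) pairs fed to the quantized pooling cases above follow the library's asymmetric mapping real = scale * (quantized - offset). One worked line under that assumption:

    // With QuantizationInfo(1.f / 255.f, 10), quantized value 138 decodes to
    // (138 - 10) / 255 ~= 0.502 in real space.
    const arm_compute::QuantizationInfo qinfo(1.f / 255.f, 10);
    const arm_compute::UniformQuantizationInfo uq = qinfo.uniform();
    const float real = uq.scale * (138 - uq.offset);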
diff --git a/tests/validation/NEON/Scale.cpp b/tests/validation/NEON/Scale.cpp
index eab241cb88..64427ae34f 100644
--- a/tests/validation/NEON/Scale.cpp
+++ b/tests/validation/NEON/Scale.cpp
@@ -316,7 +316,11 @@ DATA_TEST_CASE(CheckNoPaddingInterpAREA, framework::DatasetMode::ALL, combine(co
template <typename T>
using NEScaleFixture = ScaleValidationFixture<Tensor, Accessor, NEScale, T>;
template <typename T>
+using NEScaleMixedDataLayoutFixture = ScaleValidationFixture<Tensor, Accessor, NEScale, T, true>;
+template <typename T>
using NEScaleQuantizedFixture = ScaleValidationQuantizedFixture<Tensor, Accessor, NEScale, T>;
+template <typename T>
+using NEScaleQuantizedMixedDataLayoutFixture = ScaleValidationQuantizedFixture<Tensor, Accessor, NEScale, T, true>;
TEST_SUITE(Float)
TEST_SUITE(FP32)
@@ -330,6 +334,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEScaleFixture<float>, framework::DatasetMode::
// Validate output
validate(Accessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEScaleMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, ASSEMBLE_DATASET(f32_shape, ScaleSamplingPolicySet))
+{
+    // Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
+
+ // Validate output
+ validate(Accessor(_target), _reference, valid_region, tolerance_f32, tolerance_num_f32);
+}
FIXTURE_DATA_TEST_CASE(RunSmallAlignCorners, NEScaleFixture<float>, framework::DatasetMode::ALL, ASSEMBLE_DATASET(f32_shape, ScaleAlignCornersSamplingPolicySet))
{
//Create valid region
@@ -422,6 +435,15 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NEScaleQuantizedFixture<uint8_t>, framework::Da
// Validate output
validate(Accessor(_target), _reference, valid_region, tolerance_u8);
}
+FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, NEScaleQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::ALL, ASSEMBLE_QUANTIZED_DATASET(qasymm8_shape, ScaleSamplingPolicySet, QuantizationInfoSet))
+{
+    // Create valid region
+ TensorInfo src_info(_shape, 1, _data_type);
+ ValidRegion valid_region = calculate_valid_region_scale(src_info, _reference.shape(), _policy, _sampling_policy, (_border_mode == BorderMode::UNDEFINED));
+
+ // Validate output
+ validate(Accessor(_target), _reference, valid_region, tolerance_u8);
+}
FIXTURE_DATA_TEST_CASE(RunSmallAlignCorners, NEScaleQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, ASSEMBLE_QUANTIZED_DATASET(qasymm8_shape, ScaleAlignCornersSamplingPolicySet,
QuantizationInfoSet))
{
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index a4db49fc8e..07790e84d9 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -69,8 +69,9 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
- DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info)
+ DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, bool mixed_layout = false)
{
+ _mixed_layout = mixed_layout;
_data_type = data_type;
_weights_data_type = weights_data_type;
_is_quantized = is_data_type_quantized_asymmetric(data_type);
@@ -86,6 +87,21 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+        // Test the multi data-layout graph case, where the data layout changes after configure()
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+        // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
void regularize_values(void *values, size_t size)
{
float *fvalues = static_cast<float *>(values);
@@ -214,8 +230,15 @@ protected:
fill(AccessorType(weights), 1);
fill(AccessorType(bias), 2);
- // Compute NEConvolutionLayer function
- conv.run();
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
@@ -264,9 +287,10 @@ protected:
QuantizationInfo _weight_quantization_info{};
bool _is_quantized = false;
bool _is_bfloat16 = false;
+ bool _mixed_layout = false;
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
@@ -276,11 +300,11 @@ public:
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
data_type, data_type, data_layout,
- QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
@@ -289,7 +313,7 @@ public:
DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
- data_type, data_type, data_layout, quantization_info, quantization_info, act_info);
+ data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout);
}
};
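The mix_layout() helper added above is the heart of this patch. Stripped of fixture plumbing, the exercise is: configure with one layout, re-tag the tensor infos with the other layout, run, then restore before validation. A standalone sketch of that sequence (tensor and function names are illustrative):

    // conv was configured while src/dst carried `original`; flipping afterwards
    // checks that the function honours the layout it sees at run() time.
    const DataLayout original = src.info()->data_layout();
    const DataLayout flipped  = (original == DataLayout::NCHW) ? DataLayout::NHWC : DataLayout::NCHW;
    src.info()->set_data_layout(flipped);
    dst.info()->set_data_layout(flipped);
    conv.run();
    src.info()->set_data_layout(original); // restore so validate() compares like with like
    dst.info()->set_data_layout(original);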
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index d9806b5c84..0aa43d82b4 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -59,8 +59,9 @@ public:
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation,
unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type,
QuantizationInfo input_quantization_info, QuantizationInfo weights_quantization_info, QuantizationInfo output_quantization_info,
- DataLayout data_layout, ActivationLayerInfo act_info)
+ DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false)
{
+ _mixed_layout = mixed_layout;
_input_shape = in_shape;
_input_data_type = input_data_type;
_weights_data_type = weights_data_type;
@@ -130,9 +131,16 @@ public:
fill(AccessorType(_src), 0);
fill(AccessorType(_weights), 1);
fill(AccessorType(_biases), 2);
-
- // Compute function
- _dwc.run();
+
+ if(_mixed_layout)
+ {
+ mix_layout(_dwc, _src, _target);
+ }
+ else
+ {
+ // Compute function
+ _dwc.run();
+ }
}
void compute_reference()
@@ -150,6 +158,21 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+        // Test the multi data-layout graph case, where the data layout changes after configure()
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+        // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -214,9 +237,10 @@ protected:
ActivationLayerInfo _act_info{};
unsigned int _depth_multiplier{};
Size2D _dilation{};
+ bool _mixed_layout{false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DepthwiseConvolutionLayerValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
@@ -226,7 +250,7 @@ public:
{
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
data_type, data_type, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(),
- data_layout, act_info);
+ data_layout, act_info, mixed_layout);
}
};
@@ -434,8 +458,15 @@ public:
fill(AccessorType(_weights), 1);
fill(AccessorType(_biases), 2);
+        // Test the multi data-layout graph case, where the data layout changes after configure()
+ _src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ _target.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
// Compute function
_dwc.run();
+
+        // Restore the original data layout so the test suite can validate the values
+ _target.info()->set_data_layout(_data_layout);
}
void compute_reference()
@@ -496,7 +527,7 @@ protected:
unsigned int _n0{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
@@ -506,7 +537,7 @@ public:
{
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type,
data_type, input_quantization_info, input_quantization_info, output_quantization_info,
- data_layout, act_info);
+ data_layout, act_info, mixed_layout);
}
};
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index 8e4de77535..5ed0b9f9a3 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -53,10 +53,11 @@ public:
template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
- DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
{
_quantization_info = quantization_info;
_data_type = data_type;
+ _mixed_layout = mixed_layout;
TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
@@ -89,6 +90,22 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ DataLayout data_layout = src.info()->data_layout();
+        // Test the multi data-layout graph case, where the data layout changes after configure()
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+        // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -171,8 +188,15 @@ protected:
fill(AccessorType(weights), 1);
fill(AccessorType(bias), 2);
- // Compute NEConvolutionLayer function
- conv.run();
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
@@ -197,9 +221,10 @@ protected:
SimpleTensor<T> _reference{};
QuantizationInfo _quantization_info{};
DataType _data_type{};
+    bool _mixed_layout{false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -208,11 +233,11 @@ public:
DataLayout data_layout)
{
DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
- act_info, data_layout);
+ act_info, data_layout, mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -221,7 +246,7 @@ public:
ActivationLayerInfo act_info, DataLayout data_layout)
{
DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
- act_info, data_layout);
+ act_info, data_layout, mixed_layout);
}
};
diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h
index 86a97272a0..199730d5d0 100644
--- a/tests/validation/fixtures/FFTFixture.h
+++ b/tests/validation/fixtures/FFTFixture.h
@@ -134,8 +134,9 @@ class FFTConvolutionValidationGenericFixture : public framework::Fixture
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
+ DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false)
{
+ _mixed_layout = mixed_layout;
_data_type = data_type;
_data_layout = data_layout;
@@ -144,6 +145,21 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+        // Test the multi data-layout graph case, where the data layout changes after configure()
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+        // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -209,10 +225,16 @@ protected:
fill(AccessorType(src), 0);
fill(AccessorType(weights), 1);
fill(AccessorType(bias), 2);
-
- // Compute convolution function
- conv.run();
-
+
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
@@ -239,9 +261,10 @@ protected:
SimpleTensor<T> _reference{};
DataType _data_type{};
DataLayout _data_layout{};
+ bool _mixed_layout{false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -250,7 +273,7 @@ public:
DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
{
FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
- data_type, data_layout, act_info);
+ data_type, data_layout, act_info, mixed_layout);
}
};
} // namespace validation
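
Editor's sketch: two variants of the helper appear in this patch: the FFT fixture flips relative to the stored _data_layout member, while most others query src.info()->data_layout() at call time. The two agree whenever the member mirrors the configured layout, and flipping twice is the identity, which is what makes the restore step correct. A small sketch of both properties (flip is a hypothetical helper, not part of the patch):

    #include <cassert>

    enum class DataLayout { NCHW, NHWC };

    // Hypothetical helper: the toggle both variants reduce to.
    constexpr DataLayout flip(DataLayout dl)
    {
        return dl == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW;
    }

    int main()
    {
        const DataLayout configured = DataLayout::NHWC; // what setup() stored in _data_layout
        const DataLayout queried    = configured;       // what src.info()->data_layout() returns pre-toggle

        assert(flip(configured) == flip(queried));    // member-based and query-based variants agree
        assert(flip(flip(configured)) == configured); // toggling twice restores the original
        return 0;
    }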
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 3760cfb8b7..8f38aae187 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -56,11 +56,12 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
- DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info, bool mixed_layout = false)
{
ARM_COMPUTE_UNUSED(weights_shape);
ARM_COMPUTE_UNUSED(bias_shape);
+ _mixed_layout = mixed_layout;
_data_type = data_type;
_bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
_quantization_info = quantization_info;
@@ -71,6 +72,22 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute fully connected function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -189,8 +206,15 @@ protected:
fill(AccessorType(weights), 1);
}
- // Compute NEFullyConnectedLayer function
- fc.run();
+ if(_mixed_layout)
+ {
+ mix_layout(fc, src, dst);
+ }
+ else
+ {
+ // Compute NEFullyConnectedLayer function
+ fc.run();
+ }
return dst;
}
@@ -214,11 +238,12 @@ protected:
SimpleTensor<T> _reference{};
DataType _data_type{};
DataType _bias_data_type{};
+ bool _mixed_layout{false};
QuantizationInfo _quantization_info{};
ActivationLayerInfo _activation_info{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -228,11 +253,11 @@ public:
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- QuantizationInfo(), activation_info);
+ QuantizationInfo(), activation_info, mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FullyConnectedLayerValidationQuantizedFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -242,7 +267,7 @@ public:
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- quantization_info, activation_info);
+ quantization_info, activation_info, mixed_layout);
}
};
} // namespace validation
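
Editor's sketch: the plumbing is the same for every fixture pair above: the thin validation wrapper gains a compile-time `bool mixed_layout = false` template parameter and forwards it into a new defaulted trailing argument on the generic fixture's setup(), so existing instantiations compile unchanged. A reduced sketch of that forwarding, with hypothetical GenericFixture/ValidationFixture names:

    #include <iostream>

    struct GenericFixture
    {
        // Defaulted trailing parameter keeps old call sites source-compatible.
        void setup(int shape, bool mixed_layout = false)
        {
            _mixed_layout = mixed_layout;
            std::cout << "shape=" << shape << " mixed=" << _mixed_layout << '\n';
        }
        bool _mixed_layout{ false };
    };

    template <bool mixed_layout = false>
    struct ValidationFixture : public GenericFixture
    {
        void setup(int shape)
        {
            GenericFixture::setup(shape, mixed_layout); // forward the compile-time flag
        }
    };

    int main()
    {
        ValidationFixture<>     plain; // behaves exactly as before the patch
        ValidationFixture<true> mixed; // new mixed-layout variant
        plain.setup(32);
        mixed.setup(32);
        return 0;
    }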
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index af078d4ce3..ee81ff5538 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -47,14 +47,31 @@ class PoolingLayerValidationGenericFixture : public framework::Fixture
public:
template <typename...>
void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, bool indices = false,
- QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo())
+ QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false)
{
+ _mixed_layout = mixed_layout;
_pool_info = pool_info;
_target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
_reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute pooling function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor)
{
@@ -110,9 +127,15 @@ protected:
// Fill tensors
fill(AccessorType(src));
- // Compute function
- pool_layer.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(pool_layer, src, dst);
+ }
+ else
+ {
+ // Compute function
+ pool_layer.run();
+ }
return dst;
}
@@ -129,6 +152,7 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
PoolingLayerInfo _pool_info{};
+ bool _mixed_layout{false};
TensorType _target_indices{};
SimpleTensor<uint32_t> _ref_indices{};
};
@@ -144,7 +168,7 @@ public:
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class PoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -152,7 +176,7 @@ public:
void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout)
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding),
- data_type, data_layout);
+ data_type, data_layout, false, mixed_layout);
}
};
@@ -168,7 +192,7 @@ public:
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class PoolingLayerValidationQuantizedFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -177,7 +201,7 @@ public:
QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo())
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding),
- data_type, data_layout, false, input_qinfo, output_qinfo);
+ data_type, data_layout, false, input_qinfo, output_qinfo, mixed_layout);
}
};
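
Editor's sketch: one wrinkle specific to pooling: the generic setup() already has defaulted trailing parameters (indices, input_qinfo, output_qinfo), so the wrapper must pass placeholders for the ones it skips, as in `data_type, data_layout, false, mixed_layout` above. A toy sketch of why the placeholder is required (names are illustrative):

    #include <iostream>

    // With several defaulted trailing parameters, a caller that wants to set a
    // later one must spell out every earlier one it would otherwise skip.
    void setup(int shape, bool indices = false, bool mixed_layout = false)
    {
        std::cout << shape << ' ' << indices << ' ' << mixed_layout << '\n';
    }

    int main()
    {
        setup(7);              // old call site: both flags default to false
        setup(7, false, true); // mixed-layout variant must still pass `indices`
        return 0;
    }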
diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h
index dd521470e6..9e0f620abe 100644
--- a/tests/validation/fixtures/ScaleFixture.h
+++ b/tests/validation/fixtures/ScaleFixture.h
@@ -46,7 +46,7 @@ class ScaleValidationGenericFixture : public framework::Fixture
public:
template <typename...>
void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy,
- bool align_corners)
+ bool align_corners, bool mixed_layout)
{
_shape = shape;
_policy = policy;
@@ -55,6 +55,7 @@ public:
_data_type = data_type;
_quantization_info = quantization_info;
_align_corners = align_corners;
+ _mixed_layout = mixed_layout;
generate_scale(shape);
@@ -67,6 +68,22 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute scale function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
void generate_scale(const TensorShape &shape)
{
static constexpr float _min_scale{ 0.25f };
@@ -155,9 +172,15 @@ protected:
// Fill tensors
fill(AccessorType(src));
- // Compute function
- scale.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(scale, src, dst);
+ }
+ else
+ {
+ // Compute function
+ scale.run();
+ }
return dst;
}
@@ -182,11 +205,12 @@ protected:
DataType _data_type{};
QuantizationInfo _quantization_info{};
bool _align_corners{ false };
+ bool _mixed_layout{ false };
float _scale_x{ 1.f };
float _scale_y{ 1.f };
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ScaleValidationQuantizedFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -201,10 +225,11 @@ public:
policy,
border_mode,
sampling_policy,
- align_corners);
+ align_corners,
+ mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ScaleValidationFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -218,7 +243,8 @@ public:
policy,
border_mode,
sampling_policy,
- align_corners);
+ align_corners,
+ mixed_layout);
}
};
} // namespace validation
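
Editor's sketch: note that, unlike the other fixtures, the Scale generic setup() takes mixed_layout as a required parameter rather than a defaulted one, so both wrappers had to be updated. The toggle/restore bracketing itself is manual in all fixtures; an RAII guard would reinstate the layout even if validation threw. A hedged alternative sketch, not what the patch does (FlippedLayoutGuard and MockTensorInfo are illustrative):

    #include <cassert>

    enum class DataLayout { NCHW, NHWC };

    struct MockTensorInfo
    {
        DataLayout layout{ DataLayout::NHWC };
        DataLayout data_layout() const { return layout; }
        void set_data_layout(DataLayout dl) { layout = dl; }
    };

    // RAII guard: flips the layout on construction, restores it on destruction.
    class FlippedLayoutGuard
    {
    public:
        explicit FlippedLayoutGuard(MockTensorInfo &info)
            : _info(info), _original(info.data_layout())
        {
            _info.set_data_layout(_original == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        }
        ~FlippedLayoutGuard()
        {
            _info.set_data_layout(_original); // restore even on early exit
        }

    private:
        MockTensorInfo &_info;
        DataLayout      _original;
    };

    int main()
    {
        MockTensorInfo info;
        {
            FlippedLayoutGuard guard(info);
            assert(info.data_layout() == DataLayout::NCHW); // flipped inside the scope
            // layer.run() would execute here
        }
        assert(info.data_layout() == DataLayout::NHWC); // restored afterwards
        return 0;
    }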
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index 03ec920c4e..f956963e14 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -51,130 +51,38 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
-class WinogradConvolutionLayerValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false>
+class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, ActivationLayerInfo act_info)
+ DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
+
{
ARM_COMPUTE_UNUSED(dilation);
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
+ _mixed_layout = mixed_layout;
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
}
protected:
- template <typename U>
- void fill(U &&tensor, int i, float min, float max)
- {
- switch(tensor.data_type())
- {
- case DataType::F16:
- {
- arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::F32:
- {
- std::uniform_real_distribution<float> distribution(min, max);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
- }
- TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- DataType data_type, ActivationLayerInfo act_info)
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
- TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
-
- // Create and configure function
- FunctionType conv;
- ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
- conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- dst.allocator()->allocate();
- bias.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0, -1.f, 1.f);
- fill(AccessorType(weights), 1, -1.f, 1.f);
- fill(AccessorType(bias), 2, -1.f, 1.f);
-
- // Compute Winograd Convolution function
- conv.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, ActivationLayerInfo act_info)
- {
- // Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1 };
- SimpleTensor<T> weights{ weights_shape, data_type, 1 };
- SimpleTensor<T> bias{ bias_shape, data_type, 1 };
-
- // Fill reference
- fill(src, 0, -1.f, 1.f);
- fill(weights, 1, -1.f, 1.f);
- if(use_bias)
- {
- fill(bias, 2, -1.f, 1.f);
- }
- else
- {
- fill(bias, 2, 0.f, 0.f);
- }
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
- SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
-
- return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
+ // Compute Winograd convolution function
+ layer.run();
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true>
-class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
-
- {
- ARM_COMPUTE_UNUSED(dilation);
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
}
-protected:
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -242,9 +150,15 @@ protected:
fill(AccessorType(weights), 1, -0.5f, 0.5f);
fill(AccessorType(bias), 2, -0.5f, 0.5f);
- // Compute Winograd Convolution function
- conv.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute function
+ conv.run();
+ }
return dst;
}
@@ -321,9 +235,10 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
+ bool _mixed_layout{false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
@@ -331,12 +246,30 @@ public:
void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
{
TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
-
+ _mixed_layout = mixed_layout;
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
_reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd input transform function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -388,9 +321,15 @@ protected:
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- // Compute Winograd input transform function
- transf.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(transf, src, dst);
+ }
+ else
+ {
+ // Compute Winograd input transform function
+ transf.run();
+ }
return dst;
}
@@ -405,11 +344,12 @@ protected:
return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
@@ -419,11 +359,30 @@ public:
WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
+ _mixed_layout = mixed_layout;
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
_reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd filter transform function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -476,8 +435,15 @@ protected:
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- filter_transform.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(filter_transform, src, dst);
+ }
+ else
+ {
+ // Compute Winograd filter transform function
+ filter_transform.run();
+ }
return dst;
}
@@ -492,11 +458,12 @@ protected:
return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
@@ -508,6 +475,24 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test multi-DataLayout graph cases, where the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd output transform function
+ layer.run();
+
+ // Reinstate the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -562,8 +547,15 @@ protected:
fill(AccessorType(src), 0, -1.f, 1.f);
fill(AccessorType(bias), 1, -1.f, 1.f);
- output_transform.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(output_transform, src, dst);
+ }
+ else
+ {
+ // Compute Winograd output transform function
+ output_transform.run();
+ }
return dst;
}
@@ -585,10 +577,11 @@ protected:
return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
+#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
\ No newline at end of file
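
Editor's sketch: unlike the layer fixtures, the Winograd transform fixtures capture src and dst layouts separately (data_layout_src, data_layout_dst), since a transform's output tensor need not share its input's layout. A minimal sketch of that per-tensor toggle and restore, with mock types standing in for ITensorInfo:

    #include <cassert>

    enum class DataLayout { NCHW, NHWC };

    struct MockTensorInfo
    {
        DataLayout layout;
        DataLayout data_layout() const { return layout; }
        void set_data_layout(DataLayout dl) { layout = dl; }
    };

    constexpr DataLayout flip(DataLayout dl)
    {
        return dl == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW;
    }

    int main()
    {
        // src and dst may start with different layouts, so each is toggled
        // and restored against its own original.
        MockTensorInfo src{ DataLayout::NHWC };
        MockTensorInfo dst{ DataLayout::NCHW };

        const DataLayout src_orig = src.data_layout();
        const DataLayout dst_orig = dst.data_layout();

        src.set_data_layout(flip(src_orig));
        dst.set_data_layout(flip(dst_orig));
        // transform.run() would execute here

        src.set_data_layout(src_orig);
        dst.set_data_layout(dst_orig);

        assert(src.data_layout() == DataLayout::NHWC && dst.data_layout() == DataLayout::NCHW);
        return 0;
    }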