path: root/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
author    Giorgio Arena <giorgio.arena@arm.com>    2021-03-25 14:54:50 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>    2021-04-13 08:48:39 +0000
commit    63825e8259508dc7731b6de2e008c5ef8c738d79 (patch)
tree      dd1c1e83b7453625cedb941fac3fc7c50fb94c50 /tests/validation/fixtures/WinogradConvolutionLayerFixture.h
parent    0a66abec3723f7dd655a118cf2969db59b37f171 (diff)
download  ComputeLibrary-63825e8259508dc7731b6de2e008c5ef8c738d79.tar.gz
Implicit padding testing along the X axis on high priority operators
Add artificial implicit padding testing for the following fixtures:
- Scale
- FullyConnected
- Pooling
- DepthwiseConvolution
- DirectConvolution
- Winograd
- FFT
- GEMM/GEMMLowp

Create a utility function that loops through a list of tensors and adds random padding based on the global seed (only for the NHWC data layout).

Remove GEMMLowpAssemblyFixture since it wasn't used.
Remove some AssetsLibrary headers since they weren't used.

Resolve COMPMID-4161

Change-Id: Ib6f4f7f113ae69b993d7b2a9e04abbf3de8c99fe
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5327
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
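The utility the message describes appears in the hunks below only as call sites of the form add_padding_x({ &src, &weights, &bias, &dst }, data_layout), placed just before tensor allocation. The patch does not show the helper itself, so the following is a minimal C++ sketch of what such a helper could look like; the signature, header choices, seed handling and the 1-16 element padding bound are assumptions, not taken from this diff.

// Hedged sketch only: the real helper's declaration is not part of this patch;
// only its name and call sites are visible in the hunks below.
#include "arm_compute/core/Types.h"

#include <initializer_list>
#include <random>

// Extend each tensor's padding along the X axis by a random number of elements
// so kernels are exercised with non-zero implicit padding. The commit message
// restricts this to NHWC, where X is the innermost dimension.
template <typename TensorType>
void add_padding_x(std::initializer_list<TensorType *> tensors, const arm_compute::DataLayout &data_layout)
{
    if(data_layout != arm_compute::DataLayout::NHWC)
    {
        return;
    }

    // The commit message derives the padding from the framework's global seed;
    // a fixed placeholder seed keeps this sketch self-contained.
    std::mt19937                                gen(12345U);
    std::uniform_int_distribution<unsigned int> dist(1U, 16U); // 16-element bound is an assumption

    for(TensorType *tensor : tensors)
    {
        // PaddingSize is (top, right, bottom, left); only the X borders grow.
        tensor->info()->extend_padding(arm_compute::PaddingSize(0U, dist(gen), 0U, dist(gen)));
    }
}

Because the padding is applied to the tensor info before the allocators run, the extra bytes become part of each tensor's strides, which is what forces the kernels under test to cope with implicit X-axis padding.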
Diffstat (limited to 'tests/validation/fixtures/WinogradConvolutionLayerFixture.h')
-rw-r--r--  tests/validation/fixtures/WinogradConvolutionLayerFixture.h  34
1 file changed, 19 insertions, 15 deletions
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index f956963e14..a1433e9115 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -62,12 +62,11 @@ public:
{
ARM_COMPUTE_UNUSED(dilation);
_mixed_layout = mixed_layout;
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
}
protected:
-
void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
const DataLayout data_layout = src.info()->data_layout();
@@ -134,6 +133,8 @@ protected:
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &src, &weights, &bias, &dst }, data_layout);
+
// Allocate tensors
src.allocator()->allocate();
weights.allocator()->allocate();
@@ -235,7 +236,7 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
- bool _mixed_layout{false};
+ bool _mixed_layout{ false };
};
template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
@@ -246,13 +247,12 @@ public:
void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
{
TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
- _mixed_layout = mixed_layout;
- _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
- _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
+ _mixed_layout = mixed_layout;
+ _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
-
void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
const DataLayout data_layout_src = src.info()->data_layout();
@@ -311,6 +311,8 @@ protected:
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &src, &dst }, data_layout);
+
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
@@ -344,7 +346,7 @@ protected:
return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
}
- bool _mixed_layout {false};
+ bool _mixed_layout{ false };
TensorType _target{};
SimpleTensor<T> _reference{};
};
@@ -360,12 +362,11 @@ public:
TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
_mixed_layout = mixed_layout;
- _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
- _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
+ _target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+ _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
-
void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
const DataLayout data_layout_src = src.info()->data_layout();
@@ -425,6 +426,8 @@ protected:
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &src, &dst }, data_layout);
+
// Allocate tensors
src.allocator()->allocate();
dst.allocator()->allocate();
@@ -458,7 +461,7 @@ protected:
return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
}
- bool _mixed_layout {false};
+ bool _mixed_layout{ false };
TensorType _target{};
SimpleTensor<T> _reference{};
};
@@ -475,7 +478,6 @@ public:
}
protected:
-
void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
const DataLayout data_layout_src = src.info()->data_layout();
@@ -534,6 +536,8 @@ protected:
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &src, &bias, &dst }, winograd_info.output_data_layout);
+
// Allocate tensors
src.allocator()->allocate();
bias.allocator()->allocate();
@@ -577,7 +581,7 @@ protected:
return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
}
- bool _mixed_layout {false};
+ bool _mixed_layout{ false };
TensorType _target{};
SimpleTensor<T> _reference{};
};