author    Giorgio Arena <giorgio.arena@arm.com>  2021-03-25 14:54:50 +0000
committer Georgios Pinitas <georgios.pinitas@arm.com>  2021-04-13 08:48:39 +0000
commit    63825e8259508dc7731b6de2e008c5ef8c738d79 (patch)
tree      dd1c1e83b7453625cedb941fac3fc7c50fb94c50 /tests/validation/fixtures/ConvolutionLayerFixture.h
parent    0a66abec3723f7dd655a118cf2969db59b37f171 (diff)
download  ComputeLibrary-63825e8259508dc7731b6de2e008c5ef8c738d79.tar.gz
Implicit padding testing along the X axis on high priority operators
Add artificial implicit padding testing for the following fixtures:
- Scale
- FullyConnected
- Pooling
- DepthwiseConvolution
- DirectConvolution
- Winograd
- FFT
- GEMM/GEMMLowp

Create a utility function that loops through a list of tensors and adds random padding based on the global seed (only for the NHWC data layout).

Remove GEMMLowpAssemblyFixture since it wasn't used.
Remove some AssetsLibrary headers since they weren't used.

Resolve COMPMID-4161

Change-Id: Ib6f4f7f113ae69b993d7b2a9e04abbf3de8c99fe
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5327
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
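For illustration, below is a minimal sketch of what such a padding helper could look like. The exact signature, the seeding scheme, and the padding range are assumptions based only on the commit message above; they are not the library's actual implementation.

#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"

#include <initializer_list>
#include <random>

namespace arm_compute
{
namespace test
{
// Hypothetical sketch: extend each tensor's padding along the X axis by a
// random amount so kernels are exercised with implicit (non-zero) padding.
// Per the commit message, padding is only injected for the NHWC data layout.
inline void add_padding_x(std::initializer_list<ITensor *> tensors, DataLayout data_layout, unsigned int seed)
{
    if(data_layout != DataLayout::NHWC)
    {
        return;
    }

    // Assumption: the real helper derives its randomness from the global test seed.
    std::mt19937                                gen(seed);
    std::uniform_int_distribution<unsigned int> dist(1, 16);

    for(ITensor *tensor : tensors)
    {
        // extend_padding() only succeeds while the tensor info is still
        // resizable, i.e. before allocator()->allocate() has been called.
        tensor->info()->extend_padding(PaddingSize(0 /* top */, dist(gen) /* right */, 0 /* bottom */, 0 /* left */));
    }
}
} // namespace test
} // namespace arm_compute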
Diffstat (limited to 'tests/validation/fixtures/ConvolutionLayerFixture.h')
-rw-r--r--  tests/validation/fixtures/ConvolutionLayerFixture.h | 6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 07790e84d9..b649280ae0 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -69,7 +69,8 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
- DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, bool mixed_layout = false)
+ DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
+ bool mixed_layout = false)
{
_mixed_layout = mixed_layout;
_data_type = data_type;
@@ -87,7 +88,6 @@ public:
}
protected:
-
void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
// Test Multi DataLayout graph cases, when the data layout changes after configure
@@ -214,6 +214,8 @@ protected:
ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+ add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);
+
// Allocate tensors
src.allocator()->allocate();
weights.allocator()->allocate();
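Note on the hunk above: the new add_padding_x call is deliberately placed before the allocator()->allocate() calls. In ComputeLibrary, a tensor's padding can only be extended while its info is still resizable, so the extra X padding has to be requested before the tensors are allocated.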