Diffstat (limited to 'tests/validation/fixtures')

 tests/validation/fixtures/ConvolutionLayerFixture.h          |  38
 tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h |  47
 tests/validation/fixtures/DirectConvolutionLayerFixture.h    |  39
 tests/validation/fixtures/FFTFixture.h                       |  37
 tests/validation/fixtures/FullyConnectedLayerFixture.h       |  39
 tests/validation/fixtures/PoolingLayerFixture.h              |  40
 tests/validation/fixtures/ScaleFixture.h                     |  42
 tests/validation/fixtures/WinogradConvolutionLayerFixture.h  | 241
 8 files changed, 347 insertions(+), 176 deletions(-)
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index a4db49fc8e..07790e84d9 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -69,8 +69,9 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
- DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info)
+ DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, bool mixed_layout = false)
{
+ _mixed_layout = mixed_layout;
_data_type = data_type;
_weights_data_type = weights_data_type;
_is_quantized = is_data_type_quantized_asymmetric(data_type);
@@ -86,6 +87,21 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
void regularize_values(void *values, size_t size)
{
float *fvalues = static_cast<float *>(values);
@@ -214,8 +230,15 @@ protected:
fill(AccessorType(weights), 1);
fill(AccessorType(bias), 2);
- // Compute NEConvolutionLayer function
- conv.run();
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
@@ -264,9 +287,10 @@ protected:
QuantizationInfo _weight_quantization_info{};
bool _is_quantized = false;
bool _is_bfloat16 = false;
+ bool _mixed_layout = false;
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
@@ -276,11 +300,11 @@ public:
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
data_type, data_type, data_layout,
- QuantizationInfo(), QuantizationInfo(), act_info);
+ QuantizationInfo(), QuantizationInfo(), act_info, mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ConvolutionValidationQuantizedFixture : public ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
@@ -289,7 +313,7 @@ public:
DataLayout data_layout, QuantizationInfo quantization_info, ActivationLayerInfo act_info)
{
ConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation, reshape_weights,
- data_type, data_type, data_layout, quantization_info, quantization_info, act_info);
+ data_type, data_type, data_layout, quantization_info, quantization_info, act_info, mixed_layout);
}
};
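The trailing template parameter added above is how a test suite opts in: instantiating the fixture with mixed_layout = true routes every run through mix_layout(). A hypothetical opt-in alias (illustrative only; Tensor, Accessor and NEConvolutionLayer are existing Compute Library test types, the alias name is assumed):

    // Illustrative alias: enable the mixed-layout path at compile time.
    template <typename T>
    using NEConvolutionLayerMixedDataLayoutFixture =
        ConvolutionValidationFixture<Tensor, Accessor, NEConvolutionLayer, T, /*mixed_layout=*/true>;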
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index d9806b5c84..0aa43d82b4 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -59,8 +59,9 @@ public:
void setup(TensorShape in_shape, Size2D kernel_size, PadStrideInfo pad_stride_info, Size2D dilation,
unsigned int depth_multiplier, DataType input_data_type, DataType weights_data_type,
QuantizationInfo input_quantization_info, QuantizationInfo weights_quantization_info, QuantizationInfo output_quantization_info,
- DataLayout data_layout, ActivationLayerInfo act_info)
+ DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false)
{
+ _mixed_layout = mixed_layout;
_input_shape = in_shape;
_input_data_type = input_data_type;
_weights_data_type = weights_data_type;
@@ -130,9 +131,16 @@ public:
fill(AccessorType(_src), 0);
fill(AccessorType(_weights), 1);
fill(AccessorType(_biases), 2);
-
- // Compute function
- _dwc.run();
+
+ if(_mixed_layout)
+ {
+ mix_layout(_dwc, _src, _target);
+ }
+ else
+ {
+ // Compute function
+ _dwc.run();
+ }
}
void compute_reference()
@@ -150,6 +158,21 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute function
+ layer.run();
+
+ // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -214,9 +237,10 @@ protected:
ActivationLayerInfo _act_info{};
unsigned int _depth_multiplier{};
Size2D _dilation{};
+ bool _mixed_layout{false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DepthwiseConvolutionLayerValidationFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
@@ -226,7 +250,7 @@ public:
{
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier,
data_type, data_type, QuantizationInfo(), QuantizationInfo(), QuantizationInfo(),
- data_layout, act_info);
+ data_layout, act_info, mixed_layout);
}
};
@@ -434,8 +458,15 @@ public:
fill(AccessorType(_weights), 1);
fill(AccessorType(_biases), 2);
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ _src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ _target.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
// Compute function
_dwc.run();
+
+ // Restore the original data layout so the test suite can validate the values
+ _target.info()->set_data_layout(_data_layout);
}
void compute_reference()
@@ -496,7 +527,7 @@ protected:
unsigned int _n0{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DepthwiseConvolutionLayerValidationQuantizedFixture : public DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>
{
public:
@@ -506,7 +537,7 @@ public:
{
DepthwiseConvolutionLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T, T>::setup(in_shape, kernel_size, pad_stride_info, dilation, depth_multiplier, data_type,
data_type, input_quantization_info, input_quantization_info, output_quantization_info,
- data_layout, act_info);
+ data_layout, act_info, mixed_layout);
}
};
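Restoring the destination layout is what keeps the check meaningful: the framework reads the target through its TensorInfo when comparing against the reference, so a still-flipped layout would permute coordinates. This is also why the unguarded hunk above reinstates only _target; once run() has finished, only the target's values are read again. A sketch of the downstream check (the tolerance name is an assumption):

    // Validation walks _target via its TensorInfo, so its layout must be restored first.
    validate(Accessor(_target), _reference, tolerance_f32);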
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index 8e4de77535..5ed0b9f9a3 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -53,10 +53,11 @@ public:
template <typename...>
void setup(TensorShape input_shape, int stride_x, int stride_y, int pad_x, int pad_y, unsigned int kernel_size, unsigned int num_kernels,
- DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo act_info, DataLayout data_layout, bool mixed_layout = false)
{
_quantization_info = quantization_info;
_data_type = data_type;
+ _mixed_layout = mixed_layout;
TensorShape weights_shape(kernel_size, kernel_size, input_shape.z(), num_kernels);
const TensorShape bias_shape(num_kernels);
@@ -89,6 +90,22 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ DataLayout data_layout = src.info()->data_layout();
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -171,8 +188,15 @@ protected:
fill(AccessorType(weights), 1);
fill(AccessorType(bias), 2);
- // Compute NEConvolutionLayer function
- conv.run();
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
@@ -197,9 +221,10 @@ protected:
SimpleTensor<T> _reference{};
QuantizationInfo _quantization_info{};
DataType _data_type{};
+ bool _mixed_layout {false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -208,11 +233,11 @@ public:
DataLayout data_layout)
{
DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, QuantizationInfo(),
- act_info, data_layout);
+ act_info, data_layout, mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class DirectConvolutionValidationQuantizedFixture : public DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -221,7 +246,7 @@ public:
ActivationLayerInfo act_info, DataLayout data_layout)
{
DirectConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, stride_x, stride_y, pad_x, pad_y, kernel_size, num_kernels, data_type, quantization_info,
- act_info, data_layout);
+ act_info, data_layout, mixed_layout);
}
};
diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h
index 86a97272a0..199730d5d0 100644
--- a/tests/validation/fixtures/FFTFixture.h
+++ b/tests/validation/fixtures/FFTFixture.h
@@ -134,8 +134,9 @@ class FFTConvolutionValidationGenericFixture : public framework::Fixture
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
+ DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false)
{
+ _mixed_layout = mixed_layout;
_data_type = data_type;
_data_layout = data_layout;
@@ -144,6 +145,21 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(_data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Convolution function
+ layer.run();
+
+ // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(_data_layout);
+ dst.info()->set_data_layout(_data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -209,10 +225,16 @@ protected:
fill(AccessorType(src), 0);
fill(AccessorType(weights), 1);
fill(AccessorType(bias), 2);
-
- // Compute convolution function
- conv.run();
-
+
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute Convolution function
+ conv.run();
+ }
return dst;
}
@@ -239,9 +261,10 @@ protected:
SimpleTensor<T> _reference{};
DataType _data_type{};
DataLayout _data_layout{};
+ bool _mixed_layout{false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FFTConvolutionValidationFixture : public FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -250,7 +273,7 @@ public:
DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info)
{
FFTConvolutionValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, dilation,
- data_type, data_layout, act_info);
+ data_type, data_layout, act_info, mixed_layout);
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 3760cfb8b7..8f38aae187 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -56,11 +56,12 @@ public:
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, bool transpose_weights, bool reshape_weights,
- DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info)
+ DataType data_type, QuantizationInfo quantization_info, ActivationLayerInfo activation_info, bool mixed_layout = false)
{
ARM_COMPUTE_UNUSED(weights_shape);
ARM_COMPUTE_UNUSED(bias_shape);
+ _mixed_layout = mixed_layout;
_data_type = data_type;
_bias_data_type = is_data_type_quantized_asymmetric(data_type) ? DataType::S32 : data_type;
_quantization_info = quantization_info;
@@ -71,6 +72,22 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute function
+ layer.run();
+
+ // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor, int i)
{
@@ -189,8 +206,15 @@ protected:
fill(AccessorType(weights), 1);
}
- // Compute NEFullyConnectedLayer function
- fc.run();
+ if(_mixed_layout)
+ {
+ mix_layout(fc, src, dst);
+ }
+ else
+ {
+ // Compute NEFullyConnectedLayer function
+ fc.run();
+ }
return dst;
}
@@ -214,11 +238,12 @@ protected:
SimpleTensor<T> _reference{};
DataType _data_type{};
DataType _bias_data_type{};
+ bool _mixed_layout{false};
QuantizationInfo _quantization_info{};
ActivationLayerInfo _activation_info{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FullyConnectedLayerValidationFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -228,11 +253,11 @@ public:
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- QuantizationInfo(), activation_info);
+ QuantizationInfo(), activation_info, mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class FullyConnectedLayerValidationQuantizedFixture : public FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -242,7 +267,7 @@ public:
{
FullyConnectedLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, transpose_weights,
reshape_weights, data_type,
- quantization_info, activation_info);
+ quantization_info, activation_info, mixed_layout);
}
};
} // namespace validation
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index af078d4ce3..ee81ff5538 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -47,14 +47,31 @@ class PoolingLayerValidationGenericFixture : public framework::Fixture
public:
template <typename...>
void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout, bool indices = false,
- QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo())
+ QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false)
{
+ _mixed_layout = mixed_layout;
_pool_info = pool_info;
_target = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
_reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute function
+ layer.run();
+
+ // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
template <typename U>
void fill(U &&tensor)
{
@@ -110,9 +127,15 @@ protected:
// Fill tensors
fill(AccessorType(src));
- // Compute function
- pool_layer.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(pool_layer, src, dst);
+ }
+ else
+ {
+ // Compute function
+ pool_layer.run();
+ }
return dst;
}
@@ -129,6 +152,7 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
PoolingLayerInfo _pool_info{};
+ bool _mixed_layout{false};
TensorType _target_indices{};
SimpleTensor<uint32_t> _ref_indices{};
};
@@ -144,7 +168,7 @@ public:
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class PoolingLayerValidationFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -152,7 +176,7 @@ public:
void setup(TensorShape shape, PoolingType pool_type, Size2D pool_size, PadStrideInfo pad_stride_info, bool exclude_padding, DataType data_type, DataLayout data_layout)
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding),
- data_type, data_layout);
+ data_type, data_layout, false, mixed_layout);
}
};
@@ -168,7 +192,7 @@ public:
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class PoolingLayerValidationQuantizedFixture : public PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -177,7 +201,7 @@ public:
QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo())
{
PoolingLayerValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, PoolingLayerInfo(pool_type, pool_size, data_layout, pad_stride_info, exclude_padding),
- data_type, data_layout, false, input_qinfo, output_qinfo);
+ data_type, data_layout, false, input_qinfo, output_qinfo, mixed_layout);
}
};
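Because mixed_layout is appended after three already-defaulted parameters of setup(), a caller that wants only the new flag must still spell out the earlier defaults; that is why the hunks above pass a literal false for indices and explicit QuantizationInfo() arguments. The call-site consequence, sketched with assumed shape and pool info values:

    // Sketch: every earlier default must be written out to reach mixed_layout.
    PoolingLayerValidationGenericFixture<Tensor, Accessor, NEPoolingLayer, float>::setup(
        shape, pool_info, DataType::F32, DataLayout::NHWC,
        /*indices=*/false, QuantizationInfo(), QuantizationInfo(), /*mixed_layout=*/true);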
diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h
index dd521470e6..9e0f620abe 100644
--- a/tests/validation/fixtures/ScaleFixture.h
+++ b/tests/validation/fixtures/ScaleFixture.h
@@ -46,7 +46,7 @@ class ScaleValidationGenericFixture : public framework::Fixture
public:
template <typename...>
void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout, InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy,
- bool align_corners)
+ bool align_corners, bool mixed_layout)
{
_shape = shape;
_policy = policy;
@@ -55,6 +55,7 @@ public:
_data_type = data_type;
_quantization_info = quantization_info;
_align_corners = align_corners;
+ _mixed_layout = mixed_layout;
generate_scale(shape);
@@ -67,6 +68,22 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute function
+ layer.run();
+
+ // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
+ }
+
void generate_scale(const TensorShape &shape)
{
static constexpr float _min_scale{ 0.25f };
@@ -155,9 +172,15 @@ protected:
// Fill tensors
fill(AccessorType(src));
- // Compute function
- scale.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(scale, src, dst);
+ }
+ else
+ {
+ // Compute function
+ scale.run();
+ }
return dst;
}
@@ -182,11 +205,12 @@ protected:
DataType _data_type{};
QuantizationInfo _quantization_info{};
bool _align_corners{ false };
+ bool _mixed_layout{ false };
float _scale_x{ 1.f };
float _scale_y{ 1.f };
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ScaleValidationQuantizedFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -201,10 +225,11 @@ public:
policy,
border_mode,
sampling_policy,
- align_corners);
+ align_corners,
+ mixed_layout);
}
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class ScaleValidationFixture : public ScaleValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
@@ -218,7 +243,8 @@ public:
policy,
border_mode,
sampling_policy,
- align_corners);
+ align_corners,
+ mixed_layout);
}
};
} // namespace validation
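Note that ScaleValidationGenericFixture::setup() takes mixed_layout without a default, so both derived fixtures' forwarding calls had to change in the same patch; the other files default the parameter instead, which keeps old call sites compiling. The two signature styles side by side, taken from the hunks above:

    // ScaleFixture (this patch): mandatory flag, every caller must update.
    void setup(TensorShape shape, DataType data_type, QuantizationInfo quantization_info, DataLayout data_layout,
               InterpolationPolicy policy, BorderMode border_mode, SamplingPolicy sampling_policy,
               bool align_corners, bool mixed_layout);

    // Other fixtures (this patch): defaulted flag, opt-in at the call site.
    void setup(TensorShape shape, PoolingLayerInfo pool_info, DataType data_type, DataLayout data_layout,
               bool indices = false, QuantizationInfo input_qinfo = QuantizationInfo(),
               QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false);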
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index 03ec920c4e..f956963e14 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -51,130 +51,38 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
-class WinogradConvolutionLayerValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false>
+class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, ActivationLayerInfo act_info)
+ DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
+
{
ARM_COMPUTE_UNUSED(dilation);
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
+ _mixed_layout = mixed_layout;
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
}
protected:
- template <typename U>
- void fill(U &&tensor, int i, float min, float max)
- {
- switch(tensor.data_type())
- {
- case DataType::F16:
- {
- arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::F32:
- {
- std::uniform_real_distribution<float> distribution(min, max);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
- }
- TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- DataType data_type, ActivationLayerInfo act_info)
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
- TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
-
- // Create and configure function
- FunctionType conv;
- ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
- conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- dst.allocator()->allocate();
- bias.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0, -1.f, 1.f);
- fill(AccessorType(weights), 1, -1.f, 1.f);
- fill(AccessorType(bias), 2, -1.f, 1.f);
-
- // Compute Winograd Convolution function
- conv.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, ActivationLayerInfo act_info)
- {
- // Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1 };
- SimpleTensor<T> weights{ weights_shape, data_type, 1 };
- SimpleTensor<T> bias{ bias_shape, data_type, 1 };
-
- // Fill reference
- fill(src, 0, -1.f, 1.f);
- fill(weights, 1, -1.f, 1.f);
- if(use_bias)
- {
- fill(bias, 2, -1.f, 1.f);
- }
- else
- {
- fill(bias, 2, 0.f, 0.f);
- }
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
- SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
-
- return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
+ // Compute Convolution function
+ layer.run();
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true>
-class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
-
- {
- ARM_COMPUTE_UNUSED(dilation);
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
+ // Restore the original data layout so the test suite can validate the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
}
-protected:
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -242,9 +150,15 @@ protected:
fill(AccessorType(weights), 1, -0.5f, 0.5f);
fill(AccessorType(bias), 2, -0.5f, 0.5f);
- // Compute Winograd Convolution function
- conv.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute function
+ conv.run();
+ }
return dst;
}
@@ -321,9 +235,10 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
+ bool _mixed_layout{false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
@@ -331,12 +246,30 @@ public:
void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
{
TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
-
+ _mixed_layout = mixed_layout;
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
_reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd input transform function
+ layer.run();
+
+ // Restore the original data layouts so the test suite can validate the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -388,9 +321,15 @@ protected:
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- // Compute Winograd input transform function
- transf.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(transf, src, dst);
+ }
+ else
+ {
+ // Compute Winograd input transform function
+ transf.run();
+ }
return dst;
}
@@ -405,11 +344,12 @@ protected:
return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
@@ -419,11 +359,30 @@ public:
WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
+ _mixed_layout = mixed_layout;
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
_reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd filter transform function
+ layer.run();
+
+ // Restore the original data layouts so the test suite can validate the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -476,8 +435,15 @@ protected:
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- filter_transform.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(filter_transform, src, dst);
+ }
+ else
+ {
+ // Compute Winograd filter transform function
+ filter_transform.run();
+ }
return dst;
}
@@ -492,11 +458,12 @@ protected:
return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
@@ -508,6 +475,24 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test Multi DataLayout graph cases, when the data layout changes after configure
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd output transform function
+ layer.run();
+
+ // Restore the original data layouts so the test suite can validate the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -562,8 +547,15 @@ protected:
fill(AccessorType(src), 0, -1.f, 1.f);
fill(AccessorType(bias), 1, -1.f, 1.f);
- output_transform.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(output_transform, src, dst);
+ }
+ else
+ {
+ // Compute Winograd output transform function
+ output_transform.run();
+ }
return dst;
}
@@ -585,10 +577,11 @@ protected:
return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
+#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
\ No newline at end of file
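The flip-run-restore helper is now duplicated, nearly verbatim, across all eight fixture headers. A possible follow-up (not part of this patch) would hoist it into a shared free function in the test framework; a minimal sketch, assuming the usual arm_compute types are in scope:

    // Hypothetical shared helper; a sketch, not part of this patch.
    template <typename FunctionType, typename TensorType>
    void run_with_flipped_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout src_layout = src.info()->data_layout();
        const DataLayout dst_layout = dst.info()->data_layout();

        // Flip both layouts after configure() to exercise multi-layout graph cases
        src.info()->set_data_layout(src_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(dst_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        layer.run();

        // Restore so validation reads the values with the expected layouts
        src.info()->set_data_layout(src_layout);
        dst.info()->set_data_layout(dst_layout);
    }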