path: root/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
author    Manuel Bottini <manuel.bottini@arm.com>  2021-03-23 11:50:34 +0000
committer Manuel Bottini <manuel.bottini@arm.com>  2021-04-06 11:28:16 +0000
commit    ca62c6f53eb7244e6fed9f7e932608aa2496d9eb (patch)
tree      e5c7630c40d9f009e9baef4e849c6c7cc6ca90a7 /tests/validation/fixtures/WinogradConvolutionLayerFixture.h
parent    4ed7b39dbbe8ccc6267a9eacefca51717c3b3e10 (diff)
download  ComputeLibrary-ca62c6f53eb7244e6fed9f7e932608aa2496d9eb.tar.gz
Mixed data-layout testing on high-priority operators

Change data layouts after configure() in the validation tests for:
- Scale
- Pooling
- FullyConnected
- DepthwiseConvolution
- DirectConvolution
- FFTConvolution
- WinogradConvolution
- GEMMConvolution (Indirect GEMM included)

Extend the fixtures and fix the new mixed data-layout tests.

Resolves: COMPMID-4162
Change-Id: I2f2eb2075f7e24ab3872249d88cadb57b82c5dde
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5326
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
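The core of the change is a mix_layout() helper added to each fixture: after configure(), it flips the tensors' data layout (NCHW <-> NHWC), runs the function, and then restores the original layout before validation. A condensed sketch of that pattern, with the names taken from the diff below (TensorType and FunctionType are the fixtures' existing template parameters):

    template <typename FunctionType, typename TensorType>
    void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
    {
        const DataLayout data_layout = src.info()->data_layout();

        // Flip the layout after configure() so that run() executes with a
        // layout different from the one the function was configured with
        src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
        dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);

        layer.run();

        // Restore the original layout so the reference comparison checks the
        // values in the expected layout
        src.info()->set_data_layout(data_layout);
        dst.info()->set_data_layout(data_layout);
    }

Each fixture selects between mix_layout(...) and a plain run() via a new mixed_layout template parameter (default false), so existing instantiations are unaffected.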
Diffstat (limited to 'tests/validation/fixtures/WinogradConvolutionLayerFixture.h')
-rw-r--r-- tests/validation/fixtures/WinogradConvolutionLayerFixture.h | 241
1 file changed, 117 insertions(+), 124 deletions(-)
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index 03ec920c4e..f956963e14 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -51,130 +51,38 @@ namespace validation
{
using namespace arm_compute::misc::shape_calculator;
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
-class WinogradConvolutionLayerValidationFixture : public framework::Fixture
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true, bool mixed_layout = false>
+class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
template <typename...>
void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, ActivationLayerInfo act_info)
+ DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
+
{
ARM_COMPUTE_UNUSED(dilation);
-
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info);
+ _mixed_layout = mixed_layout;
+ _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
+ _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
}
protected:
- template <typename U>
- void fill(U &&tensor, int i, float min, float max)
- {
- switch(tensor.data_type())
- {
- case DataType::F16:
- {
- arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(min), float(max) };
- library->fill(tensor, distribution, i);
- break;
- }
- case DataType::F32:
- {
- std::uniform_real_distribution<float> distribution(min, max);
- library->fill(tensor, distribution, i);
- break;
- }
- default:
- {
- ARM_COMPUTE_ERROR("Not supported");
- }
- }
- }
- TensorType compute_target(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, const PadStrideInfo &info,
- DataType data_type, ActivationLayerInfo act_info)
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
{
- // Create tensors
- TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
- TensorType weights = create_tensor<TensorType>(weights_shape, data_type, 1);
- TensorType bias = create_tensor<TensorType>(bias_shape, data_type, 1);
- TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
-
- // Create and configure function
- FunctionType conv;
- ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info)), framework::LogLevel::ERRORS);
- conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info);
-
- ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Allocate tensors
- src.allocator()->allocate();
- weights.allocator()->allocate();
- dst.allocator()->allocate();
- bias.allocator()->allocate();
-
- ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
- ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
-
- // Fill tensors
- fill(AccessorType(src), 0, -1.f, 1.f);
- fill(AccessorType(weights), 1, -1.f, 1.f);
- fill(AccessorType(bias), 2, -1.f, 1.f);
-
- // Compute Winograd Convolution function
- conv.run();
-
- return dst;
- }
-
- SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
- DataType data_type, ActivationLayerInfo act_info)
- {
- // Create reference
- SimpleTensor<T> src{ input_shape, data_type, 1 };
- SimpleTensor<T> weights{ weights_shape, data_type, 1 };
- SimpleTensor<T> bias{ bias_shape, data_type, 1 };
-
- // Fill reference
- fill(src, 0, -1.f, 1.f);
- fill(weights, 1, -1.f, 1.f);
- if(use_bias)
- {
- fill(bias, 2, -1.f, 1.f);
- }
- else
- {
- fill(bias, 2, 0.f, 0.f);
- }
+ const DataLayout data_layout = src.info()->data_layout();
+ // Test multi-DataLayout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
- SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
-
- return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
- }
-
- TensorType _target{};
- SimpleTensor<T> _reference{};
-};
+ // Compute Winograd Convolution function
+ layer.run();
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T, typename T1 = T, bool use_bias = true>
-class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
-{
-public:
- template <typename...>
- void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation,
- DataType data_type, ActivationLayerInfo act_info, const DataLayout &data_layout)
-
- {
- ARM_COMPUTE_UNUSED(dilation);
- _target = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
- _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
+ // Restore the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout);
+ dst.info()->set_data_layout(data_layout);
}
-protected:
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -242,9 +150,15 @@ protected:
fill(AccessorType(weights), 1, -0.5f, 0.5f);
fill(AccessorType(bias), 2, -0.5f, 0.5f);
- // Compute Winograd Convolution function
- conv.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(conv, src, dst);
+ }
+ else
+ {
+ // Compute function
+ conv.run();
+ }
return dst;
}
@@ -321,9 +235,10 @@ protected:
TensorType _target{};
SimpleTensor<T> _reference{};
+ bool _mixed_layout{false};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradInputTransformValidationFixture : public framework::Fixture
{
public:
@@ -331,12 +246,30 @@ public:
void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
{
TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
-
+ _mixed_layout = mixed_layout;
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
_reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test multi-DataLayout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd input transform function
+ layer.run();
+
+ // Restore the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -388,9 +321,15 @@ protected:
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- // Compute Winograd input transform function
- transf.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(transf, src, dst);
+ }
+ else
+ {
+ // Compute Winograd input transform function
+ transf.run();
+ }
return dst;
}
@@ -405,11 +344,12 @@ protected:
return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradFilterTransformValidationFixture : public framework::Fixture
{
public:
@@ -419,11 +359,30 @@ public:
WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
+ _mixed_layout = mixed_layout;
_target = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
_reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test multi-DataLayout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd filter transform function
+ layer.run();
+
+ // Restore the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -476,8 +435,15 @@ protected:
// Fill tensors
fill(AccessorType(src), 0, -1.f, 1.f);
- filter_transform.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(filter_transform, src, dst);
+ }
+ else
+ {
+ // Compute Winograd filter transform function
+ filter_transform.run();
+ }
return dst;
}
@@ -492,11 +458,12 @@ protected:
return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
class WinogradOutputTransformValidationFixture : public framework::Fixture
{
public:
@@ -508,6 +475,24 @@ public:
}
protected:
+
+ void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
+ {
+ const DataLayout data_layout_src = src.info()->data_layout();
+ const DataLayout data_layout_dst = dst.info()->data_layout();
+
+ // Test multi-DataLayout graph cases, where the data layout changes after configure()
+ src.info()->set_data_layout(data_layout_src == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+ dst.info()->set_data_layout(data_layout_dst == DataLayout::NCHW ? DataLayout::NHWC : DataLayout::NCHW);
+
+ // Compute Winograd output transform function
+ layer.run();
+
+ // Restore the original data layout so the test suite can properly check the values
+ src.info()->set_data_layout(data_layout_src);
+ dst.info()->set_data_layout(data_layout_dst);
+ }
+
template <typename U>
void fill(U &&tensor, int i, float min, float max)
{
@@ -562,8 +547,15 @@ protected:
fill(AccessorType(src), 0, -1.f, 1.f);
fill(AccessorType(bias), 1, -1.f, 1.f);
- output_transform.run();
-
+ if(_mixed_layout)
+ {
+ mix_layout(output_transform, src, dst);
+ }
+ else
+ {
+ // Compute Winograd output transform function
+ output_transform.run();
+ }
return dst;
}
@@ -585,10 +577,11 @@ protected:
return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
}
+ bool _mixed_layout {false};
TensorType _target{};
SimpleTensor<T> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
+#endif /* ARM_COMPUTE_TEST_WINOGRAD_LAYER_FIXTURE */
\ No newline at end of file
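For reference, a fixture with mixed_layout = true would be instantiated along these lines in a backend test suite; the alias name here is illustrative and not part of this diff (Tensor, Accessor, and NEWinogradConvolutionLayer are the usual NEON-side test types):

    // Hypothetical NEON-side alias; the last template argument enables the
    // mixed-layout path (template order: TensorType, AccessorType,
    // FunctionType, T, T1, use_bias, mixed_layout)
    template <typename T>
    using NEWinogradConvolutionLayerMixedDataLayoutFixture =
        WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, T, true, true>;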