author     Giorgio Arena <giorgio.arena@arm.com>        2018-05-03 15:57:48 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>    2018-11-02 16:52:35 +0000
commit     a3221e6772dc371cf5de7e525bf5c22b58ad6d08 (patch)
tree       14d224e07d92dbbd97966de0b6b0aa8e6a288022 /tests/validation
parent     20b4313365ea2ed31f59fd757f68f791f076e6bc (diff)
COMPMID-1106 Add fast math support in NEWinogradConvolutionLayer
Change-Id: I5fcbbb3b6f22204f0aaebbc319dfdf03593577e8
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/130067
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Diffstat (limited to 'tests/validation')
-rw-r--r--    tests/validation/NEON/ConvolutionLayer.cpp                      59
-rw-r--r--    tests/validation/fixtures/WinogradConvolutionLayerFixture.h    16
2 files changed, 44 insertions(+), 31 deletions(-)
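
For reference, the first hunk below extends the NEConvolutionLayer::get_convolution_method query with a fast-math flag, so that the Winograd path can be selected for configurations that would otherwise fall back to GEMM. A minimal sketch of that query, mirroring the signature exercised by the updated test (the snippet is not part of the commit; the tensor shapes are illustrative, taken from the test data):

    // Ask the NEON backend which convolution method it would pick when fast math is allowed.
    TensorInfo input(TensorShape(18U, 18U, 32U), 1, DataType::F32, 0);
    TensorInfo weights(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32, 0);
    TensorInfo output(TensorShape(16U, 16U, 21U), 1, DataType::F32, 0);
    ConvolutionMethod method = NEConvolutionLayer::get_convolution_method(&input, &weights, &output,
                                                                          PadStrideInfo(1, 1, 0, 0), WeightsInfo(), Size2D(1U, 1U),
                                                                          ActivationLayerInfo(), true /* enable_fast_math */);
    // With fast math enabled, the updated test expects ConvolutionMethod::WINOGRAD for this shape.
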
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 330480e4d8..9043d582d4 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -76,43 +76,48 @@ const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo
TEST_SUITE(NEON)
TEST_SUITE(ConvolutionLayer)
-DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(
- framework::dataset::make("InputInfo", { TensorInfo(TensorShape(8U, 8U, 2U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
- }),
- framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
- })),
- framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(6U, 6U, 1U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(21U, 25U, 21U, 4U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
- TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
- })),
- framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
- PadStrideInfo(1, 1, 0, 0),
- PadStrideInfo(2, 1, 0, 0),
- PadStrideInfo(3, 2, 1, 0)
+DATA_TEST_CASE(ValidateConvolutionMethod, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
+ framework::dataset::make("InputInfo", { TensorInfo(TensorShape(18U, 18U, 32U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(23U, 27U, 32U, 4U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(3U, 3U, 2U, 1U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(33U, 27U, 7U, 4U), 1, DataType::F32, 0)
+ }),
+ framework::dataset::make("WeightsInfo", { TensorInfo(TensorShape(3U, 3U, 32U, 21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(5U, 5U, 32U, 21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(3U, 3U, 5U, 21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(5U, 5U, 7U, 16U), 1, DataType::F16, 0)
+ })),
+ framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(16U, 16U, 21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(19U, 23U, 21U, 4U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(11U, 25U, 21U), 1, DataType::F32, 0),
+ TensorInfo(TensorShape(11U, 12U, 16U, 4U), 1, DataType::F32, 0)
+ })),
+ framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0),
+ PadStrideInfo(1, 1, 0, 0),
+ PadStrideInfo(2, 1, 0, 0),
+ PadStrideInfo(3, 2, 1, 0)
+ })),
+ framework::dataset::make("FastMath", { true,
+ true,
+ false,
+ false
})),
- framework::dataset::make("Expected", { ConvolutionMethod::GEMM, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
- input_info, weights_info, output_info, conv_info, expected)
+ framework::dataset::make("Expected", { ConvolutionMethod::WINOGRAD, ConvolutionMethod::WINOGRAD, ConvolutionMethod::GEMM, ConvolutionMethod::GEMM })),
+ input_info, weights_info, output_info, conv_info, fast_math, expected)
{
- ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(false),
- &weights_info.clone()->set_is_resizable(false),
- &output_info.clone()->set_is_resizable(false), conv_info);
+ ConvolutionMethod is_valid = NEConvolutionLayer::get_convolution_method(&input_info.clone()->set_is_resizable(true),
+ &weights_info.clone()->set_is_resizable(true),
+ &output_info.clone()->set_is_resizable(true), conv_info, WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math);
ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}
TEST_SUITE_END()
TEST_SUITE(WinogradLayer)
template <typename T>
-using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
+using NEWinogradConvolutionLayerFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T>;
template <typename T>
-using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, false>;
+using NEWinogradConvolutionLayerNoBiasFixture = WinogradConvolutionLayerFastMathValidationFixture<Tensor, Accessor, NEWinogradConvolutionLayer, T, false>;
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEWinogradConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index e15931eafb..ef596e0bae 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -153,7 +153,7 @@ protected:
SimpleTensor<T> _reference{};
};
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool use_bias = true>
class WinogradConvolutionLayerFastMathValidationFixture : public framework::Fixture
{
public:
@@ -198,8 +198,9 @@ protected:
// Create and configure function
FunctionType conv;
- ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), bias.info(), dst.info(), info, act_info, true /* Enable fast math */)), framework::LogLevel::ERRORS);
- conv.configure(&src, &weights, &bias, &dst, info, act_info, true /* Enable fast math */);
+ ARM_COMPUTE_EXPECT(static_cast<bool>(conv.validate(src.info(), weights.info(), (use_bias) ? bias.info() : nullptr, dst.info(), info, act_info, true /* Enable fast math */)),
+ framework::LogLevel::ERRORS);
+ conv.configure(&src, &weights, (use_bias) ? &bias : nullptr, &dst, info, act_info, true /* Enable fast math */);
ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -239,7 +240,14 @@ protected:
// Fill reference
fill(src, 0, -1.f, 1.f);
fill(weights, 1, -1.f, 1.f);
- fill(bias, 2, -1.f, 1.f);
+ if(use_bias)
+ {
+ fill(bias, 2, -1.f, 1.f);
+ }
+ else
+ {
+ fill(bias, 2, 0.f, 0.f);
+ }
WinogradInfo winograd_info(Size2D(4U, 4U),
Size2D(weights_shape[0], weights_shape[1]),
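
The fixture change above makes the bias optional (a null bias pointer is passed when use_bias is false, and the reference bias is filled with zeros) while always validating and configuring the function with fast math enabled. A minimal usage sketch based on those calls (the snippet is not part of the commit; tensor creation is omitted and the names are illustrative):

    // Configure the Winograd convolution with fast math and without a bias,
    // mirroring the fixture's validate/configure sequence.
    NEWinogradConvolutionLayer conv;
    if(static_cast<bool>(conv.validate(src.info(), weights.info(), nullptr /* no bias */, dst.info(), info, act_info, true /* Enable fast math */)))
    {
        conv.configure(&src, &weights, nullptr /* no bias */, &dst, info, act_info, true /* Enable fast math */);
    }
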