diff options
author | Michalis Spyrou <michalis.spyrou@arm.com> | 2017-11-30 14:25:57 +0000 |
---|---|---|
committer | Anthony Barbier <anthony.barbier@arm.com> | 2018-11-02 16:42:17 +0000 |
commit | afa5d817b1d083837cd7ea30d32f845d82620c12 (patch) | |
tree | 1ca2a27ab7108b7137b96fc1547a8b5ac5d9c8f7 /tests | |
parent | 631c41a4e3645a948b0f597caa77e8fa91ca0efc (diff) | |
download | ComputeLibrary-afa5d817b1d083837cd7ea30d32f845d82620c12.tar.gz |
COMPMID-617 Add validation methods to Kernels
- NEActivationLayer
- NESoftmax
- NEDirectConvolutionLayer
- NENormalizationLayer
- NEPoolingLayer
Change-Id: Ib279f1c1b7f9247679b0d6593aed7393da8fe87b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/111335
Tested-by: BSG Visual Compute Jenkins server to access repositories on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Diffstat (limited to 'tests')
-rw-r--r-- | tests/validation/NEON/ActivationLayer.cpp | 30 | ||||
-rw-r--r-- | tests/validation/NEON/DirectConvolutionLayer.cpp | 62 | ||||
-rw-r--r-- | tests/validation/NEON/NormalizationLayer.cpp | 36 | ||||
-rw-r--r-- | tests/validation/NEON/PoolingLayer.cpp | 42 | ||||
-rw-r--r-- | tests/validation/NEON/SoftmaxLayer.cpp | 22 |
5 files changed, 192 insertions, 0 deletions
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp index b13cb59b9a..6ed482e4e7 100644 --- a/tests/validation/NEON/ActivationLayer.cpp +++ b/tests/validation/NEON/ActivationLayer.cpp @@ -137,6 +137,36 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(conca } } +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QS8, 2), + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), + TensorInfo(), + })), + framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), + ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU), + })), + framework::dataset::make("Expected", { false, true, false, false, true })), + input_info, output_info, act_info, expected) +{ + bool is_valid = bool(NEActivationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), act_info)); + ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + template <typename T> using 
NEActivationLayerFixture = ActivationValidationFixture<Tensor, Accessor, NEActivationLayer, T>; diff --git a/tests/validation/NEON/DirectConvolutionLayer.cpp b/tests/validation/NEON/DirectConvolutionLayer.cpp index 52e2b2c034..cd23ce4bc3 100644 --- a/tests/validation/NEON/DirectConvolutionLayer.cpp +++ b/tests/validation/NEON/DirectConvolutionLayer.cpp @@ -91,6 +91,68 @@ const auto data_qs16 = combine(datasets::SmallDirectConvolutionShapes(), TEST_SUITE(NEON) TEST_SUITE(DirectConvolutionLayer) +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/weights + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching input feature maps + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Unsupported kernel width + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non-rectangular weights dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid weights dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid stride + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases size + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid biases dimensions + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid output size + }), + framework::dataset::make("WeightsInfo",{ TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F16, 0), + TensorInfo(TensorShape(3U, 3U, 3U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(9U, 9U, 2U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(5U, 3U, 2U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(3U, 3U, 2U, 4U, 3U), 1, DataType::F32, 0), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), + 
TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(3U, 3U, 2U, 4U), 1, DataType::F32, 0), + })), + framework::dataset::make("BiasesInfo",{ TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(3U), 1, DataType::F32, 0), + TensorInfo(TensorShape(4U, 2U), 1, DataType::F32, 0), + TensorInfo(TensorShape(4U), 1, DataType::F32, 0), + })), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 4U), 1, DataType::F32, 0), + TensorInfo(TensorShape(26U, 11U, 4U), 1, DataType::F32, 0), + })), + framework::dataset::make("ConvInfo", { PadStrideInfo(1, 1, 0, 0), + PadStrideInfo(1, 1, 0, 0), + PadStrideInfo(1, 1, 0, 0), + PadStrideInfo(1, 1, 0, 0), + PadStrideInfo(1, 1, 0, 0), + PadStrideInfo(3, 3, 0, 0), + PadStrideInfo(1, 1, 0, 0), + PadStrideInfo(1, 1, 0, 0), + PadStrideInfo(1, 1, 0, 0), + })), + framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, false })), + input_info, weights_info, biases_info, output_info, conv_info, expected) +{ + bool is_valid = bool(NEDirectConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &biases_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), 
conv_info)); + ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + //TODO(COMPMID-415): Configuration tests? template <typename T> diff --git a/tests/validation/NEON/NormalizationLayer.cpp b/tests/validation/NEON/NormalizationLayer.cpp index 3afa52cb4c..4d504a8972 100644 --- a/tests/validation/NEON/NormalizationLayer.cpp +++ b/tests/validation/NEON/NormalizationLayer.cpp @@ -66,6 +66,42 @@ TEST_SUITE(NormalizationLayer) //TODO(COMPMID-415): Missing configuration? +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type input/output + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching shapes + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Even normalization + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Non implemented IN_MAP_2D + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16, 0), + TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32, 0), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), + TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32, 0), + })), + framework::dataset::make("NormInfo", { NormalizationLayerInfo(NormType::IN_MAP_1D, 5), + NormalizationLayerInfo(NormType::IN_MAP_1D, 5), + NormalizationLayerInfo(NormType::IN_MAP_1D, 4), + NormalizationLayerInfo(NormType::IN_MAP_2D, 5), + 
NormalizationLayerInfo(NormType::IN_MAP_1D, 5), + NormalizationLayerInfo(NormType::IN_MAP_1D, 5), + NormalizationLayerInfo(NormType::CROSS_MAP, 1), + })), + framework::dataset::make("Expected", { false, false, false, false, false, false, true })), + input_info, output_info, norm_info, expected) +{ + bool is_valid = bool(NENormalizationLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), norm_info)); + ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + template <typename T> using NENormalizationLayerFixture = NormalizationValidationFixture<Tensor, Accessor, NENormalizationLayer, T>; diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp index 78c326975c..79a732a59a 100644 --- a/tests/validation/NEON/PoolingLayer.cpp +++ b/tests/validation/NEON/PoolingLayer.cpp @@ -66,6 +66,48 @@ TEST_SUITE(PoolingLayer) //TODO(COMPMID-415): Configuration tests? 
+// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Mismatching data type + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Window shrink + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 4), // Mismatching fixed point position + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS16, 11), // Window shrink + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32, 0), // Invalid pad/size combination + TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32, 0), // Non-rectangular Global Pooling + TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), // Invalid output Global Pooling + TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32, 0), + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16, 0), + TensorInfo(TensorShape(25U, 10U, 2U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS8, 5), + TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::QS16, 11), + TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32, 0), + TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32, 0), + TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), + TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32, 0), + TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32, 0), + })), + framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), + PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), + PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), + PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 0, 0)), + PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 2, 0)), + PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(1, 1, 0, 2)), +
PoolingLayerInfo(PoolingType::AVG), + PoolingLayerInfo(PoolingType::MAX), + PoolingLayerInfo(PoolingType::AVG), + })), + framework::dataset::make("Expected", { false, false, false, false, false, false, false, false, true })), + input_info, output_info, pool_info, expected) +{ + bool is_valid = bool(NEPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)); + ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + template <typename T> using NEPoolingLayerFixture = PoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, T>; diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp index 1a303e14a3..0b688dfd1b 100644 --- a/tests/validation/NEON/SoftmaxLayer.cpp +++ b/tests/validation/NEON/SoftmaxLayer.cpp @@ -93,6 +93,28 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datase validate(dst.info()->padding(), padding); } +// *INDENT-OFF* +// clang-format off +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( + framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 2), // Mismatching fixed point + TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), + }), + framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16), + TensorInfo(TensorShape(27U, 11U, 2U), 1, DataType::F32), + TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QS8, 3), + TensorInfo(TensorShape(32U, 16U, 2U), 1, DataType::F32), + })), + framework::dataset::make("Expected", { false, false, false, true })), + input_info, output_info, expected) +{ + bool is_valid = bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false),
&output_info.clone()->set_is_resizable(false))); + ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS); +} +// clang-format on +// *INDENT-ON* + template <typename T> using NESoftmaxLayerFixture = SoftmaxValidationFixture<Tensor, Accessor, NESoftmaxLayer, T>; |