From d004a7a707feab36e51f51cfc9eb2cb70729d5ad Mon Sep 17 00:00:00 2001
From: SiCong Li
Date: Thu, 28 May 2020 15:26:41 +0100
Subject: COMPMID-3510 [Interface change] Fix definition of "axis" in NESoftmaxLayer and CLSoftmaxLayer

* [Interface change] "axis" argument is renamed to "reduce_end_axis"
* Unify the meaning of "axis" (now "reduce_end_axis") to be the last axis of
  the first n dimensions (inclusive) to reduce. This way the meaning of
  reduce_end_axis stays the same for both positive and negative values: it
  selects a dimension before which all dimensions (including the selected
  dimension) are reduced.

Change-Id: I4ab03bd8360b1cd8cac4998df0b1571064a9d4ed
Signed-off-by: SiCong Li
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3278
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 tests/validation/CL/LogSoftmaxLayer.cpp        | 20 ++++-----
 tests/validation/CL/SoftmaxLayer.cpp           | 61 +++++++++++++++++++-------
 tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp | 10 ++---
 tests/validation/NEON/LogSoftmaxLayer.cpp      | 18 ++++----
 tests/validation/NEON/SoftmaxLayer.cpp         | 43 ++++++++++--------
 tests/validation/reference/LogSoftmaxLayer.cpp | 16 +++----
 tests/validation/reference/LogSoftmaxLayer.h   |  4 +-
 tests/validation/reference/SoftmaxLayer.cpp    | 42 +++++++-----------
 tests/validation/reference/SoftmaxLayer.h      |  6 +--
 9 files changed, 123 insertions(+), 97 deletions(-)
(limited to 'tests')

diff --git a/tests/validation/CL/LogSoftmaxLayer.cpp b/tests/validation/CL/LogSoftmaxLayer.cpp
index 148613c5f8..39d2483ab8 100644
--- a/tests/validation/CL/LogSoftmaxLayer.cpp
+++ b/tests/validation/CL/LogSoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
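
The commit message above redefines the argument: reduce_end_axis names the last of the leading dimensions to reduce, with the same meaning for positive and negative values. A minimal sketch of that convention, assuming negative values wrap around the tensor rank; the helper name below is illustrative, not the library API (the library's own conversion, dim_index_2_num_dims, appears in the reference changes further down). This is also why the test datasets below change "Axis" { 1, 2, 3 } into "ReduceEndAxis" { 0, 1, 2 }.

    // Illustrative helper (not the library API): convert an inclusive
    // reduce_end_axis into the number of leading dimensions to collapse.
    // Negative values are assumed to wrap, so -1 selects the last dimension.
    static size_t reduce_end_axis_to_num_dims(int32_t reduce_end_axis, size_t rank)
    {
        const int32_t wrapped = reduce_end_axis < 0 ? reduce_end_axis + static_cast<int32_t>(rank) : reduce_end_axis;
        return static_cast<size_t>(wrapped) + 1; // dimensions [0, wrapped] are reduced
    }
    // rank 4: reduce_end_axis  0 -> 1 dimension reduced (innermost only)
    //         reduce_end_axis  2 -> 3 dimensions reduced
    //         reduce_end_axis -1 -> all 4 dimensions reduced
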
* * SPDX-License-Identifier: MIT * @@ -70,7 +70,7 @@ TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -78,7 +78,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture, framework::Data FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -86,7 +86,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture, framework::Data FIXTURE_DATA_TEST_CASE(Run4D, CLLogSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -97,7 +97,7 @@ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -105,7 +105,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture, framework::Dat FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -113,7 +113,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture, framework::Dat FIXTURE_DATA_TEST_CASE(Run4D, CLLogSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -130,7 +130,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerQuantizedFixture, fra framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), 
_reference, tolerance_qasymm8); @@ -139,7 +139,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerQuantizedFixture, fra framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.0f }))), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); @@ -148,7 +148,7 @@ FIXTURE_DATA_TEST_CASE(Run4D, CLLogSoftmaxLayerQuantizedFixture, framew framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.0f }))), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp index 5ee929f6b9..432720ca14 100644 --- a/tests/validation/CL/SoftmaxLayer.cpp +++ b/tests/validation/CL/SoftmaxLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019 ARM Limited. + * Copyright (c) 2017-2020 ARM Limited. * * SPDX-License-Identifier: MIT * @@ -109,7 +109,7 @@ DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::Sof // *INDENT-OFF* // clang-format off -DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( +DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Mismatching data types TensorInfo(TensorShape(27U, 13U), 1, DataType::F32), // Mismatching shapes TensorInfo(TensorShape(27U, 13U), 1, DataType::QASYMM8, // Invalid output quantization info @@ -120,6 +120,10 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 12)), TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED, + QuantizationInfo(1.f/256, 12)), + TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED, // Invalid axis high + QuantizationInfo(1.f/256, 12)), + TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED, // Invalid axis low QuantizationInfo(1.f/256, 12)) }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U), 1, DataType::F16), @@ -133,11 +137,38 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip( QuantizationInfo(1.f/256, 0)), TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED, QuantizationInfo(1.f/256, -128)), + TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED, // Invalid axis high + QuantizationInfo(1.f/256, -128)), + TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED, // Invalid axis low + QuantizationInfo(1.f/256, -128)), })), - framework::dataset::make("Expected", { false, false, false, false, false, true, true, true })), - input_info, output_info, expected) + framework::dataset::make("beta", { 1.0, + 2.0, + 1.0, + 2.0, + 1.0, + 2.0, + 1.0, + 2.0, + 1.0, + 2.0, + })), + framework::dataset::make("reduce_end_axis", { + 0, + 0, + 0, + 0, + 1, + 0, + 1, + 0, + 2, + -1, + })), + framework::dataset::make("Expected", { false, false, false, false, false, true, true, true, false, false })), + input_info, output_info, beta, reduce_end_axis, expected) { - 
ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bool(CLSoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, reduce_end_axis)) == expected, framework::LogLevel::ERRORS); } // clang-format on // *INDENT-ON* @@ -150,7 +181,7 @@ TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -158,7 +189,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture, framework::Dataset FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -166,7 +197,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture, framework::Dataset FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f16); @@ -177,7 +208,7 @@ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -185,7 +216,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture, framework::Datase FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -193,7 +224,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture, framework::Datase FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_f32); @@ -210,7 +241,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, 
CLSoftmaxLayerQuantizedFixture, framew framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); @@ -219,7 +250,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerQuantizedFixture, framew framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.0f }))), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); @@ -228,7 +259,7 @@ FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerQuantizedFixture, framework framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.0f }))), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8); @@ -242,7 +273,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerQuantizedFixture, framewo framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { 1, 2 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1 }))) { // Validate output validate(CLAccessor(_target), _reference, tolerance_qasymm8_signed, tolerance_number_qasymm8_signed); diff --git a/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp b/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp index 3b55717372..50f6a74313 100644 --- a/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp +++ b/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 ARM Limited. + * Copyright (c) 2017-2020 ARM Limited. 
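
The CL Validate case above now forwards beta and reduce_end_axis to CLSoftmaxLayer::validate. A hedged usage sketch of that call with an illustrative 2D configuration, reducing only the innermost dimension:

    // Sketch only: validate a softmax that reduces dimension 0 (reduce_end_axis = 0).
    const TensorInfo input(TensorShape(32U, 13U), 1, DataType::F32);
    const TensorInfo output(TensorShape(32U, 13U), 1, DataType::F32);
    const Status     status = CLSoftmaxLayer::validate(&input, &output, /* beta */ 1.0f, /* reduce_end_axis */ 0);
    const bool       ok     = bool(status); // expected to hold true for this configuration
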
* * SPDX-License-Identifier: MIT * @@ -89,7 +89,7 @@ TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, GCSoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", 1.0f)), - framework::dataset::make("Axis", 1))) + framework::dataset::make("ReduceEndAxis", 0))) { // Validate output validate(GCAccessor(_target), _reference, tolerance_f16); @@ -97,7 +97,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, GCSoftmaxLayerFixture, framew FIXTURE_DATA_TEST_CASE(RunLarge, GCSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", 1.0f)), - framework::dataset::make("Axis", 1))) + framework::dataset::make("ReduceEndAxis", 0))) { // Validate output validate(GCAccessor(_target), _reference, tolerance_f16); @@ -108,7 +108,7 @@ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall, GCSoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", 1.0f)), - framework::dataset::make("Axis", 1))) + framework::dataset::make("ReduceEndAxis", 0))) { // Validate output validate(GCAccessor(_target), _reference, tolerance_f32); @@ -116,7 +116,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, GCSoftmaxLayerFixture, framework::Datase FIXTURE_DATA_TEST_CASE(RunLarge, GCSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", 1.0f)), - framework::dataset::make("Axis", 1))) + framework::dataset::make("ReduceEndAxis", 0))) { // Validate output validate(GCAccessor(_target), _reference, tolerance_f32); diff --git a/tests/validation/NEON/LogSoftmaxLayer.cpp b/tests/validation/NEON/LogSoftmaxLayer.cpp index 43e98ae4ab..22b56fd127 100644 --- a/tests/validation/NEON/LogSoftmaxLayer.cpp +++ b/tests/validation/NEON/LogSoftmaxLayer.cpp @@ -71,7 +71,7 @@ TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, NELogSoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f16); @@ -79,7 +79,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NELogSoftmaxLayerFixture, framework::Data FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f16); @@ -87,7 +87,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture, framework::Da FIXTURE_DATA_TEST_CASE(RunLarge, NELogSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1 }))) 
+ framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f16); @@ -99,7 +99,7 @@ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f32); @@ -107,7 +107,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerFixture, framework::D FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f32); @@ -115,7 +115,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture, framework::D FIXTURE_DATA_TEST_CASE(RunLarge, NELogSoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f32); @@ -132,7 +132,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerQuantizedFixture, f framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -141,7 +141,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerQuantizedFixture, f framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -150,7 +150,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NELogSoftmaxLayerQuantizedFixture, fra framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.0f }))), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp index 8af3847cf8..1465af441b 100644 --- a/tests/validation/NEON/SoftmaxLayer.cpp +++ b/tests/validation/NEON/SoftmaxLayer.cpp @@ -73,7 +73,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( TensorInfo(TensorShape(32U, 13U), 1, DataType::F32), TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 12)), - TensorInfo(TensorShape(32U, 13U), 
1, DataType::QASYMM8, //Invalid axis value + TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8, //Invalid axis high + QuantizationInfo(1.f/256, 12)), + TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8, //Invalid axis low QuantizationInfo(1.f/256, 12)), }), framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(27U, 13U), 1, DataType::F16), @@ -85,6 +87,8 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( QuantizationInfo(1.f/256, 0)), TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8, QuantizationInfo(1.f/256, 0)), + TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8, + QuantizationInfo(1.f/256, 0)), })), framework::dataset::make("beta", { 1.0, 2.0, @@ -94,17 +98,18 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip( 2.0, 1.0, })), - framework::dataset::make("axis", { 1, - 1, - 1, + framework::dataset::make("reduce_end_axis", { 0, + 0, + 0, -1, - 1, + 0, + 2, -3, })), - framework::dataset::make("Expected", { false, false, false, true, true, false })), - input_info, output_info, beta, axis, expected) + framework::dataset::make("Expected", { false, false, false, true, true, false, false })), + input_info, output_info, beta, reduce_end_axis, expected) { - ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, axis)) == expected, framework::LogLevel::ERRORS); + ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, reduce_end_axis)) == expected, framework::LogLevel::ERRORS); } // clang-format on // *INDENT-ON* @@ -118,7 +123,7 @@ TEST_SUITE(FP16) FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f16); @@ -126,7 +131,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture, framework::Dataset FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f16); @@ -134,7 +139,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture, framework::Datas FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f16); @@ -146,7 +151,7 @@ TEST_SUITE(FP32) FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1 }))) + 
framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f32); @@ -154,7 +159,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture, framework::Data FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { 0, 1, 2 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f32); @@ -162,7 +167,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture, framework::Data FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Beta", { 1.0f, 2.0f })), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_f32); @@ -179,7 +184,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerQuantizedFixture, fram framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -188,7 +193,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture, fram framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { -1, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { -1, 1, 2 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -197,7 +202,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerQuantizedFixture, framew framework::dataset::make("DataType", DataType::QASYMM8)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.0f }))), - framework::dataset::make("Axis", { 1 }))) + framework::dataset::make("ReduceEndAxis", { 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8); @@ -209,7 +214,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerQuantizedFixture, frame framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { -1, 1 }))) + framework::dataset::make("ReduceEndAxis", { -1, 0 }))) { // Validate output validate(Accessor(_target), _reference, tolerance_qasymm8_signed); @@ -218,7 +223,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture, frame framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }), framework::dataset::make("Beta", { 1.0f, 2.f }))), - framework::dataset::make("Axis", { -2, 2, 3 }))) + framework::dataset::make("ReduceEndAxis", { -2, 1, 2 }))) { // Validate output validate(Accessor(_target), _reference, 
tolerance_qasymm8_signed); diff --git a/tests/validation/reference/LogSoftmaxLayer.cpp b/tests/validation/reference/LogSoftmaxLayer.cpp index edb208e6ae..8dd8d45a86 100644 --- a/tests/validation/reference/LogSoftmaxLayer.cpp +++ b/tests/validation/reference/LogSoftmaxLayer.cpp @@ -35,26 +35,26 @@ namespace validation namespace reference { template ::value, int>::type> -SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t axis) +SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis) { - return softmax_layer_generic(src, beta, axis, true); + return softmax_layer_generic(src, beta, reduce_end_axis, true); } template < typename T, typename std::enable_if < std::is_same::value || std::is_same::value, int >::type > -SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t axis) +SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis) { const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), true); SimpleTensor src_tmp = convert_from_asymmetric(src); - SimpleTensor dst_tmp = log_softmax_layer(src_tmp, beta, axis); + SimpleTensor dst_tmp = log_softmax_layer(src_tmp, beta, reduce_end_axis); SimpleTensor dst = convert_to_asymmetric(dst_tmp, output_quantization_info); return dst; } -template SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t axis); -template SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t axis); -template SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t axis); -template SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t axis); +template SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis); +template SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis); +template SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis); +template SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/reference/LogSoftmaxLayer.h b/tests/validation/reference/LogSoftmaxLayer.h index 48ffdcfbcc..d9a439850e 100644 --- a/tests/validation/reference/LogSoftmaxLayer.h +++ b/tests/validation/reference/LogSoftmaxLayer.h @@ -36,10 +36,10 @@ namespace validation namespace reference { template ::value, int>::type = 0> -SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t axis = -1); +SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis = 0); template < typename T, typename std::enable_if < std::is_same::value || std::is_same::value, int >::type = 0 > -SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t axis = -1); +SimpleTensor log_softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis = 0); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp index 2fe1faef50..9a8d46d516 100644 --- a/tests/validation/reference/SoftmaxLayer.cpp +++ b/tests/validation/reference/SoftmaxLayer.cpp @@ -23,6 +23,7 @@ */ #include "SoftmaxLayer.h" +#include "arm_compute/core/Helpers.h" #include "arm_compute/core/Types.h" namespace arm_compute @@ -34,32 +35,21 @@ namespace validation 
namespace reference { template ::value, int>::type> -SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, int32_t axis, bool is_log) +SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, int32_t reduce_end_axis, bool is_log) { // Create reference SimpleTensor dst{ src.shape(), src.data_type(), 1 }; - // Negative index is used to specify axis from the end (e.g. -1 for the last axis). - if(axis < 0) - { - axis += src.shape().num_dimensions(); - } + // Convert reduce-before axis (inclusive) to first n axes to reduce + const size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast(src.shape().num_dimensions())); // Compute reference. Lower dims are the collapsing of the first axis // dimensions (i.e., the flattened dimension of each batch). The upper dims are // instead the batches we want to normalize - int lower_dims = 1; - for(size_t i = 0; i < static_cast(axis); ++i) - { - lower_dims *= src.shape()[i]; - } + const int lower_dims = src.shape().total_size_lower(first_n_reduce_axes); - int upper_dims = 1; - for(size_t i = static_cast(axis); i < TensorShape::num_max_dimensions; ++i) - { - upper_dims *= src.shape()[i]; - } + const int upper_dims = src.shape().total_size_upper(first_n_reduce_axes); #if defined(_OPENMP) #pragma omp parallel for @@ -107,30 +97,30 @@ SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, in return dst; } -template SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, int32_t axis, bool is_log); -template SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, int32_t axis, bool is_log); +template SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, int32_t reduce_end_axis, bool is_log); +template SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, int32_t reduce_end_axis, bool is_log); template ::value, int>::type> -SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t axis) +SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis) { - return softmax_layer_generic(src, beta, axis, false); + return softmax_layer_generic(src, beta, reduce_end_axis, false); } template < typename T, typename std::enable_if < std::is_same::value || std::is_same::value, int >::type > -SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t axis) +SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis) { const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), false); SimpleTensor src_tmp = convert_from_asymmetric(src); - SimpleTensor dst_tmp = softmax_layer(src_tmp, beta, axis); + SimpleTensor dst_tmp = softmax_layer(src_tmp, beta, reduce_end_axis); SimpleTensor dst = convert_to_asymmetric(dst_tmp, output_quantization_info); return dst; } -template SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t axis); -template SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t axis); -template SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t axis); -template SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t axis); +template SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis); +template SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis); +template SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis); 
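
The reference implementation above now derives the flattening from reduce_end_axis via dim_index_2_num_dims and splits the shape with total_size_lower / total_size_upper. A standalone sketch of the same collapsing scheme, reusing the illustrative helper from the first sketch (names are not the library's API):

    // Dimensions [0, reduce_end_axis] are flattened into the softmax vector length
    // ("lower_dims"); the remaining dimensions form independent batches
    // ("upper_dims"), each normalised separately.
    static std::pair<size_t, size_t> collapse_for_softmax(const TensorShape &shape, int32_t reduce_end_axis)
    {
        const size_t first_n_reduce_axes = reduce_end_axis_to_num_dims(reduce_end_axis, shape.num_dimensions());
        size_t       lower_dims          = 1;
        size_t       upper_dims          = 1;
        for(size_t i = 0; i < shape.num_dimensions(); ++i)
        {
            (i < first_n_reduce_axes ? lower_dims : upper_dims) *= shape[i];
        }
        return { lower_dims, upper_dims };
    }
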
+template SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis); } // namespace reference } // namespace validation } // namespace test diff --git a/tests/validation/reference/SoftmaxLayer.h b/tests/validation/reference/SoftmaxLayer.h index f819853d95..fde19943bf 100644 --- a/tests/validation/reference/SoftmaxLayer.h +++ b/tests/validation/reference/SoftmaxLayer.h @@ -36,13 +36,13 @@ namespace validation namespace reference { template ::value, int>::type = 0> -SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, int32_t axis, bool is_log = false); +SimpleTensor softmax_layer_generic(const SimpleTensor &src, float beta, int32_t reduce_end_axis, bool is_log = false); template ::value, int>::type = 0> -SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t axis = -1); +SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis = 0); template < typename T, typename std::enable_if < std::is_same::value || std::is_same::value, int >::type = 0 > -SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t axis = -1); +SimpleTensor softmax_layer(const SimpleTensor &src, float beta, int32_t reduce_end_axis = 0); } // namespace reference } // namespace validation } // namespace test -- cgit v1.2.1
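
Two consequences of the new convention are worth noting. First, the reference headers above now default to reduce_end_axis = 0, i.e. softmax over the innermost dimension unless stated otherwise. Second, the "Invalid axis high" / "Invalid axis low" cases added to the CL and NEON Validate tests bound the accepted range for the 2D inputs used there: 0 and -1 pass while 2 and -3 are rejected. A sketch of the range check this implies (the function name is illustrative, and the exact bounds are an assumption inferred from those cases):

    // Implied validity range for a rank-N input: reduce_end_axis in [-N, N-1].
    static bool reduce_end_axis_is_valid(int32_t reduce_end_axis, size_t rank)
    {
        return reduce_end_axis >= -static_cast<int32_t>(rank) && reduce_end_axis < static_cast<int32_t>(rank);
    }
    // TensorShape(32U, 13U) above has rank 2:
    //   reduce_end_axis  0 -> valid      reduce_end_axis  2 -> invalid (high)
    //   reduce_end_axis -1 -> valid      reduce_end_axis -3 -> invalid (low)
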