Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/CL/LogSoftmaxLayer.cpp          14
-rw-r--r--  tests/validation/CL/SoftmaxLayer.cpp             24
-rw-r--r--  tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp   18
-rw-r--r--  tests/validation/NEON/LogSoftmaxLayer.cpp        12
-rw-r--r--  tests/validation/NEON/SoftmaxLayer.cpp           24
-rw-r--r--  tests/validation/fixtures/SoftmaxLayerFixture.h  15
-rw-r--r--  tests/validation/reference/LogSoftmaxLayer.cpp   61
-rw-r--r--  tests/validation/reference/LogSoftmaxLayer.h     47
-rw-r--r--  tests/validation/reference/SoftmaxLayer.cpp      89
-rw-r--r--  tests/validation/reference/SoftmaxLayer.h         6
10 files changed, 103 insertions(+), 207 deletions(-)
diff --git a/tests/validation/CL/LogSoftmaxLayer.cpp b/tests/validation/CL/LogSoftmaxLayer.cpp
index 15466affc4..8fdc745d13 100644
--- a/tests/validation/CL/LogSoftmaxLayer.cpp
+++ b/tests/validation/CL/LogSoftmaxLayer.cpp
@@ -59,7 +59,7 @@ TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -1 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -75,7 +75,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture<half>, framework::Data
FIXTURE_DATA_TEST_CASE(Run4D, CLLogSoftmaxLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -3, 2 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -86,7 +86,7 @@ TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLLogSoftmaxLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -99,10 +99,10 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLLogSoftmaxLayerFixture<float>, framework::Dat
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
}
-FIXTURE_DATA_TEST_CASE(Run4D, CLLogSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(),
- framework::dataset::make("DataType", DataType::F32)),
- framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+FIXTURE_DATA_TEST_CASE(Run4D, CLLogSoftmaxLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayer4DShapes(),
+ framework::dataset::make("DataType", DataType::F32)),
+ framework::dataset::make("Beta", { 1.0f, 2.0f })),
+ framework::dataset::make("Axis", { 0, -4, 3 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
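The Axis datasets above now exercise negative values alongside non-negative ones. A negative axis wraps around the tensor rank (the reference implementation further down does this with wrap_around(axis, n_dims)), so -1 on a 2D shape resolves to dimension 1 and -3 on a 4D shape to dimension 1. A minimal standalone sketch of that resolution (resolve_axis is a hypothetical stand-in, not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Wrap a possibly-negative axis into [0, n_dims), mirroring the
    // wrap_around(axis, n_dims) call in the reference implementation below.
    int32_t resolve_axis(int32_t axis, int32_t n_dims)
    {
        assert(axis >= -n_dims && axis < n_dims); // range the validate() tests enforce
        return axis < 0 ? axis + n_dims : axis;
    }

    int main()
    {
        assert(resolve_axis(-1, 2) == 1); // { 0, -1 } on a 2D shape covers both dimensions
        assert(resolve_axis(-3, 4) == 1); // { 0, -3, 2 } on a 4D shape tests axes 0, 1 and 2
        assert(resolve_axis(-4, 4) == 0); // -4 aliases axis 0 on a 4D shape
        return 0;
    }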
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
index 90c3058c5d..0e4952b02a 100644
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ b/tests/validation/CL/SoftmaxLayer.cpp
@@ -129,9 +129,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
QuantizationInfo(1.f/256, 0)),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED,
QuantizationInfo(1.f/256, -128)),
- TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED, // Invalid axis high
+ TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED,
QuantizationInfo(1.f/256, -128)),
- TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED, // Invalid axis low
+ TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8_SIGNED,
QuantizationInfo(1.f/256, -128)),
})),
framework::dataset::make("beta", { 1.0,
@@ -151,11 +151,11 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
0,
0,
0,
+ 1,
0,
- 0,
- 0,
- 2,
-1,
+ 2,
+ -3,
})),
framework::dataset::make("Expected", { false, false, false, false, false, true, true, true, false, false })),
input_info, output_info, beta, axis, expected)
@@ -173,7 +173,7 @@ TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -1 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -189,7 +189,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture<half>, framework::Dataset
FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -1, 2 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -200,7 +200,7 @@ TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -216,7 +216,7 @@ FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture<float>, framework::Datase
FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayer4DShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -2, 3 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_f32);
@@ -233,7 +233,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerQuantizedFixture<uint8_t>, framew
framework::dataset::make("DataType", DataType::QASYMM8)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -251,7 +251,7 @@ FIXTURE_DATA_TEST_CASE(Run4D, CLSoftmaxLayerQuantizedFixture<uint8_t>, framework
framework::dataset::make("DataType", DataType::QASYMM8)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.0f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -4, 1 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8);
@@ -265,7 +265,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerQuantizedFixture<int8_t>, framewo
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1 })))
{
// Validate output
validate(CLAccessor(_target), _reference, tolerance_qasymm8_signed);
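At the API level these datasets feed the axis argument straight into the function under test. A hedged usage sketch, assuming CLSoftmaxLayer::configure() takes its arguments in the same (input, output, beta, axis) order as the validate() overload exercised in the Validate cases above:

    // Illustrative only: normalize a 32x13 F32 tensor along dimension 1,
    // matching the new { 0, 1 } Axis dataset. Assumes the CL backend has
    // been initialised, e.g. via CLScheduler::get().default_init().
    CLTensor src;
    CLTensor dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 13U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 13U), 1, DataType::F32));

    CLSoftmaxLayer softmax;
    softmax.configure(&src, &dst, /*beta=*/1.0f, /*axis=*/1);

    src.allocator()->allocate();
    dst.allocator()->allocate();
    // ... fill src ...
    softmax.run();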
diff --git a/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp b/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
index af92cff813..2a8a33eaf7 100644
--- a/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/SoftmaxLayer.cpp
@@ -89,7 +89,7 @@ TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, GCSoftmaxLayerFixture<half_float::half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", 1.0f)),
- framework::dataset::make("ReduceEndAxis", 0)))
+ framework::dataset::make("Axis", 0)))
{
// Validate output
validate(GCAccessor(_target), _reference, tolerance_f16);
@@ -97,18 +97,18 @@ FIXTURE_DATA_TEST_CASE(RunSmall, GCSoftmaxLayerFixture<half_float::half>, framew
FIXTURE_DATA_TEST_CASE(RunLarge, GCSoftmaxLayerFixture<half_float::half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", 1.0f)),
- framework::dataset::make("ReduceEndAxis", 0)))
+ framework::dataset::make("Axis", 0)))
{
// Validate output
validate(GCAccessor(_target), _reference, tolerance_f16);
}
-TEST_SUITE_END()
+TEST_SUITE_END() // FP16
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, GCSoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", 1.0f)),
- framework::dataset::make("ReduceEndAxis", 0)))
+ framework::dataset::make("Axis", 0)))
{
// Validate output
validate(GCAccessor(_target), _reference, tolerance_f32);
@@ -116,16 +116,16 @@ FIXTURE_DATA_TEST_CASE(RunSmall, GCSoftmaxLayerFixture<float>, framework::Datase
FIXTURE_DATA_TEST_CASE(RunLarge, GCSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::SoftmaxLayerLargeShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", 1.0f)),
- framework::dataset::make("ReduceEndAxis", 0)))
+ framework::dataset::make("Axis", 0)))
{
// Validate output
validate(GCAccessor(_target), _reference, tolerance_f32);
}
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float

-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // SoftmaxLayer
+TEST_SUITE_END() // GC
} // namespace validation
} // namespace test
} // namespace arm_compute
diff --git a/tests/validation/NEON/LogSoftmaxLayer.cpp b/tests/validation/NEON/LogSoftmaxLayer.cpp
index 3f85e3f7a2..a7ab033359 100644
--- a/tests/validation/NEON/LogSoftmaxLayer.cpp
+++ b/tests/validation/NEON/LogSoftmaxLayer.cpp
@@ -71,7 +71,7 @@ TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NELogSoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
@@ -79,7 +79,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NELogSoftmaxLayerFixture<half>, framework::Data
FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -3, 2 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
@@ -99,7 +99,7 @@ TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
@@ -107,7 +107,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerFixture<float>, framework::D
FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 2, -1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
@@ -132,7 +132,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NELogSoftmaxLayerQuantizedFixture<uint8_t>, f
framework::dataset::make("DataType", DataType::QASYMM8)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -141,7 +141,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NELogSoftmaxLayerQuantizedFixture<uint8_t>, f
framework::dataset::make("DataType", DataType::QASYMM8)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -1, 1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index 70203d9ce9..2a9e30604e 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -73,6 +73,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 12)),
+ TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8, //Invalid axis high
QuantizationInfo(1.f/256, 12)),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8, //Invalid axis low
@@ -85,6 +86,7 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 0)),
+ TensorInfo(TensorShape(32U, 13U), 1, DataType::F32),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8,
QuantizationInfo(1.f/256, 0)),
TensorInfo(TensorShape(32U, 13U), 1, DataType::QASYMM8,
@@ -95,18 +97,20 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
1.0,
2.0,
1.0,
+ 1.0,
2.0,
1.0,
})),
framework::dataset::make("axis", { 0,
0,
0,
+ 1,
0,
- 0,
+ -1,
2,
-3,
})),
- framework::dataset::make("Expected", { false, false, false, true, true, false, false })),
+ framework::dataset::make("Expected", { false, false, false, true, true, true, false, false })),
input_info, output_info, beta, axis, expected)
{
ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), beta, axis)) == expected, framework::LogLevel::ERRORS);
@@ -123,7 +127,7 @@ TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
@@ -131,7 +135,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture<half>, framework::Dataset
FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
framework::dataset::make("DataType", DataType::F16)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 2, -1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f16);
@@ -151,7 +155,7 @@ TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SoftmaxLayerSmallShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
@@ -159,7 +163,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerFixture<float>, framework::Data
FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::Small4DShapes(),
framework::dataset::make("DataType", DataType::F32)),
framework::dataset::make("Beta", { 1.0f, 2.0f })),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -2, 3 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_f32);
@@ -184,7 +188,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerQuantizedFixture<uint8_t>, fram
framework::dataset::make("DataType", DataType::QASYMM8)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -193,7 +197,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture<uint8_t>, fram
framework::dataset::make("DataType", DataType::QASYMM8)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1, -2 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -214,7 +218,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerQuantizedFixture<int8_t>, frame
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, -1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
@@ -223,7 +227,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture<int8_t>, frame
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 0 })))
+ framework::dataset::make("Axis", { 0, 1, -1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
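The extra F32 entry and the reworked axis/Expected lists encode the new contract checked by validate(): for a rank-2 input the accepted axis range is [-2, 2), so 0, 1 and -1 pass while 2 and -3 are rejected. Condensed into direct calls (illustrative, using the same validate() overload as the test body above):

    const TensorInfo info(TensorShape(32U, 13U), 1, DataType::F32);

    // In-range axes, including a negative one that wraps to dimension 1
    ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&info, &info, 1.0f, /*axis=*/1))  == true,  framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&info, &info, 1.0f, /*axis=*/-1)) == true,  framework::LogLevel::ERRORS);

    // Out-of-range axes: >= n_dims or < -n_dims
    ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&info, &info, 1.0f, /*axis=*/2))  == false, framework::LogLevel::ERRORS);
    ARM_COMPUTE_EXPECT(bool(NESoftmaxLayer::validate(&info, &info, 1.0f, /*axis=*/-3)) == false, framework::LogLevel::ERRORS);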
diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h
index 29a3ed2cd0..30356d648d 100644
--- a/tests/validation/fixtures/SoftmaxLayerFixture.h
+++ b/tests/validation/fixtures/SoftmaxLayerFixture.h
@@ -32,7 +32,6 @@
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
-#include "tests/validation/reference/LogSoftmaxLayer.h"
#include "tests/validation/reference/SoftmaxLayer.h"
#include <random>
@@ -52,8 +51,8 @@ public:
{
_quantization_info = quantization_info;
- _target = compute_target(shape, data_type, quantization_info, beta, axis);
_reference = compute_reference(shape, data_type, quantization_info, beta, axis);
+ _target = compute_target(shape, data_type, quantization_info, beta, axis);
}
protected:
@@ -62,7 +61,7 @@ protected:
{
if(!is_data_type_quantized(tensor.data_type()))
{
- std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
+ std::uniform_real_distribution<> distribution(-10.f, 10.f);
library->fill(tensor, distribution, 0);
}
else // data type is quantized_asymmetric (signed or unsigned)
@@ -111,14 +110,7 @@ protected:
// Fill reference
fill(src);
- if(IS_LOG)
- {
- return reference::log_softmax_layer<T>(src, beta, axis);
- }
- else
- {
- return reference::softmax_layer<T>(src, beta, axis);
- }
+ return reference::softmax_layer<T>(src, beta, axis, IS_LOG);
}
TensorType _target{};
@@ -155,6 +147,7 @@ public:
axis);
}
};
+
} // namespace validation
} // namespace test
} // namespace arm_compute
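With the is_log flag folded into the reference (see the SoftmaxLayer.cpp/.h changes below), the fixture no longer needs the IS_LOG branch; both variants go through one entry point. The two calls it now makes, sketched with the new signature:

    // Sketch of the unified reference call; signature as declared in
    // tests/validation/reference/SoftmaxLayer.h below.
    SimpleTensor<float> softmax     = reference::softmax_layer<float>(src, /*beta=*/1.0f, /*axis=*/0, /*is_log=*/false);
    SimpleTensor<float> log_softmax = reference::softmax_layer<float>(src, /*beta=*/1.0f, /*axis=*/0, /*is_log=*/true);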
diff --git a/tests/validation/reference/LogSoftmaxLayer.cpp b/tests/validation/reference/LogSoftmaxLayer.cpp
deleted file mode 100644
index 8d3b8f7579..0000000000
--- a/tests/validation/reference/LogSoftmaxLayer.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "LogSoftmaxLayer.h"
-#include "SoftmaxLayer.h"
-
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
-{
- return softmax_layer_generic<T>(src, beta, reduce_end_axis, true);
-}
-
-template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
-{
- const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), true);
-
- SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
- SimpleTensor<float> dst_tmp = log_softmax_layer<float>(src_tmp, beta, reduce_end_axis);
- SimpleTensor<T> dst = convert_to_asymmetric<T>(dst_tmp, output_quantization_info);
- return dst;
-}
-
-template SimpleTensor<float> log_softmax_layer(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<half> log_softmax_layer(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<uint8_t> log_softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<int8_t> log_softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t reduce_end_axis);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
diff --git a/tests/validation/reference/LogSoftmaxLayer.h b/tests/validation/reference/LogSoftmaxLayer.h
deleted file mode 100644
index db945074a2..0000000000
--- a/tests/validation/reference/LogSoftmaxLayer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_LOG_SOFTMAX_LAYER_H
-#define ARM_COMPUTE_TEST_LOG_SOFTMAX_LAYER_H
-
-#include "tests/SimpleTensor.h"
-#include "tests/validation/Helpers.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
-
-template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type = 0 >
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SOFTMAX_LAYER_H */
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 00206766f8..3fbac32a9b 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
+#include "utils/TypePrinter.h"
namespace arm_compute
{
@@ -35,39 +36,43 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis, bool is_log)
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
{
// Create reference
SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
- // Convert reduce-before axis (inclusive) to first n axes to reduce
- const size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast<int32_t>(src.shape().num_dimensions()));
+ const int32_t n_dims = static_cast<int32_t>(src.shape().num_dimensions());
+ ARM_COMPUTE_ERROR_ON(axis < -n_dims || axis >= n_dims);
- // Compute reference. Lower dims are the collapsing of the first axis
- // dimensions (i.e., the flattened dimension of each batch). The upper dims are
- // instead the batches we want to normalize
+ const unsigned int actual_axis = static_cast<unsigned int>(wrap_around(axis, n_dims));
+ Window window;
+ window.use_tensor_dimensions(src.shape());
+ const unsigned int axis_dimension = src.shape()[actual_axis];
+ window.set(actual_axis, Window::Dimension(0, 1, 1));
- const int lower_dims = src.shape().total_size_lower(first_n_reduce_axes);
-
- const int upper_dims = src.shape().total_size_upper(first_n_reduce_axes);
-
-#if defined(_OPENMP)
- #pragma omp parallel for
-#endif /* _OPENMP */
- for(int r = 0; r < upper_dims; ++r)
+ execute_window_loop(window, [&](const Coordinates & id)
{
- const T *src_row_ptr = src.data() + r * lower_dims;
- T *dst_row_ptr = dst.data() + r * lower_dims;
-
- // Find max
- const T max = *std::max_element(src_row_ptr, src_row_ptr + lower_dims);
+ // Find max along axis
+ Coordinates offset(id);
+ offset.set(actual_axis, 0);
+ T max = *reinterpret_cast<const T *>(src(offset));
+ for(unsigned int axis_id = 1; axis_id < axis_dimension; ++axis_id)
+ {
+ offset.set(actual_axis, axis_id);
+ const T val = *reinterpret_cast<const T *>(src(offset));
+ if(val > max)
+ {
+ max = val;
+ }
+ }
// Regularize
T sum(0.f);
- std::transform(src_row_ptr, src_row_ptr + lower_dims, dst_row_ptr, [&sum, max, beta, is_log](T val)
+ for(unsigned int axis_id = 0; axis_id < axis_dimension; ++axis_id)
{
- T res{ (val - max) *beta };
-
+ offset.set(actual_axis, axis_id);
+ const T val = *reinterpret_cast<const T *>(src(offset));
+ T res{ (val - max) *beta };
if(is_log)
{
sum += std::exp(res);
@@ -77,50 +82,52 @@ SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, in
res = std::exp(res);
sum += res;
}
- return res;
- });
+ *reinterpret_cast<T *>(dst(offset)) = res;
+ }
// Normalize
- std::transform(dst_row_ptr, dst_row_ptr + lower_dims, dst_row_ptr, [sum, is_log](T val)
+ for(unsigned int axis_id = 0; axis_id < axis_dimension; ++axis_id)
{
+ offset.set(actual_axis, axis_id);
+ const T val = *reinterpret_cast<const T *>(dst(offset));
if(is_log)
{
- return val - static_cast<T>(std::log(sum));
+ *reinterpret_cast<T *>(dst(offset)) = val - static_cast<T>(std::log(sum));
}
else
{
- return val / sum;
+ *reinterpret_cast<T *>(dst(offset)) = val / sum;
}
- });
- }
-
+ }
+ });
return dst;
}
-template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis, bool is_log);
-template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis, bool is_log);
+template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t axis, bool is_log);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
{
- return softmax_layer_generic<T>(src, beta, reduce_end_axis, false);
+ return softmax_layer_generic<T>(src, beta, axis, is_log);
}
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
{
- const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), false);
+ const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), is_log);
SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
- SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, reduce_end_axis);
+ SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, axis, is_log);
SimpleTensor<T> dst = convert_to_asymmetric<T>(dst_tmp, output_quantization_info);
return dst;
}
-template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t axis, bool is_log);
+
} // namespace reference
} // namespace validation
} // namespace test
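The rewritten reference walks a Window with the normalization axis collapsed to a single step, then applies the usual max-subtracted softmax to each 1D slice along that axis; in log mode the values stay in log space and log(sum) is subtracted instead of dividing by the sum. The per-slice math, extracted into a framework-free sketch:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Framework-free sketch of the per-slice computation above: subtract the
    // slice max for numerical stability, scale by beta, exponentiate, then
    // normalize (or subtract log(sum) when is_log is set).
    std::vector<float> softmax_slice(std::vector<float> x, float beta, bool is_log)
    {
        const float max = *std::max_element(x.begin(), x.end());

        float sum = 0.f;
        for(float &v : x)
        {
            v = (v - max) * beta;
            if(is_log)
            {
                sum += std::exp(v); // value stays in log space
            }
            else
            {
                v = std::exp(v);
                sum += v;
            }
        }

        for(float &v : x)
        {
            v = is_log ? v - std::log(sum) : v / sum;
        }
        return x;
    }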
diff --git a/tests/validation/reference/SoftmaxLayer.h b/tests/validation/reference/SoftmaxLayer.h
index 2af0b6d36a..3362f195c9 100644
--- a/tests/validation/reference/SoftmaxLayer.h
+++ b/tests/validation/reference/SoftmaxLayer.h
@@ -36,13 +36,13 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis, bool is_log = false);
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log = false);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = 0, bool is_log = false);
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type = 0 >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = 0, bool is_log = false);
} // namespace reference
} // namespace validation
} // namespace test