author    Sheri Zhang <sheri.zhang@arm.com>  2020-05-05 11:47:36 +0100
committer Sheri Zhang <sheri.zhang@arm.com>  2020-05-12 13:12:58 +0000
commit    1f567afcdfb2919fab417f0060155deda7132df8 (patch)
tree      79631c4b121b89ff261156c41d1cc217afd891fc /tests
parent    c630e94d143ac5f46381f53a4994b29ea7ef2ac0 (diff)
COMPMID-3442: Add support for negative axis in NESoftmaxLayer and reference code
Signed-off-by: Sheri Zhang <sheri.zhang@arm.com>
Change-Id: I285cc3b74ac0a45f0ad5830baed5237cea568f15
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3147
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Diffstat (limited to 'tests')
-rw-r--r--  tests/validation/NEON/SoftmaxLayer.cpp          | 10
-rw-r--r--  tests/validation/fixtures/SoftmaxLayerFixture.h |  6
-rw-r--r--  tests/validation/reference/LogSoftmaxLayer.cpp  | 16
-rw-r--r--  tests/validation/reference/LogSoftmaxLayer.h    |  8
-rw-r--r--  tests/validation/reference/SoftmaxLayer.cpp     | 28
-rw-r--r--  tests/validation/reference/SoftmaxLayer.h       | 10
6 files changed, 42 insertions(+), 36 deletions(-)
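
For context before the diff: a minimal standalone sketch (a hypothetical helper, not code from this patch) of the wrapping rule the reference implementation now applies. A negative axis is brought into range by adding the tensor rank, so for a 4-D tensor an axis of -1 resolves to 3 and -3 resolves to 1; the existing lower_dims/upper_dims split then proceeds on the resulting non-negative index.

    #include <cstddef>
    #include <cstdint>

    // Hypothetical helper mirroring the arithmetic this patch adds to
    // softmax_layer_generic(): wrap a negative axis by the tensor rank.
    static int32_t wrap_axis(int32_t axis, size_t num_dimensions)
    {
        if(axis < 0)
        {
            axis += static_cast<int32_t>(num_dimensions);
        }
        return axis;
    }

    // wrap_axis(-1, 4) == 3, wrap_axis(-3, 4) == 1, wrap_axis(2, 4) == 2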
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
index c429782e60..8af3847cf8 100644
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ b/tests/validation/NEON/SoftmaxLayer.cpp
@@ -97,9 +97,9 @@ DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
framework::dataset::make("axis", { 1,
1,
1,
+ -1,
1,
- 1,
- 0,
+ -3,
})),
framework::dataset::make("Expected", { false, false, false, true, true, false })),
input_info, output_info, beta, axis, expected)
@@ -188,7 +188,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture<uint8_t>, fram
framework::dataset::make("DataType", DataType::QASYMM8)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 1, 2, 3 })))
+ framework::dataset::make("Axis", { -1, 2, 3 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8);
@@ -209,7 +209,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall2D, NESoftmaxLayerQuantizedFixture<int8_t>, frame
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 1 })))
+ framework::dataset::make("Axis", { -1, 1 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
@@ -218,7 +218,7 @@ FIXTURE_DATA_TEST_CASE(RunSmall4D, NESoftmaxLayerQuantizedFixture<int8_t>, frame
framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
combine(framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, -10) }),
framework::dataset::make("Beta", { 1.0f, 2.f }))),
- framework::dataset::make("Axis", { 1, 2, 3 })))
+ framework::dataset::make("Axis", { -2, 2, 3 })))
{
// Validate output
validate(Accessor(_target), _reference, tolerance_qasymm8_signed);
diff --git a/tests/validation/fixtures/SoftmaxLayerFixture.h b/tests/validation/fixtures/SoftmaxLayerFixture.h
index 82daf34f13..aeff777776 100644
--- a/tests/validation/fixtures/SoftmaxLayerFixture.h
+++ b/tests/validation/fixtures/SoftmaxLayerFixture.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -73,7 +73,7 @@ protected:
}
TensorType compute_target(const TensorShape &shape, DataType data_type,
- QuantizationInfo quantization_info, float beta, size_t axis)
+ QuantizationInfo quantization_info, float beta, int32_t axis)
{
// Create tensors
TensorType src = create_tensor<TensorType>(shape, data_type, 1, quantization_info);
@@ -103,7 +103,7 @@ protected:
}
SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type,
- QuantizationInfo quantization_info, float beta, size_t axis)
+ QuantizationInfo quantization_info, float beta, int32_t axis)
{
// Create reference
SimpleTensor<T> src{ shape, data_type, 1, quantization_info };
diff --git a/tests/validation/reference/LogSoftmaxLayer.cpp b/tests/validation/reference/LogSoftmaxLayer.cpp
index e4403956ab..edb208e6ae 100644
--- a/tests/validation/reference/LogSoftmaxLayer.cpp
+++ b/tests/validation/reference/LogSoftmaxLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -35,13 +35,13 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
+SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
{
return softmax_layer_generic<T>(src, beta, axis, true);
}
-template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int>::type>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
+template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
+SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
{
const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), true);
@@ -51,10 +51,10 @@ SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, size_t
return dst;
}
-template SimpleTensor<float> log_softmax_layer(const SimpleTensor<float> &src, float beta, size_t axis);
-template SimpleTensor<half> log_softmax_layer(const SimpleTensor<half> &src, float beta, size_t axis);
-template SimpleTensor<uint8_t> log_softmax_layer(const SimpleTensor<uint8_t> &src, float beta, size_t axis);
-template SimpleTensor<int8_t> log_softmax_layer(const SimpleTensor<int8_t> &src, float beta, size_t axis);
+template SimpleTensor<float> log_softmax_layer(const SimpleTensor<float> &src, float beta, int32_t axis);
+template SimpleTensor<half> log_softmax_layer(const SimpleTensor<half> &src, float beta, int32_t axis);
+template SimpleTensor<uint8_t> log_softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t axis);
+template SimpleTensor<int8_t> log_softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t axis);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/LogSoftmaxLayer.h b/tests/validation/reference/LogSoftmaxLayer.h
index c2e3f5974e..48ffdcfbcc 100644
--- a/tests/validation/reference/LogSoftmaxLayer.h
+++ b/tests/validation/reference/LogSoftmaxLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,10 +36,10 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis = 1);
+SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = -1);
-template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int>::type = 0>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis = 1);
+template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type = 0 >
+SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = -1);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index ee7a5f175a..2fe1faef50 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -34,23 +34,29 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, size_t axis, bool is_log)
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
{
// Create reference
SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
+ // Negative index is used to specify axis from the end (e.g. -1 for the last axis).
+ if(axis < 0)
+ {
+ axis += src.shape().num_dimensions();
+ }
+
// Compute reference. Lower dims are the collapsing of the first axis
// dimensions (i.e., the flattened dimension of each batch). The upper dims are
// instead the batches we want to normalize
int lower_dims = 1;
- for(size_t i = 0; i < axis; i++)
+ for(size_t i = 0; i < static_cast<size_t>(axis); ++i)
{
lower_dims *= src.shape()[i];
}
int upper_dims = 1;
- for(size_t i = axis; i < TensorShape::num_max_dimensions; i++)
+ for(size_t i = static_cast<size_t>(axis); i < TensorShape::num_max_dimensions; ++i)
{
upper_dims *= src.shape()[i];
}
@@ -101,17 +107,17 @@ SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, si
return dst;
}
-template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, size_t axis, bool is_log);
-template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, size_t axis, bool is_log);
+template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t axis, bool is_log);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
{
return softmax_layer_generic<T>(src, beta, axis, false);
}
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
{
const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), false);
@@ -121,10 +127,10 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axi
return dst;
}
-template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, size_t axis);
-template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, size_t axis);
-template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, size_t axis);
-template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, size_t axis);
+template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t axis);
+template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t axis);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t axis);
+template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t axis);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/SoftmaxLayer.h b/tests/validation/reference/SoftmaxLayer.h
index 2be575c2af..f819853d95 100644
--- a/tests/validation/reference/SoftmaxLayer.h
+++ b/tests/validation/reference/SoftmaxLayer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -36,13 +36,13 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, size_t axis, bool is_log = false);
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log = false);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis = 1);
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = -1);
-template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis = 1);
+template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type = 0 >
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = -1);
} // namespace reference
} // namespace validation
} // namespace test
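
As a usage sketch of the updated reference API (assuming only the headers and namespaces shown above; in the real fixtures the input tensor is filled through the assets library before use), a negative axis can now be passed directly, and -1 is the new default:

    #include "tests/validation/reference/SoftmaxLayer.h"

    int main()
    {
        using namespace arm_compute;
        using namespace arm_compute::test;

        SimpleTensor<float> src{ TensorShape(8U, 4U), DataType::F32 };
        // Fill src here; the fixtures do this via the assets library (omitted in this sketch).

        // For this 2-D shape, -1 wraps to 1, so both calls normalize over the same split.
        SimpleTensor<float> dst_neg = validation::reference::softmax_layer<float>(src, 1.0f, -1);
        SimpleTensor<float> dst_pos = validation::reference::softmax_layer<float>(src, 1.0f, 1);
        return 0;
    }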