author    SiCong Li <sicong.li@arm.com>    2020-05-28 15:26:41 +0100
committer SiCong Li <sicong.li@arm.com>    2020-06-11 09:15:33 +0000
commit    d004a7a707feab36e51f51cfc9eb2cb70729d5ad (patch)
tree      e6adef65a116e92c29303af479fab3ef5e1d8b97 /tests/validation/reference
parent    eb727f4f7afaa0a5ac5c630277086d912b128e55 (diff)
download  ComputeLibrary-d004a7a707feab36e51f51cfc9eb2cb70729d5ad.tar.gz
COMPMID-3510 [Interface change] Fix definition of "axis" in NESoftmaxLayer and CLSoftmaxLayer
* [Interface change] "axis" argument is renamed to "reduce_end_axis"
* Unify the meaning of "axis" (now "reduce_end_axis") to be the last axis of the first n dimensions (inclusive) to reduce. This way the meaning of reduce_end_axis stays the same for both positive and negative values: it selects a dimension before which all dimensions (including the selected dimension) are reduced.

Change-Id: I4ab03bd8360b1cd8cac4998df0b1571064a9d4ed
Signed-off-by: SiCong Li <sicong.li@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3278
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
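A minimal sketch of the semantics described above. The diff below calls dim_index_2_num_dims() from arm_compute/core/Helpers.h; its exact implementation is not shown in this change, so the body here is an assumption that merely illustrates the stated mapping from reduce_end_axis to a count of leading dimensions to reduce:

    #include <cstdint>

    // Assumed behaviour: map an end-axis index (possibly negative,
    // counted from the end) to the number of leading dimensions to
    // reduce, i.e. dimensions [0, reduce_end_axis] inclusive.
    int32_t dim_index_2_num_dims(int32_t dim_index, int32_t num_dims)
    {
        return (dim_index < 0 ? dim_index + num_dims : dim_index) + 1;
    }

    // For a 4-D tensor (num_dims = 4):
    //   reduce_end_axis =  0 -> reduce dimension 0 only
    //   reduce_end_axis = -1 -> reduce dimensions 0..3 (all of them)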
Diffstat (limited to 'tests/validation/reference')
-rw-r--r--  tests/validation/reference/LogSoftmaxLayer.cpp  16
-rw-r--r--  tests/validation/reference/LogSoftmaxLayer.h     4
-rw-r--r--  tests/validation/reference/SoftmaxLayer.cpp     42
-rw-r--r--  tests/validation/reference/SoftmaxLayer.h        6
4 files changed, 29 insertions, 39 deletions
diff --git a/tests/validation/reference/LogSoftmaxLayer.cpp b/tests/validation/reference/LogSoftmaxLayer.cpp
index edb208e6ae..8dd8d45a86 100644
--- a/tests/validation/reference/LogSoftmaxLayer.cpp
+++ b/tests/validation/reference/LogSoftmaxLayer.cpp
@@ -35,26 +35,26 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
+SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
{
- return softmax_layer_generic<T>(src, beta, axis, true);
+ return softmax_layer_generic<T>(src, beta, reduce_end_axis, true);
}
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
+SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
{
const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), true);
SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
- SimpleTensor<float> dst_tmp = log_softmax_layer<float>(src_tmp, beta, axis);
+ SimpleTensor<float> dst_tmp = log_softmax_layer<float>(src_tmp, beta, reduce_end_axis);
SimpleTensor<T> dst = convert_to_asymmetric<T>(dst_tmp, output_quantization_info);
return dst;
}
-template SimpleTensor<float> log_softmax_layer(const SimpleTensor<float> &src, float beta, int32_t axis);
-template SimpleTensor<half> log_softmax_layer(const SimpleTensor<half> &src, float beta, int32_t axis);
-template SimpleTensor<uint8_t> log_softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t axis);
-template SimpleTensor<int8_t> log_softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t axis);
+template SimpleTensor<float> log_softmax_layer(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<half> log_softmax_layer(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<uint8_t> log_softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<int8_t> log_softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t reduce_end_axis);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/LogSoftmaxLayer.h b/tests/validation/reference/LogSoftmaxLayer.h
index 48ffdcfbcc..d9a439850e 100644
--- a/tests/validation/reference/LogSoftmaxLayer.h
+++ b/tests/validation/reference/LogSoftmaxLayer.h
@@ -36,10 +36,10 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = -1);
+SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type = 0 >
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = -1);
+SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
} // namespace reference
} // namespace validation
} // namespace test
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 2fe1faef50..9a8d46d516 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -23,6 +23,7 @@
*/
#include "SoftmaxLayer.h"
+#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
namespace arm_compute
@@ -34,32 +35,21 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis, bool is_log)
{
// Create reference
SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
- // Negative index is used to specify axis from the end (e.g. -1 for the last axis).
- if(axis < 0)
- {
- axis += src.shape().num_dimensions();
- }
+ // Convert reduce-before axis (inclusive) to first n axes to reduce
+ const size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast<int32_t>(src.shape().num_dimensions()));
// Compute reference. Lower dims are the collapsing of the first axis
// dimensions (i.e., the flattened dimension of each batch). The upper dims are
// instead the batches we want to normalize
- int lower_dims = 1;
- for(size_t i = 0; i < static_cast<size_t>(axis); ++i)
- {
- lower_dims *= src.shape()[i];
- }
+ const int lower_dims = src.shape().total_size_lower(first_n_reduce_axes);
- int upper_dims = 1;
- for(size_t i = static_cast<size_t>(axis); i < TensorShape::num_max_dimensions; ++i)
- {
- upper_dims *= src.shape()[i];
- }
+ const int upper_dims = src.shape().total_size_upper(first_n_reduce_axes);
#if defined(_OPENMP)
#pragma omp parallel for
@@ -107,30 +97,30 @@ SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, in
return dst;
}
-template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t axis, bool is_log);
-template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis, bool is_log);
+template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis, bool is_log);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
{
- return softmax_layer_generic<T>(src, beta, axis, false);
+ return softmax_layer_generic<T>(src, beta, reduce_end_axis, false);
}
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
{
const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), false);
SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
- SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, axis);
+ SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, reduce_end_axis);
SimpleTensor<T> dst = convert_to_asymmetric<T>(dst_tmp, output_quantization_info);
return dst;
}
-template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t axis);
-template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t axis);
-template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t axis);
-template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t axis);
+template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t reduce_end_axis);
} // namespace reference
} // namespace validation
} // namespace test
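The hunk above replaces two hand-written accumulation loops with TensorShape::total_size_lower() and TensorShape::total_size_upper(). A minimal sketch of what those helpers are assumed to compute, written against a plain std::vector shape so it stands alone (the real methods live on arm_compute::TensorShape):

    #include <cstddef>
    #include <vector>

    // Product of the sizes of dimensions [0, dim), i.e. the flattened
    // size of everything below the given dimension index.
    size_t total_size_lower(const std::vector<size_t> &shape, size_t dim)
    {
        size_t size = 1;
        for(size_t i = 0; i < dim; ++i)
        {
            size *= shape[i];
        }
        return size;
    }

    // Product of the sizes of dimensions [dim, n), i.e. the flattened
    // size of the remaining upper dimensions (the batches).
    size_t total_size_upper(const std::vector<size_t> &shape, size_t dim)
    {
        size_t size = 1;
        for(size_t i = dim; i < shape.size(); ++i)
        {
            size *= shape[i];
        }
        return size;
    }

These bodies mirror the deleted lower_dims/upper_dims loops, so the refactor is a pure simplification once the negative-axis handling has been folded into dim_index_2_num_dims().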
diff --git a/tests/validation/reference/SoftmaxLayer.h b/tests/validation/reference/SoftmaxLayer.h
index f819853d95..fde19943bf 100644
--- a/tests/validation/reference/SoftmaxLayer.h
+++ b/tests/validation/reference/SoftmaxLayer.h
@@ -36,13 +36,13 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log = false);
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis, bool is_log = false);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = -1);
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type = 0 >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = -1);
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
} // namespace reference
} // namespace validation
} // namespace test
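Hypothetical call sites under the new interface, using the test-reference API declared in the headers above (tensor construction and fill are elided; beta = 1.0f is an arbitrary choice for illustration):

    using namespace arm_compute::test::validation;

    // Default reduce_end_axis = 0: reduce over dimension 0 only,
    // matching the old default of axis = -1 for 2-D tensors.
    SimpleTensor<float> out  = reference::softmax_layer(src, 1.0f);

    // Explicit reduce_end_axis = 1: reduce over dimensions 0 and 1.
    SimpleTensor<float> out2 = reference::softmax_layer(src, 1.0f, 1);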