From d004a7a707feab36e51f51cfc9eb2cb70729d5ad Mon Sep 17 00:00:00 2001
From: SiCong Li
Date: Thu, 28 May 2020 15:26:41 +0100
Subject: COMPMID-3510 [Interface change] Fix definition of "axis" in NESoftmaxLayer and CLSoftmaxLayer

* [Interface change] "axis" argument is renamed to "reduce_end_axis"
* Unify the meaning of "axis" (now "reduce_end_axis") to be the last axis of
  the first n dimensions (inclusive) to reduce. This way the meaning of
  reduce_end_axis stays the same for both positive and negative values: it
  selects a dimension before which all dimensions (including the selected
  dimension) are reduced.

Change-Id: I4ab03bd8360b1cd8cac4998df0b1571064a9d4ed
Signed-off-by: SiCong Li
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3278
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Reviewed-by: Georgios Pinitas
Comments-Addressed: Arm Jenkins
---
 tests/validation/reference/SoftmaxLayer.cpp | 42 +++++++++++------------------
 1 file changed, 16 insertions(+), 26 deletions(-)

diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 2fe1faef50..9a8d46d516 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -23,6 +23,7 @@
  */
 #include "SoftmaxLayer.h"
 
+#include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/Types.h"
 
 namespace arm_compute
@@ -34,32 +35,21 @@ namespace validation
 namespace reference
 {
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis, bool is_log)
 {
     // Create reference
     SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
-    // Negative index is used to specify axis from the end (e.g. -1 for the last axis).
-    if(axis < 0)
-    {
-        axis += src.shape().num_dimensions();
-    }
+    // Convert reduce-before axis (inclusive) to first n axes to reduce
+    const size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast<int32_t>(src.shape().num_dimensions()));
 
     // Compute reference. Lower dims are the collapsing of the first axis
     // dimensions (i.e., the flattened dimension of each batch). The upper dims are
     // instead the batches we want to normalize
 
-    int lower_dims = 1;
-    for(size_t i = 0; i < static_cast<size_t>(axis); ++i)
-    {
-        lower_dims *= src.shape()[i];
-    }
+    const int lower_dims = src.shape().total_size_lower(first_n_reduce_axes);
 
-    int upper_dims = 1;
-    for(size_t i = static_cast<size_t>(axis); i < TensorShape::num_max_dimensions; ++i)
-    {
-        upper_dims *= src.shape()[i];
-    }
+    const int upper_dims = src.shape().total_size_upper(first_n_reduce_axes);
 
 #if defined(_OPENMP)
     #pragma omp parallel for
@@ -107,30 +97,30 @@ SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, in
     return dst;
 }
 
-template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t axis, bool is_log);
-template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis, bool is_log);
+template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis, bool is_log);
 
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
 {
-    return softmax_layer_generic<T>(src, beta, axis, false);
+    return softmax_layer_generic<T>(src, beta, reduce_end_axis, false);
 }
 
 template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
 {
     const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), false);
 
     SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
-    SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, axis);
+    SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, reduce_end_axis);
     SimpleTensor<T>     dst     = convert_to_asymmetric<T>(dst_tmp, output_quantization_info);
     return dst;
 }
 
-template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t axis);
-template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t axis);
-template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t axis);
-template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t axis);
+template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t reduce_end_axis);
 } // namespace reference
 } // namespace validation
 } // namespace test
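
A note on the new semantics: dim_index_2_num_dims() (declared in arm_compute/core/Helpers.h, hence the new include above) converts the inclusive reduce_end_axis index into the number of leading dimensions to collapse. The sketch below is a minimal illustration of that conversion as described in the commit message, not the library's implementation; the function name dim_index_2_num_dims_sketch and the main() driver are hypothetical, for demonstration only.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Sketch (assumption, reconstructed from the commit message): map a
// reduce_end_axis that may be negative (counting from the end) to the
// number of leading dimensions to reduce, inclusive of the selected axis.
size_t dim_index_2_num_dims_sketch(int32_t reduce_end_axis, int32_t num_dims)
{
    // Wrap negative indices, e.g. -1 with 4 dims selects axis 3.
    const int32_t wrapped = reduce_end_axis < 0 ? reduce_end_axis + num_dims : reduce_end_axis;
    // Reducing up to and including axis `wrapped` collapses the first
    // (wrapped + 1) dimensions.
    return static_cast<size_t>(wrapped) + 1;
}

int main()
{
    // On a 4D tensor, reduce_end_axis = 0 collapses only dimension 0,
    // i.e. the per-row softmax computed by the reference above...
    assert(dim_index_2_num_dims_sketch(0, 4) == 1);
    // ...while 3 and -1 agree: both collapse all four dimensions, which is
    // the point of the unified definition.
    assert(dim_index_2_num_dims_sketch(3, 4) == 4);
    assert(dim_index_2_num_dims_sketch(-1, 4) == 4);
    return 0;
}

Given first_n_reduce_axes, TensorShape::total_size_lower(n) is the product of dimensions [0, n) (the length each softmax runs over) and total_size_upper(n) is the product of the remaining dimensions (the number of independent batches), which is how the two index loops in the old reference collapse into the two one-liners in this patch.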