From 1f567afcdfb2919fab417f0060155deda7132df8 Mon Sep 17 00:00:00 2001
From: Sheri Zhang
Date: Tue, 5 May 2020 11:47:36 +0100
Subject: COMPMID-3442: Add support of negative axis in NESoftmaxLayer and
 reference code

Signed-off-by: Sheri Zhang
Change-Id: I285cc3b74ac0a45f0ad5830baed5237cea568f15
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3147
Tested-by: Arm Jenkins
Reviewed-by: Michele Di Giorgio
Comments-Addressed: Arm Jenkins
---
 tests/validation/reference/SoftmaxLayer.cpp | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index ee7a5f175a..2fe1faef50 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -34,23 +34,29 @@ namespace validation
 namespace reference
 {
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, size_t axis, bool is_log)
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
 {
     // Create reference
     SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
 
+    // Negative index is used to specify axis from the end (e.g. -1 for the last axis).
+    if(axis < 0)
+    {
+        axis += src.shape().num_dimensions();
+    }
+
     // Compute reference. Lower dims are the collapsing of the first axis
     // dimensions (i.e., the flattened dimension of each batch). The upper dims are
     // instead the batches we want to normalize
 
     int lower_dims = 1;
-    for(size_t i = 0; i < axis; i++)
+    for(size_t i = 0; i < static_cast<size_t>(axis); ++i)
     {
         lower_dims *= src.shape()[i];
     }
 
     int upper_dims = 1;
-    for(size_t i = axis; i < TensorShape::num_max_dimensions; i++)
+    for(size_t i = static_cast<size_t>(axis); i < TensorShape::num_max_dimensions; ++i)
     {
         upper_dims *= src.shape()[i];
     }
@@ -101,17 +107,17 @@ SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, si
     return dst;
 }
 
-template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, size_t axis, bool is_log);
-template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, size_t axis, bool is_log);
+template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t axis, bool is_log);
 
 template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
 {
     return softmax_layer_generic(src, beta, axis, false);
 }
 
 template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis)
 {
     const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), false);
 
@@ -121,10 +127,10 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axi
     return dst;
 }
 
-template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, size_t axis);
-template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, size_t axis);
-template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, size_t axis);
-template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, size_t axis);
+template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t axis);
+template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t axis);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t axis);
+template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t axis);
 } // namespace reference
 } // namespace validation
 } // namespace test
--
cgit v1.2.1
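
For context, a minimal standalone sketch of the axis-wrapping rule the patch introduces: a negative axis counts from the end of the shape, so for a rank-4 tensor, axis -1 maps to 3. The wrap_axis() helper and the example rank below are illustrative only, not part of ComputeLibrary.

    #include <cstdint>
    #include <cstdio>

    // Mirrors the patch's normalization: negative indices are shifted
    // by the tensor rank before being used as an unsigned dimension index.
    int32_t wrap_axis(int32_t axis, size_t num_dimensions)
    {
        if(axis < 0)
        {
            axis += static_cast<int32_t>(num_dimensions);
        }
        return axis;
    }

    int main()
    {
        const size_t rank = 4; // e.g. a 4D activation tensor
        std::printf("axis -1 -> %d\n", wrap_axis(-1, rank)); // prints 3 (innermost axis)
        std::printf("axis  1 -> %d\n", wrap_axis(1, rank));  // prints 1 (unchanged)
        return 0;
    }

Note that this is also why the parameter type changes from size_t to int32_t throughout the patch: the axis must be able to hold a negative value before wrapping, and the subsequent loops cast it back to size_t only after it has been normalized.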