author    SiCong Li <sicong.li@arm.com>  2020-08-21 12:28:30 +0100
committer SiCong Li <sicong.li@arm.com>  2020-08-25 14:12:07 +0000
commit    96209c73b071bb65d4919fb441076f977095a31b (patch)
tree      50252f1a33992b3a6171c6b2becf6da1b6f0022d /tests/validation/reference
parent    5111264954e2d1a4d3e91d23a0869a0d7105be4c (diff)
download  ComputeLibrary-96209c73b071bb65d4919fb441076f977095a31b.tar.gz
COMPMID-3694 COMPMID-3695 COMPMID-3458: Softmax Axis
* Properly support "axis" in CL and NEON (and GC) SoftmaxLayer and
  LogSoftmaxLayer in accord with mainstream frameworks. Axis now defines the
  dimension on which softmax is performed, and supports the range [-rank, rank)
* Extend validation tests to include valid and invalid axes
* Remove the unnecessary LogSoftmaxLayer fixture, as it is only a
  specialisation of the SoftmaxLayer fixture
* Change the validation fill value range from [-1000, 1000] to [-10, 10], as
  the former often results in sparse outputs with a single one and zeros
  elsewhere

Change-Id: I8a0040453182b04ed88260de3ba434e98258d863
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3830
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Diffstat (limited to 'tests/validation/reference')
-rw-r--r--  tests/validation/reference/LogSoftmaxLayer.cpp  61
-rw-r--r--  tests/validation/reference/LogSoftmaxLayer.h    47
-rw-r--r--  tests/validation/reference/SoftmaxLayer.cpp     89
-rw-r--r--  tests/validation/reference/SoftmaxLayer.h        6
4 files changed, 51 insertions, 152 deletions
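
For context: before this patch the reference implementation reduced over the first n dimensions up to reduce_end_axis; after it, axis selects exactly one dimension to normalize over, with negative values wrapped via wrap_around(axis, rank), matching mainstream frameworks. Below is a minimal, self-contained C++ sketch of those semantics over a row-major buffer. It deliberately avoids the library's SimpleTensor/Window machinery, wrap_negative is a stand-in for the library's wrap_around(), and the layout here puts the last axis innermost (the library itself indexes dimension 0 as the innermost, so only the wrap-around logic carries over directly):

    #include <algorithm>
    #include <cassert>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Stand-in for the library's wrap_around(): map axis in [-rank, rank) to [0, rank).
    static int wrap_negative(int axis, int rank) { return axis < 0 ? axis + rank : axis; }

    std::vector<float> softmax_axis(const std::vector<float> &src,
                                    const std::vector<int> &shape, int axis,
                                    float beta = 1.0f, bool is_log = false)
    {
        const int rank = static_cast<int>(shape.size());
        assert(axis >= -rank && axis < rank);
        const int a = wrap_negative(axis, rank);

        // Row-major strides; extract the stride and extent of the softmax axis.
        std::vector<size_t> stride(rank, 1);
        for(int d = rank - 2; d >= 0; --d) stride[d] = stride[d + 1] * shape[d + 1];
        const size_t axis_stride = stride[a];
        const int    axis_len    = shape[a];

        std::vector<float> dst(src.size());
        for(size_t base = 0; base < src.size(); ++base)
        {
            if((base / axis_stride) % axis_len != 0) continue; // not the start of a slice
            // Max along the axis, for numerical stability.
            float max_val = src[base];
            for(int i = 1; i < axis_len; ++i) max_val = std::max(max_val, src[base + i * axis_stride]);
            // Shift, scale by beta, and accumulate the softmax denominator.
            float sum = 0.f;
            for(int i = 0; i < axis_len; ++i)
            {
                const float res = (src[base + i * axis_stride] - max_val) * beta;
                dst[base + i * axis_stride] = res;
                sum += std::exp(res);
            }
            // Normalize: softmax exponentiates and divides; log-softmax subtracts log(sum).
            for(int i = 0; i < axis_len; ++i)
            {
                float &v = dst[base + i * axis_stride];
                v = is_log ? v - std::log(sum) : std::exp(v) / sum;
            }
        }
        return dst;
    }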
diff --git a/tests/validation/reference/LogSoftmaxLayer.cpp b/tests/validation/reference/LogSoftmaxLayer.cpp
deleted file mode 100644
index 8d3b8f7579..0000000000
--- a/tests/validation/reference/LogSoftmaxLayer.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "LogSoftmaxLayer.h"
-#include "SoftmaxLayer.h"
-
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
-{
- return softmax_layer_generic<T>(src, beta, reduce_end_axis, true);
-}
-
-template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
-{
- const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), true);
-
- SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
- SimpleTensor<float> dst_tmp = log_softmax_layer<float>(src_tmp, beta, reduce_end_axis);
- SimpleTensor<T> dst = convert_to_asymmetric<T>(dst_tmp, output_quantization_info);
- return dst;
-}
-
-template SimpleTensor<float> log_softmax_layer(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<half> log_softmax_layer(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<uint8_t> log_softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<int8_t> log_softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t reduce_end_axis);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
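
The file removed above contained only thin forwards to softmax_layer_generic(..., is_log = true) plus a dequantize/requantize wrapper; both survive through the updated softmax_layer overloads below. For reference, a standalone sketch of the numerically stable log-softmax that the is_log path computes on one slice:

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Minimal sketch (assumed semantics, not library code): stable log-softmax
    // of one slice, equivalent to softmax_layer_generic's is_log = true branch.
    std::vector<float> log_softmax_slice(const std::vector<float> &x, float beta = 1.0f)
    {
        const float max_val = *std::max_element(x.begin(), x.end());
        float sum = 0.f;
        std::vector<float> out(x.size());
        for(size_t i = 0; i < x.size(); ++i)
        {
            out[i] = (x[i] - max_val) * beta; // shifted, scaled logits
            sum += std::exp(out[i]);
        }
        for(float &v : out) v -= std::log(sum); // subtract log of the denominator
        return out;
    }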
diff --git a/tests/validation/reference/LogSoftmaxLayer.h b/tests/validation/reference/LogSoftmaxLayer.h
deleted file mode 100644
index db945074a2..0000000000
--- a/tests/validation/reference/LogSoftmaxLayer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2019-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_LOG_SOFTMAX_LAYER_H
-#define ARM_COMPUTE_TEST_LOG_SOFTMAX_LAYER_H
-
-#include "tests/SimpleTensor.h"
-#include "tests/validation/Helpers.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-namespace reference
-{
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
-
-template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type = 0 >
-SimpleTensor<T> log_softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
-} // namespace reference
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_SOFTMAX_LAYER_H */
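
Both removed declarations relied on the same SFINAE dispatch used throughout these reference headers: two function templates distinguished purely by an enable_if non-type template parameter, so floating-point and quantized element types pick different bodies. A minimal sketch of that pattern, with hypothetical function names:

    #include <cstdint>
    #include <type_traits>

    // "Float" path: participates in overload resolution only for floating-point T.
    template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
    T twice(T x) { return x + x; }

    // "Quantized" path: selected only when T is exactly uint8_t.
    template <typename T, typename std::enable_if<std::is_same<T, std::uint8_t>::value, int>::type = 0>
    T twice(T x) { return static_cast<T>(x + x); }

    // twice<float>(1.5f) resolves to the first overload, twice<std::uint8_t>(3)
    // to the second; any other T is a compile-time error, as with the removed
    // log_softmax_layer declarations.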
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index 00206766f8..3fbac32a9b 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -25,6 +25,7 @@
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
+#include "utils/TypePrinter.h"
namespace arm_compute
{
@@ -35,39 +36,43 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis, bool is_log)
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
{
// Create reference
SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
- // Convert reduce-before axis (inclusive) to first n axes to reduce
- const size_t first_n_reduce_axes = dim_index_2_num_dims(reduce_end_axis, static_cast<int32_t>(src.shape().num_dimensions()));
+ const int32_t n_dims = static_cast<int32_t>(src.shape().num_dimensions());
+ ARM_COMPUTE_ERROR_ON(axis < -n_dims || axis >= n_dims);
- // Compute reference. Lower dims are the collapsing of the first axis
- // dimensions (i.e., the flattened dimension of each batch). The upper dims are
- // instead the batches we want to normalize
+ const unsigned int actual_axis = static_cast<unsigned int>(wrap_around(axis, n_dims));
+ Window window;
+ window.use_tensor_dimensions(src.shape());
+ const unsigned int axis_dimension = src.shape()[actual_axis];
+ window.set(actual_axis, Window::Dimension(0, 1, 1));
- const int lower_dims = src.shape().total_size_lower(first_n_reduce_axes);
-
- const int upper_dims = src.shape().total_size_upper(first_n_reduce_axes);
-
-#if defined(_OPENMP)
- #pragma omp parallel for
-#endif /* _OPENMP */
- for(int r = 0; r < upper_dims; ++r)
+ execute_window_loop(window, [&](const Coordinates & id)
{
- const T *src_row_ptr = src.data() + r * lower_dims;
- T *dst_row_ptr = dst.data() + r * lower_dims;
-
- // Find max
- const T max = *std::max_element(src_row_ptr, src_row_ptr + lower_dims);
+ // Find max along axis
+ Coordinates offset(id);
+ offset.set(actual_axis, 0);
+ T max = *reinterpret_cast<const T *>(src(offset));
+ for(unsigned int axis_id = 1; axis_id < axis_dimension; ++axis_id)
+ {
+ offset.set(actual_axis, axis_id);
+ const T val = *reinterpret_cast<const T *>(src(offset));
+ if(val > max)
+ {
+ max = val;
+ }
+ }
// Regularize
T sum(0.f);
- std::transform(src_row_ptr, src_row_ptr + lower_dims, dst_row_ptr, [&sum, max, beta, is_log](T val)
+ for(unsigned int axis_id = 0; axis_id < axis_dimension; ++axis_id)
{
- T res{ (val - max) *beta };
-
+ offset.set(actual_axis, axis_id);
+ const T val = *reinterpret_cast<const T *>(src(offset));
+ T res{ (val - max) *beta };
if(is_log)
{
sum += std::exp(res);
@@ -77,50 +82,52 @@ SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, in
res = std::exp(res);
sum += res;
}
- return res;
- });
+ *reinterpret_cast<T *>(dst(offset)) = res;
+ }
// Normalize
- std::transform(dst_row_ptr, dst_row_ptr + lower_dims, dst_row_ptr, [sum, is_log](T val)
+ for(unsigned int axis_id = 0; axis_id < axis_dimension; ++axis_id)
{
+ offset.set(actual_axis, axis_id);
+ const T val = *reinterpret_cast<const T *>(dst(offset));
if(is_log)
{
- return val - static_cast<T>(std::log(sum));
+ *reinterpret_cast<T *>(dst(offset)) = val - static_cast<T>(std::log(sum));
}
else
{
- return val / sum;
+ *reinterpret_cast<T *>(dst(offset)) = val / sum;
}
- });
- }
-
+ }
+ });
return dst;
}
-template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis, bool is_log);
-template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis, bool is_log);
+template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, int32_t axis, bool is_log);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
{
- return softmax_layer_generic<T>(src, beta, reduce_end_axis, false);
+ return softmax_layer_generic<T>(src, beta, axis, is_log);
}
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis)
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log)
{
- const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), false);
+ const QuantizationInfo output_quantization_info = arm_compute::get_softmax_output_quantization_info(src.data_type(), is_log);
SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
- SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, reduce_end_axis);
+ SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta, axis, is_log);
SimpleTensor<T> dst = convert_to_asymmetric<T>(dst_tmp, output_quantization_info);
return dst;
}
-template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t reduce_end_axis);
-template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t reduce_end_axis);
+template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta, int32_t axis, bool is_log);
+template SimpleTensor<int8_t> softmax_layer(const SimpleTensor<int8_t> &src, float beta, int32_t axis, bool is_log);
+
} // namespace reference
} // namespace validation
} // namespace test
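
The quantized overload above keeps the usual reference-test pattern: dequantize to float, run the float reference, requantize with a fixed output QuantizationInfo, now chosen by get_softmax_output_quantization_info(data_type, is_log). A standalone sketch of that round-trip follows; the 1/256 scale and zero offset are illustrative assumptions for the softmax case, not necessarily the values the helper returns:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Sketch of the dequantize -> float reference -> requantize round-trip used
    // by the QASYMM8 overload (hypothetical helper names).
    struct QInfo { float scale; std::int32_t offset; };

    std::vector<float> dequantize(const std::vector<std::uint8_t> &q, QInfo qi)
    {
        std::vector<float> f(q.size());
        for(size_t i = 0; i < q.size(); ++i)
            f[i] = (static_cast<std::int32_t>(q[i]) - qi.offset) * qi.scale;
        return f;
    }

    std::vector<std::uint8_t> quantize(const std::vector<float> &f, QInfo qi)
    {
        std::vector<std::uint8_t> q(f.size());
        for(size_t i = 0; i < f.size(); ++i)
        {
            const std::int32_t v = static_cast<std::int32_t>(std::lround(f[i] / qi.scale)) + qi.offset;
            q[i] = static_cast<std::uint8_t>(std::min(255, std::max(0, v))); // clamp to [0, 255]
        }
        return q;
    }

    // Usage, reusing the softmax_axis sketch shown earlier. Softmax outputs lie
    // in [0, 1], so a fixed 1/256 scale with zero offset is a natural output
    // encoding (assumption; the library pins this per data type and is_log):
    //   auto f   = dequantize(q_src, src_qinfo);
    //   auto out = softmax_axis(f, shape, axis, beta, /*is_log=*/false);
    //   auto q   = quantize(out, QInfo{ 1.0f / 256.0f, 0 });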
diff --git a/tests/validation/reference/SoftmaxLayer.h b/tests/validation/reference/SoftmaxLayer.h
index 2af0b6d36a..3362f195c9 100644
--- a/tests/validation/reference/SoftmaxLayer.h
+++ b/tests/validation/reference/SoftmaxLayer.h
@@ -36,13 +36,13 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis, bool is_log = false);
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, int32_t axis, bool is_log = false);
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = 0, bool is_log = false);
template < typename T, typename std::enable_if < std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, int >::type = 0 >
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t reduce_end_axis = 0);
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, int32_t axis = 0, bool is_log = false);
} // namespace reference
} // namespace validation
} // namespace test
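
With the header changes above, softmax and log-softmax both flow through a single entry point. A hypothetical call-site sketch (test fixture plumbing and the tensor fill are omitted):

    // Hypothetical usage of the updated reference API; axis = -1 wraps to rank - 1.
    SimpleTensor<float> src{ TensorShape(8U, 4U, 2U), DataType::F32 };
    // ... fill src ...
    SimpleTensor<float> dst     = reference::softmax_layer(src, /*beta=*/1.0f, /*axis=*/-1);
    SimpleTensor<float> log_dst = reference::softmax_layer(src, 1.0f, -1, /*is_log=*/true);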