author    Sang-Hoon Park <sang-hoon.park@arm.com>  2019-10-08 18:07:23 +0100
committer Sang-Hoon Park <sang-hoon.park@arm.com>  2019-10-16 12:14:41 +0000
commit    d24affe0abefe8f4a83c7d4487386920895fd2e7 (patch)
tree      8ccdf4891b2107f49814002817e6895f201686a7 /tests/validation/reference/SoftmaxLayer.cpp
parent    7c60c990fbed62aab1369c0e4462c4081dc3cfeb (diff)
download  ComputeLibrary-d24affe0abefe8f4a83c7d4487386920895fd2e7.tar.gz
COMPMID-2265 add support for Log Softmax to NEON
Kernel (NEON/reference), validation tests, function and fixture are updated to add support for Log Softmax.

Change-Id: I641dbf1552f4128c691af8875949ebf88da71ee8
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2075
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
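For orientation, the quantity the new is_log path computes is the numerically stable log softmax; the max subtraction is the same stability trick the reference already applies for plain softmax, and beta is the layer's scaling parameter. A sketch of the two formulas:

\[
\operatorname{softmax}(x)_i = \frac{e^{\beta (x_i - \max_j x_j)}}{\sum_k e^{\beta (x_k - \max_j x_j)}},
\qquad
\log \operatorname{softmax}(x)_i = \beta (x_i - \max_j x_j) - \log \sum_k e^{\beta (x_k - \max_j x_j)}
\]

Note that both modes need the same exponential sum; the log path just subtracts its logarithm instead of dividing by it, which is exactly how the reference below is structured.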
Diffstat (limited to 'tests/validation/reference/SoftmaxLayer.cpp')
-rw-r--r--  tests/validation/reference/SoftmaxLayer.cpp  37
1 file changed, 31 insertions(+), 6 deletions(-)
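As a self-contained illustration of the computation the reference performs, here is a minimal standalone C++ sketch of the stable (log-)softmax over a flat vector. The softmax() helper and the main() driver are illustrative only, not ComputeLibrary API:

// A minimal, self-contained sketch of the numerically stable
// (log-)softmax computed by the reference below; softmax() and
// main() are hypothetical names, not ComputeLibrary API.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

std::vector<float> softmax(const std::vector<float> &x, float beta, bool is_log)
{
    // Subtract the row maximum first so std::exp cannot overflow.
    const float max = *std::max_element(x.begin(), x.end());

    std::vector<float> y(x.size());
    float sum = 0.f;
    for(std::size_t i = 0; i < x.size(); ++i)
    {
        const float shifted = (x[i] - max) * beta;
        y[i] = is_log ? shifted : std::exp(shifted);
        sum += std::exp(shifted); // the exp-sum is needed in both modes
    }
    // Normalize: divide for softmax, subtract log(sum) for log softmax.
    for(float &v : y)
    {
        v = is_log ? v - std::log(sum) : v / sum;
    }
    return y;
}

int main()
{
    const std::vector<float> logits{ 1.f, 2.f, 3.f };
    for(float v : softmax(logits, 1.f, true))
    {
        std::printf("%f\n", v);
    }
}

Exponentiating the printed values should sum to 1, a quick sanity check for the log path.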
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index fabc62bedb..ef2468df59 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -34,7 +34,7 @@ namespace validation
namespace reference
{
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
-SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
+SimpleTensor<T> softmax_layer_generic(const SimpleTensor<T> &src, float beta, size_t axis, bool is_log)
{
// Create reference
SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
@@ -65,23 +65,48 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
// Regularize
T sum(0.f);
- std::transform(src_row_ptr, src_row_ptr + lower_dims, dst_row_ptr, [&sum, max, beta](T val)
+ std::transform(src_row_ptr, src_row_ptr + lower_dims, dst_row_ptr, [&sum, max, beta, is_log](T val)
{
- const T res(std::exp((val - max) * beta));
- sum += res;
+ T res{ (val - max) * beta };
+
+ if(is_log)
+ {
+ sum += std::exp(res);
+ }
+ else
+ {
+ res = std::exp(res);
+ sum += res;
+ }
return res;
});
// Normalize
- std::transform(dst_row_ptr, dst_row_ptr + lower_dims, dst_row_ptr, [sum](T val)
+ std::transform(dst_row_ptr, dst_row_ptr + lower_dims, dst_row_ptr, [sum, is_log](T val)
{
- return val / sum;
+ if(is_log)
+ {
+ return val - static_cast<T>(std::log(sum));
+ }
+ else
+ {
+ return val / sum;
+ }
});
}
return dst;
}
+template SimpleTensor<float> softmax_layer_generic(const SimpleTensor<float> &src, float beta, size_t axis, bool is_log);
+template SimpleTensor<half> softmax_layer_generic(const SimpleTensor<half> &src, float beta, size_t axis, bool is_log);
+
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
+{
+ return softmax_layer_generic<T>(src, beta, axis, false);
+}
+
template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value, int>::type>
SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta, size_t axis)
{