about summary refs log tree commit diff
path: root/tests/validation/reference/SoftmaxLayer.cpp
diff options
context:
space:
mode:
authorVidhya Sudhan Loganathan <vidhyasudhan.loganathan@arm.com>2018-07-04 09:34:00 +0100
committerAnthony Barbier <anthony.barbier@arm.com>2018-11-02 16:54:10 +0000
commit7485d5a62685cb745ab50e970adb722cb71557ac (patch)
treeba01b99ca466c93edc9a3f8c1e34394ff84be060 /tests/validation/reference/SoftmaxLayer.cpp
parent014333d73883c3872e458cedda5ccef586a7ccd4 (diff)
downloadComputeLibrary-7485d5a62685cb745ab50e970adb722cb71557ac.tar.gz
COMPMID-970 : Remove QS8 / QS16 support
Removed fixed point related code. Change-Id: I487acf138dace3b0450e0d72ca7071eaec254566 Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/137678 Tested-by: Jenkins <bsgcomp@arm.com> Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Diffstat (limited to 'tests/validation/reference/SoftmaxLayer.cpp')
-rw-r--r-- tests/validation/reference/SoftmaxLayer.cpp | 53
1 file changed, 5 insertions(+), 48 deletions(-)
diff --git a/tests/validation/reference/SoftmaxLayer.cpp b/tests/validation/reference/SoftmaxLayer.cpp
index ae4bcd8f0e..aa640ad5e6 100644
--- a/tests/validation/reference/SoftmaxLayer.cpp
+++ b/tests/validation/reference/SoftmaxLayer.cpp
@@ -24,7 +24,6 @@
#include "SoftmaxLayer.h"
#include "arm_compute/core/Types.h"
-#include "tests/validation/FixedPoint.h"
namespace arm_compute
{
@@ -71,63 +70,21 @@ SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
return dst;
}
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
+template <typename T, typename std::enable_if<std::is_same<T, uint8_t>::value, int>::type>
SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src, float beta)
{
- ARM_COMPUTE_UNUSED(beta);
-
- using namespace fixed_point_arithmetic;
-
- // Create reference
- SimpleTensor<T> dst{ src.shape(), src.data_type(), 1 };
-
- // Compute reference
- const int cols = src.shape()[0];
- const int upper_dims = src.num_elements() / cols;
-
- for(int r = 0; r < upper_dims; ++r)
- {
- const T *src_row_ptr = src.data() + r * cols;
- T *dst_row_ptr = dst.data() + r * cols;
-
- // Find max
- const fixed_point<T> max(*std::max_element(src_row_ptr, src_row_ptr + cols), src.fixed_point_position(), true);
-
- // Regularize
- using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
- fixed_point<promoted_type> sum(0, src.fixed_point_position(), true);
- std::transform(src_row_ptr, src_row_ptr + cols, dst_row_ptr, [&](T val)
- {
- const fixed_point<T> res = exp(fixed_point<T>(val, src.fixed_point_position(), true) - max);
- sum = add(sum, fixed_point<promoted_type>(res.raw(), src.fixed_point_position(), true));
- return res.raw();
- });
-
- // Normalize
- fixed_point<T> saturated_sum(sum);
- std::transform(dst_row_ptr, dst_row_ptr + cols, dst_row_ptr, [&](T val)
- {
- return div(fixed_point<T>(val, src.fixed_point_position(), true), saturated_sum).raw();
- });
- }
-
- return dst;
-}
-
-template <>
-SimpleTensor<uint8_t> softmax_layer<uint8_t>(const SimpleTensor<uint8_t> &src, float beta)
-{
// Note: Output quantization info should always have scale = 1/256 and offset = 0
const QuantizationInfo output_quantization_info = QuantizationInfo(1.f / 256, 0);
- SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
- SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta);
- SimpleTensor<uint8_t> dst = convert_to_asymmetric(dst_tmp, output_quantization_info);
+ SimpleTensor<float> src_tmp = convert_from_asymmetric(src);
+ SimpleTensor<float> dst_tmp = softmax_layer<float>(src_tmp, beta);
+ SimpleTensor<T> dst = convert_to_asymmetric(dst_tmp, output_quantization_info);
return dst;
}
template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src, float beta);
template SimpleTensor<half> softmax_layer(const SimpleTensor<half> &src, float beta);
+template SimpleTensor<uint8_t> softmax_layer(const SimpleTensor<uint8_t> &src, float beta);
} // namespace reference
} // namespace validation
} // namespace test