Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CL/cl_kernels/softmax_layer.cl           | 4 +---
-rw-r--r--  src/core/CL/cl_kernels/softmax_layer_quantized.cl | 4 +---
-rw-r--r--  src/core/NEON/NEMath.inl                          | 3 ---
3 files changed, 2 insertions(+), 9 deletions(-)
diff --git a/src/core/CL/cl_kernels/softmax_layer.cl b/src/core/CL/cl_kernels/softmax_layer.cl
index 01f5de47cf..4d2d89dd73 100644
--- a/src/core/CL/cl_kernels/softmax_layer.cl
+++ b/src/core/CL/cl_kernels/softmax_layer.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -306,7 +306,6 @@ __kernel void softmax_layer_max_shift_exp_sum_parallel(
}
#ifdef NON_MULTIPLE_OF_GRID_SIZE
// How many work-items needed to complete the computation.
- //TODO: Optimize this calculation (avoid %).
int boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
if(lid < boundary_workitems)
{
@@ -417,7 +416,6 @@ __kernel void softmax_layer_max_shift_exp_sum_parallel(
sum1D += data;
}
#ifdef NON_MULTIPLE_OF_GRID_SIZE
- //TODO: Optimize the calculation (avoid %).
boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
if(lid < boundary_workitems)
{
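The TODOs removed in the two hunks above concerned the modulo in the boundary work-item count. What follows is a minimal standalone sketch of that arithmetic, together with the bitmask rewrite the TODO hinted at; it assumes GRID_SIZE * VECTOR_SIZE is a power of two for the masked variant, and the function names are illustrative rather than part of the kernel:

#include <cassert>
#include <cstdio>

// Hypothetical scalar model of the kernel's tail handling. src_width,
// grid_size and vector_size stand in for the SRC_WIDTH, GRID_SIZE and
// VECTOR_SIZE build options.
int boundary_workitems_mod(int src_width, int grid_size, int vector_size)
{
    // Elements left over after every work-item has consumed full vectors,
    // expressed as a number of extra vector-sized steps.
    return (src_width % (grid_size * vector_size)) / vector_size;
}

// The rewrite the removed TODO suggested: when the stride is a power of
// two, x % stride equals x & (stride - 1), avoiding the division unit.
int boundary_workitems_mask(int src_width, int grid_size, int vector_size)
{
    const int stride = grid_size * vector_size;
    assert((stride & (stride - 1)) == 0); // power-of-two strides only
    return (src_width & (stride - 1)) / vector_size;
}

int main()
{
    // 1000 elements, 8 work-items, vectors of 16: 1000 % 128 = 104 leftover
    // elements, i.e. 6 extra vector iterations (plus a sub-vector remainder
    // the kernel handles separately).
    printf("%d\n", boundary_workitems_mod(1000, 8, 16));  // 6
    printf("%d\n", boundary_workitems_mask(1000, 8, 16)); // 6
    return 0;
}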
diff --git a/src/core/CL/cl_kernels/softmax_layer_quantized.cl b/src/core/CL/cl_kernels/softmax_layer_quantized.cl
index b7a6e00dfa..4d5006d804 100644
--- a/src/core/CL/cl_kernels/softmax_layer_quantized.cl
+++ b/src/core/CL/cl_kernels/softmax_layer_quantized.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -324,7 +324,6 @@ __kernel void softmax_layer_max_shift_exp_sum_quantized_parallel(
}
#ifdef NON_MULTIPLE_OF_GRID_SIZE
// How many work-items needed to complete the computation.
- //TODO: Optimize this calculation (avoid %).
int boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
if(lid < boundary_workitems)
{
@@ -429,7 +428,6 @@ __kernel void softmax_layer_max_shift_exp_sum_quantized_parallel(
sum1D = sum1D + select(0, data_fp, data_diff >= (VEC_INT)(DIFF_MIN));
}
#ifdef NON_MULTIPLE_OF_GRID_SIZE
- //TODO: Optimize the calculation (avoid %).
boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
if(lid < boundary_workitems)
{
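Beyond the same boundary calculation, the quantized kernel accumulates through select() so that only lanes whose difference from the row maximum reaches DIFF_MIN contribute to sum1D. A scalar model of that masking, with made-up lane values for illustration:

#include <array>
#include <cstddef>
#include <cstdio>

// Scalar model of sum1D += select(0, data_fp, data_diff >= DIFF_MIN):
// select(a, b, cond) yields b where cond holds and a otherwise, so lanes
// below the DIFF_MIN cutoff add nothing to the running sum.
int main()
{
    constexpr int DIFF_MIN = -32; // stand-in for the kernel's build option
    const std::array<int, 4> data_fp   = {10, 20, 30, 40};
    const std::array<int, 4> data_diff = {-40, -10, -33, 0};

    int sum1D = 0;
    for (std::size_t i = 0; i < data_fp.size(); ++i)
    {
        sum1D += (data_diff[i] >= DIFF_MIN) ? data_fp[i] : 0;
    }
    printf("sum1D = %d\n", sum1D); // 60: only lanes 1 and 3 pass the cutoff
    return 0;
}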
diff --git a/src/core/NEON/NEMath.inl b/src/core/NEON/NEMath.inl
index da9d038139..29df5433bb 100644
--- a/src/core/NEON/NEMath.inl
+++ b/src/core/NEON/NEMath.inl
@@ -495,7 +495,6 @@ inline float16x8_t vtaylor_polyq_f16(float16x8_t x, const std::array<float16x8_t

inline float16x8_t vexpq_f16(float16x8_t x)
{
- // TODO (COMPMID-1535) : Revisit FP16 approximations
const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
const float32x4_t x_low = vcvt_f32_f16(vget_low_f16(x));

@@ -505,7 +504,6 @@ inline float16x8_t vexpq_f16(float16x8_t x)

inline float16x8_t vlogq_f16(float16x8_t x)
{
- // TODO (COMPMID-1535) : Revisit FP16 approximations
const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
const float32x4_t x_low = vcvt_f32_f16(vget_low_f16(x));

@@ -515,7 +513,6 @@ inline float16x8_t vlogq_f16(float16x8_t x)

inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n)
{
- // TODO (giaiod01) - COMPMID-1535
float32x4_t n0_f32 = vcvt_f32_f16(vget_low_f16(n));
float32x4_t n1_f32 = vcvt_f32_f16(vget_high_f16(n));
float32x4_t val0_f32 = vcvt_f32_f16(vget_low_f16(val));
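All three functions above share the shape the removed COMPMID-1535 TODOs questioned: widen to float32, approximate there, and narrow back to float16. Below is a sketch of that pattern, assuming an AArch64 target with FP16 storage support; some_f32_approx is a placeholder standing in for a float32 routine such as vexpq_f32, not an actual library symbol:

#include <arm_neon.h>

// Placeholder for a real float32 approximation such as vexpq_f32 or
// vlogq_f32; the identity keeps this sketch self-contained.
static inline float32x4_t some_f32_approx(float32x4_t x)
{
    return x;
}

// The widen/compute/narrow shape used by vexpq_f16, vlogq_f16 and
// vpowq_f16: split the eight half-precision lanes into two float32x4_t
// halves, run the float32 approximation, then convert back and recombine.
inline float16x8_t widen_compute_narrow(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    const float16x4_t res_low  = vcvt_f16_f32(some_f32_approx(x_low));
    const float16x4_t res_high = vcvt_f16_f32(some_f32_approx(x_high));
    return vcombine_f16(res_low, res_high);
}

Each call pays four lane-group conversions on top of the float32 math, which is presumably the cost the removed TODOs wanted to revisit.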