From 57016a419c89e737216fd12711e6eba7e030061e Mon Sep 17 00:00:00 2001
From: Georgios Pinitas <georgios.pinitas@arm.com>
Date: Wed, 16 Jan 2019 12:54:29 +0000
Subject: COMPMID-1710: Fixes substitution errors in bare_metal build.

Change-Id: Icf76503b4d01e90a682b9bed0798a8a635840e46
Reviewed-on: https://review.mlplatform.org/528
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
---
 src/core/NEON/kernels/NEActivationLayerKernel.cpp    | 6 +++---
 src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/core/NEON/kernels/NEActivationLayerKernel.cpp b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
index 97cb9ceb2e..7565a1214c 100644
--- a/src/core/NEON/kernels/NEActivationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEActivationLayerKernel.cpp
@@ -371,17 +371,17 @@ typename std::enable_if<std::is_same<T, qasymm8_t>::value, void>::type NEActivat
         if(act == ActivationFunction::RELU)
         {
             tmp = std::max(const_0, in);
-            tmp = std::max(0, std::min(static_cast<int32_t>(tmp * s + o), 255));
+            tmp = std::max<int32_t>(0, std::min<int32_t>(tmp * s + o, 255));
         }
         else if(act == ActivationFunction::BOUNDED_RELU)
         {
             tmp = std::min(a, std::max(const_0, in));
-            tmp = std::max(0, std::min(static_cast<int32_t>(tmp * s + o), 255));
+            tmp = std::max<int32_t>(0, std::min<int32_t>(tmp * s + o, 255));
         }
         else if(act == ActivationFunction::LU_BOUNDED_RELU)
         {
             tmp = std::min(a, std::max(b, in));
-            tmp = std::max(0, std::min(static_cast<int32_t>(tmp * s + o), 255));
+            tmp = std::max<int32_t>(0, std::min<int32_t>(tmp * s + o, 255));
         }
         else
         {
diff --git a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
index e74833cd41..bd52d4de2b 100644
--- a/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
+++ b/src/core/NEON/kernels/NEArithmeticAdditionKernel.cpp
@@ -246,7 +246,7 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
         for(; x < window_end_x; ++x)
         {
             const float afs = static_cast<float>(*(non_broadcast_input_ptr + x) - non_broadcast_qinfo.offset) * non_broadcast_qinfo.scale;
-            *(output_ptr + x) = std::max(0, std::min(static_cast<int32_t>((afs + bfs) * invoutput_scale + output_offset), 255));
+            *(output_ptr + x) = std::max<int32_t>(0, std::min<int32_t>((afs + bfs) * invoutput_scale + output_offset, 255));
         }
     },
     broadcast_input, non_broadcast_input, output);
@@ -317,7 +317,7 @@ void add_QASYMM8_QASYMM8_QASYMM8(const ITensor *in1, const ITensor *in2, ITensor
         {
             const float afs = static_cast<float>((*(input1_ptr + x)) - input1_qinfo.offset) * input1_qinfo.scale;
             const float bfs = static_cast<float>((*(input2_ptr + x)) - input2_qinfo.offset) * input2_qinfo.scale;
-            *(output_ptr + x) = std::max(0, std::min(static_cast<int32_t>((afs + bfs) * invoutput_scale + output_offset), 255));
+            *(output_ptr + x) = std::max<int32_t>(0, std::min<int32_t>((afs + bfs) * invoutput_scale + output_offset, 255));
         }
     },
     input1, input2, output);
-- 
cgit v1.2.1