author     Matthew Sloyan <matthew.sloyan@arm.com>    2020-09-09 09:07:37 +0100
committer  Jan Eilers <jan.eilers@arm.com>            2020-09-17 08:31:09 +0000
commit     171214c8ff275c90cd4f7fc23a34ec2c83b5ea39 (patch)
tree       23fd3ee288d631c8c94bede71f89f0f1e12da862 /src/backends/reference/workloads/RefNormalizationWorkload.cpp
parent     a25886e0966a6b9433cd23595688fadb88a161b2 (diff)
download   armnn-171214c8ff275c90cd4f7fc23a34ec2c83b5ea39.tar.gz
IVGCVSW-5300 Remove some boost::numeric_cast from armnn/backends
* Replaced with armnn/utility/NumericCast.hpp
* Some exclusions in reference backend
* Excluded as requires float implementation in NumericCast.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I9e4e9cd502c865452128fa04415fd6f250baa855
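For context, the sketch below illustrates the kind of checked narrowing conversion that boost::numeric_cast provided and that armnn::numeric_cast (from armnn/utility/NumericCast.hpp) takes over at the call sites in this diff. The helper name checked_numeric_cast and its assert-based range check are illustrative assumptions, not the actual ArmNN implementation, which may differ in detail (the commit message notes the real header still lacked a float implementation at this point):

#include <cassert>
#include <type_traits>

// Hypothetical stand-in for armnn::numeric_cast; shown only to convey the
// intended behaviour of the replacement, not the real ArmNN code.
template <typename Dest, typename Source>
Dest checked_numeric_cast(Source source)
{
    static_assert(std::is_integral<Dest>::value && std::is_integral<Source>::value,
                  "integral conversions only in this sketch; float support was "
                  "the stated reason some reference-backend call sites were excluded");

    Dest converted = static_cast<Dest>(source);

    // A value is in range iff it survives the round trip with its sign intact:
    // wrap-around on narrowing either changes the value or flips the sign.
    bool sourceNegative = std::is_signed<Source>::value && source < Source{0};
    bool destNegative   = std::is_signed<Dest>::value && converted < Dest{0};
    assert(static_cast<Source>(converted) == source && sourceNegative == destNegative);

    return converted;
}

// Usage mirroring the radius computation changed in the diff below:
//   unsigned int norm_size = 5;
//   int radius = checked_numeric_cast<int>(norm_size / 2u);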
Diffstat (limited to 'src/backends/reference/workloads/RefNormalizationWorkload.cpp')
-rw-r--r--  src/backends/reference/workloads/RefNormalizationWorkload.cpp  25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/src/backends/reference/workloads/RefNormalizationWorkload.cpp b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
index a41f68349a..d5d2104cba 100644
--- a/src/backends/reference/workloads/RefNormalizationWorkload.cpp
+++ b/src/backends/reference/workloads/RefNormalizationWorkload.cpp
@@ -8,11 +8,10 @@
#include <armnn/Logging.hpp>
#include <armnn/Tensor.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <Profiling.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"
@@ -37,7 +36,7 @@ void NormalizeWithinUingLbr(Decoder<float>& inputData,
const unsigned int rows = tensorShape[2];
const unsigned int cols = tensorShape[3];
- int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
+ int radius = armnn::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
for (unsigned int n = 0; n < batchSize; n++)
{
@@ -52,23 +51,23 @@ void NormalizeWithinUingLbr(Decoder<float>& inputData,
{
for (int x = -radius; x <= radius; x++)
{
- int i = boost::numeric_cast<int>(w) + x;
- int j = boost::numeric_cast<int>(h) + y;
+ int i = armnn::numeric_cast<int>(w) + x;
+ int j = armnn::numeric_cast<int>(h) + y;
- if ((i < 0) || (i >= boost::numeric_cast<int>(cols)))
+ if ((i < 0) || (i >= armnn::numeric_cast<int>(cols)))
{
continue;
}
- if ((j < 0) || (j >= boost::numeric_cast<int>(rows)))
+ if ((j < 0) || (j >= armnn::numeric_cast<int>(rows)))
{
continue;
}
unsigned int inputIndex = n * cols * rows * depth +
c * cols * rows +
- boost::numeric_cast<unsigned int>(j) * cols +
- boost::numeric_cast<unsigned int>(i);
+ armnn::numeric_cast<unsigned int>(j) * cols +
+ armnn::numeric_cast<unsigned int>(i);
inputData[inputIndex];
float inval = inputData.Get();
@@ -106,7 +105,7 @@ void NormalizeAcrossUingLbr(Decoder<float>& inputData,
const unsigned int rows = tensorShape[dataLayoutIndexed.GetHeightIndex()];
const unsigned int cols = tensorShape[dataLayoutIndexed.GetWidthIndex()];
- int radius = boost::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
+ int radius = armnn::numeric_cast<int>(norm_size / 2u); /* Strong Assumption on rounding Mode */
for (unsigned int n = 0; n < batchSize; n++)
{
@@ -119,16 +118,16 @@ void NormalizeAcrossUingLbr(Decoder<float>& inputData,
float accumulated_scale = 0.0;
for (int z = -radius; z <= radius; z++)
{
- int k = boost::numeric_cast<int>(c) + z;
+ int k = armnn::numeric_cast<int>(c) + z;
- if ((k < 0) || (k >= boost::numeric_cast<int>(depth)))
+ if ((k < 0) || (k >= armnn::numeric_cast<int>(depth)))
{
continue;
}
unsigned inputIndex = dataLayoutIndexed.GetIndex(tensorShape,
n,
- boost::numeric_cast<unsigned int>(k),
+ armnn::numeric_cast<unsigned int>(k),
h,
w);
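A note on the pattern repeated in the hunks above: the unsigned coordinates (w, h, c) are cast to int before the signed radius offset is added, because computing (w + x) in unsigned arithmetic would wrap around for negative offsets and the (i < 0) guard could never fire. The standalone sketch below, using plain static_cast in place of armnn::numeric_cast, demonstrates the same bounds check at a tensor edge:

#include <iostream>

int main()
{
    const unsigned int cols = 8;  // width of one tensor row
    const unsigned int w    = 0;  // current column, at the left edge
    const int radius        = 2;  // half of norm_size, as in the workload

    for (int x = -radius; x <= radius; x++)
    {
        // Signed arithmetic, matching the diff: i can legitimately go negative.
        int i = static_cast<int>(w) + x;
        if ((i < 0) || (i >= static_cast<int>(cols)))
        {
            continue; // neighbour falls outside the tensor; skip it
        }
        std::cout << "in-bounds neighbour column: " << i << "\n";
    }
    return 0; // prints columns 0, 1, 2 for this edge case
}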