author    Matthew Sloyan <matthew.sloyan@arm.com>    2020-09-23 16:57:23 +0100
committer Colm Donelan <colm.donelan@arm.com>        2020-10-02 15:05:08 +0000
commit    24ac85943b609e48fc36d16570ca4b5b90d31a6a
tree      6a279be5b2be2084ddff3ec989a957e17ecad26b   /src/backends/reference/workloads
parent    0c8cb872344d3ca4217881d6c70e826ae1776cdb
IVGCVSW-5334 Remove remaining boost::numeric_cast from armnn
* Floating point casts now use armnn::numeric_cast.
* Also removed remaining header imports.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I2d37847d67f164fc0a0ae17f34d49ff3d2210c30
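For readers skimming the diff below, the pattern of the change can be summarised by the sketch that follows. It is illustrative only and not part of the commit; it assumes <armnn/utility/NumericCast.hpp> is on the include path, as it is for these reference workloads, and the helper name AveragePoolDivisor is hypothetical.

    // Illustrative sketch only, not part of the commit.
    // armnn::numeric_cast is used here as a drop-in replacement for
    // boost::numeric_cast when converting integral values to float.
    #include <armnn/utility/NumericCast.hpp>

    float AveragePoolDivisor(unsigned int kernelHeight, unsigned int kernelWidth)
    {
        // Before this change: boost::numeric_cast<float>(kernelHeight * kernelWidth)
        // After this change:  the same expression through armnn::numeric_cast
        return armnn::numeric_cast<float>(kernelHeight * kernelWidth);
    }

Note that Debug.cpp is the one file in this diff that switches to a plain static_cast<float> rather than armnn::numeric_cast, as shown in its hunks below.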
Diffstat (limited to 'src/backends/reference/workloads')
-rw-r--r--  src/backends/reference/workloads/ConvImpl.hpp                             | 2
-rw-r--r--  src/backends/reference/workloads/Debug.cpp                                | 8
-rw-r--r--  src/backends/reference/workloads/DetectionPostProcess.cpp                 | 6
-rw-r--r--  src/backends/reference/workloads/Mean.cpp                                 | 8
-rw-r--r--  src/backends/reference/workloads/Pooling2d.cpp                            | 6
-rw-r--r--  src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp   | 4
-rw-r--r--  src/backends/reference/workloads/Resize.cpp                               | 8
-rw-r--r--  src/backends/reference/workloads/Slice.cpp                                | 2
8 files changed, 16 insertions(+), 28 deletions(-)
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index d971b8f24f..d9d8a87ae8 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -15,8 +15,6 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <cmath>
#include <limits>
diff --git a/src/backends/reference/workloads/Debug.cpp b/src/backends/reference/workloads/Debug.cpp
index aadbc7613b..df05ad73dc 100644
--- a/src/backends/reference/workloads/Debug.cpp
+++ b/src/backends/reference/workloads/Debug.cpp
@@ -8,8 +8,6 @@
#include <BFloat16.hpp>
#include <Half.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <algorithm>
#include <iostream>
@@ -53,10 +51,10 @@ void Debug(const TensorInfo& inputInfo,
std::cout << "], ";
std::cout << "\"min\": "
- << boost::numeric_cast<float>(*std::min_element(inputData, inputData + numElements)) << ", ";
+ << static_cast<float>(*std::min_element(inputData, inputData + numElements)) << ", ";
std::cout << "\"max\": "
- << boost::numeric_cast<float>(*std::max_element(inputData, inputData + numElements)) << ", ";
+ << static_cast<float>(*std::max_element(inputData, inputData + numElements)) << ", ";
std::cout << "\"data\": ";
@@ -70,7 +68,7 @@ void Debug(const TensorInfo& inputInfo,
}
}
- std::cout << boost::numeric_cast<float>(inputData[i]);
+ std::cout << static_cast<float>(inputData[i]);
for (unsigned int j = 0; j < numDims; j++)
{
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index ce07110da9..f80f20a441 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -8,8 +8,6 @@
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <algorithm>
#include <numeric>
@@ -120,7 +118,7 @@ void AllocateOutputData(unsigned int numOutput,
{
unsigned int boxCornorIndex = selectedBoxes[outputIndices[i]] * 4;
detectionScores[i] = selectedScores[outputIndices[i]];
- detectionClasses[i] = boost::numeric_cast<float>(selectedClasses[outputIndices[i]]);
+ detectionClasses[i] = armnn::numeric_cast<float>(selectedClasses[outputIndices[i]]);
detectionBoxes[boxIndex] = boxCorners[boxCornorIndex];
detectionBoxes[boxIndex + 1] = boxCorners[boxCornorIndex + 1];
detectionBoxes[boxIndex + 2] = boxCorners[boxCornorIndex + 2];
@@ -136,7 +134,7 @@ void AllocateOutputData(unsigned int numOutput,
detectionBoxes[boxIndex + 3] = 0.0f;
}
}
- numDetections[0] = boost::numeric_cast<float>(numSelected);
+ numDetections[0] = armnn::numeric_cast<float>(numSelected);
}
void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp
index e43a4d5312..fe34efe0c7 100644
--- a/src/backends/reference/workloads/Mean.cpp
+++ b/src/backends/reference/workloads/Mean.cpp
@@ -8,8 +8,6 @@
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <cmath>
#include <cstddef>
#include <functional>
@@ -130,15 +128,15 @@ void Mean(const armnn::TensorInfo& inputInfo,
for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
{
unsigned int current = inputDims[resolvedAxis[idx]];
- ARMNN_ASSERT(boost::numeric_cast<float>(current) <
- (std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
+ ARMNN_ASSERT(armnn::numeric_cast<float>(current) <
+ (std::numeric_limits<float>::max() / armnn::numeric_cast<float>(numElementsInAxis)));
numElementsInAxis *= current;
}
if (numElementsInAxis > 0) {
for (unsigned int idx = 0; idx < numOutputs; ++idx)
{
output[idx];
- output.Set(tempSum[idx] / boost::numeric_cast<float>(numElementsInAxis));
+ output.Set(tempSum[idx] / armnn::numeric_cast<float>(numElementsInAxis));
}
}
}
diff --git a/src/backends/reference/workloads/Pooling2d.cpp b/src/backends/reference/workloads/Pooling2d.cpp
index be6ff387f3..c5633e8eba 100644
--- a/src/backends/reference/workloads/Pooling2d.cpp
+++ b/src/backends/reference/workloads/Pooling2d.cpp
@@ -11,8 +11,6 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnn/utility/NumericCast.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <limits>
#include <algorithm>
#include <functional>
@@ -208,7 +206,7 @@ void Pooling2d(Decoder<float>& rInputDecoder,
wend = std::min(wend, widthInput + padRight);
float result = defaultInitializer;
- float poolAreaSize = boost::numeric_cast<float>(height * (wend - wstart));
+ float poolAreaSize = armnn::numeric_cast<float>(height * (wend - wstart));
// Special case: when the pooling kernel is over a padding region and the padding
// size is larger or equal to the kernel and the kernel only covers
@@ -248,7 +246,7 @@ void Pooling2d(Decoder<float>& rInputDecoder,
{
// When we exclude the padding, it means we calculate with a smaller
// kernel size, so I changed the divisor here.
- poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
+ poolAreaSize = armnn::numeric_cast<float>((hend - hstart) * (wend - wstart));
}
for (auto yInput = hstart; yInput < hend; yInput++)
diff --git a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
index aca3308974..cf355d35d2 100644
--- a/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
+++ b/src/backends/reference/workloads/RefFakeQuantizationFloat32Workload.cpp
@@ -9,7 +9,7 @@
#include "Profiling.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
namespace armnn
{
@@ -17,7 +17,7 @@ namespace armnn
void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max)
{
float scale = (max - min) / 255.f;
- int32_t offset = boost::numeric_cast<int32_t>((-min * 255.f) / (max - min));
+ int32_t offset = armnn::numeric_cast<int32_t>((-min * 255.f) / (max - min));
for (uint32_t i = 0; i < numElements; i++)
{
diff --git a/src/backends/reference/workloads/Resize.cpp b/src/backends/reference/workloads/Resize.cpp
index 16cdd4a2d0..b8bf1bceae 100644
--- a/src/backends/reference/workloads/Resize.cpp
+++ b/src/backends/reference/workloads/Resize.cpp
@@ -7,7 +7,7 @@
#include "TensorBufferArrayView.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/NumericCast.hpp>
#include <cmath>
#include <algorithm>
@@ -27,7 +27,7 @@ inline float Lerp(float a, float b, float w)
inline double EuclideanDistance(float Xa, float Ya, const unsigned int Xb, const unsigned int Yb)
{
- return std::sqrt(pow(Xa - boost::numeric_cast<float>(Xb), 2) + pow(Ya - boost::numeric_cast<float>(Yb), 2));
+ return std::sqrt(pow(Xa - armnn::numeric_cast<float>(Xb), 2) + pow(Ya - armnn::numeric_cast<float>(Yb), 2));
}
inline float CalculateResizeScale(const unsigned int& InputSize,
@@ -35,8 +35,8 @@ inline float CalculateResizeScale(const unsigned int& InputSize,
const bool& AlignCorners)
{
return (AlignCorners && OutputSize > 1)
- ? boost::numeric_cast<float>(InputSize - 1) / boost::numeric_cast<float>(OutputSize - 1)
- : boost::numeric_cast<float>(InputSize) / boost::numeric_cast<float>(OutputSize);
+ ? armnn::numeric_cast<float>(InputSize - 1) / armnn::numeric_cast<float>(OutputSize - 1)
+ : armnn::numeric_cast<float>(InputSize) / armnn::numeric_cast<float>(OutputSize);
}
inline float PixelScaler(const unsigned int& Pixel,
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
index e972524f11..d6836c6933 100644
--- a/src/backends/reference/workloads/Slice.cpp
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -8,8 +8,6 @@
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
namespace armnn
{