aboutsummaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-04-09 14:08:06 +0100
committerAron Virginas-Tar <Aron.Virginas-Tar@arm.com>2019-04-10 15:54:26 +0100
commitd4f0fead9e014f14e1435281f91fc7a48d02d6a1 (patch)
treea65cbc44024d289e34400df094719e285764a322 /include
parentf30f7d32b22020f80b21da7b008d8302cee9d395 (diff)
downloadarmnn-d4f0fead9e014f14e1435281f91fc7a48d02d6a1.tar.gz
IVGCVSW-2947 Remove boost dependency from include/TypesUtils.hpp
!android-nn-driver:968
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I03ccb4842b060a9893567542bfcadc180bbc7311
Diffstat (limited to 'include')
-rw-r--r--  include/armnn/TypesUtils.hpp  27
1 file changed, 2 insertions(+), 25 deletions(-)
diff --git a/include/armnn/TypesUtils.hpp b/include/armnn/TypesUtils.hpp
index 4880c90538..837490d258 100644
--- a/include/armnn/TypesUtils.hpp
+++ b/include/armnn/TypesUtils.hpp
@@ -7,9 +7,6 @@
#include "Tensor.hpp"
#include "Types.hpp"
-#include <boost/assert.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
#include <cmath>
#include <ostream>
#include <set>
@@ -191,20 +188,7 @@ inline std::ostream & operator<<(std::ostream & os, const armnn::TensorShape & s
/// @return - The quantized value calculated as round(value/scale)+offset.
///
template<typename QuantizedType>
-inline QuantizedType Quantize(float value, float scale, int32_t offset)
-{
- static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
- constexpr QuantizedType max = std::numeric_limits<QuantizedType>::max();
- constexpr QuantizedType min = std::numeric_limits<QuantizedType>::lowest();
- BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!std::isnan(value));
-
- float clampedValue = std::min(std::max(static_cast<float>(round(value/scale) + offset), static_cast<float>(min)),
- static_cast<float>(max));
- auto quantizedBits = static_cast<QuantizedType>(clampedValue);
-
- return quantizedBits;
-}
+QuantizedType Quantize(float value, float scale, int32_t offset);
/// Dequantize an 8-bit data type into a floating point data type.
/// @param value - The value to dequantize.
@@ -213,14 +197,7 @@ inline QuantizedType Quantize(float value, float scale, int32_t offset)
/// @return - The dequantized value calculated as (value-offset)*scale.
///
template <typename QuantizedType>
-inline float Dequantize(QuantizedType value, float scale, int32_t offset)
-{
- static_assert(IsQuantizedType<QuantizedType>(), "Not an integer type.");
- BOOST_ASSERT(scale != 0.f);
- BOOST_ASSERT(!std::isnan(value));
- float dequantized = boost::numeric_cast<float>(value - offset) * scale;
- return dequantized;
-}
+float Dequantize(QuantizedType value, float scale, int32_t offset);
inline void VerifyTensorInfoDataType(const armnn::TensorInfo & info, armnn::DataType dataType)
{