diff options
author | Derek Lamberti <derek.lamberti@arm.com> | 2019-11-26 16:38:31 +0000 |
---|---|---|
committer | Derek Lamberti <derek.lamberti@arm.com> | 2019-12-05 15:35:51 +0000 |
commit | 08446976e3b6ce0e02f22b391b37aacaad181e1a (patch) | |
tree | b57106c6a3e28662adb2592ac3e850a8f19b6ec7 /src/armnn/Network.cpp | |
parent | a3b31f010004ed397ec04325edf7020984847f21 (diff) | |
download | armnn-08446976e3b6ce0e02f22b391b37aacaad181e1a.tar.gz |
Replace boost logging with simple logger
!referencetests:214319
* Reduces arm nn binary size ~15%
* Also fixed test logging black hole issues
Change-Id: Iba27db304d9a8088fa46aeb0b52225d93bb56bc8
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
Diffstat (limited to 'src/armnn/Network.cpp')
-rw-r--r-- | src/armnn/Network.cpp | 7 |
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 53e28c344a..c764e2a059 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -33,7 +33,6 @@
 #include <boost/assert.hpp>
 #include <boost/format.hpp>
-#include <boost/log/trivial.hpp>
 #include <boost/numeric/conversion/converter_policies.hpp>
 #include <boost/cast.hpp>
@@ -76,7 +75,7 @@ void ReportError(const std::string& errorMessage,
 {
     std::stringstream fullErrorMessage;
     fullErrorMessage << "ERROR: " << errorMessage;
-    BOOST_LOG_TRIVIAL(warning) << fullErrorMessage.str();
+    ARMNN_LOG(warning) << fullErrorMessage.str();
     if (errorMessages)
     {
         errorMessages.value().push_back(fullErrorMessage.str());
@@ -88,7 +87,7 @@ void ReportWarning(const std::string& warningMessage,
 {
     std::stringstream fullWarningMessage;
     fullWarningMessage << "WARNING: " << warningMessage;
-    BOOST_LOG_TRIVIAL(warning) << fullWarningMessage.str();
+    ARMNN_LOG(warning) << fullWarningMessage.str();
     if (warningMessages)
     {
         warningMessages.value().push_back(fullWarningMessage.str());
@@ -120,7 +119,7 @@ bool CheckScaleSetOnQuantizedType(Layer* layer, Optional<std::vector<std::string
         ss << "Quantization parameters for Softmax layer (Scale: " <<
            info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
            ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
-        BOOST_LOG_TRIVIAL(warning) << ss.str();
+        ARMNN_LOG(warning) << ss.str();
         info.SetQuantizationScale((1.0f /256.0f));
         info.SetQuantizationOffset(0);
         outputSlot.SetTensorInfo(info);