From ac73760a3731934ff7401d847eb2db7b9a77be02 Mon Sep 17 00:00:00 2001
From: Derek Lamberti <derek.lamberti@arm.com>
Date: Thu, 16 May 2019 16:33:00 +0100
Subject: IVGCVSW-3060 Classification tests display output value as raw float

Change-Id: I92a1e043d60fa2fe3414dc9339ef36204aca42e2
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
---
 tests/InferenceTest.hpp | 47 -----------------------
 tests/InferenceTest.inl | 99 +++++++++++++++++++++++++++++++++----------------
 2 files changed, 68 insertions(+), 78 deletions(-)

diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 3ebfdbcc3c..40c9e5e597 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -136,53 +136,6 @@ private:
     std::vector<TContainer> m_Outputs;
 };
 
-template <typename TDataType>
-struct ToFloat { }; // nothing defined for the generic case
-
-template <>
-struct ToFloat<float>
-{
-    static inline float Convert(float value, const InferenceModelInternal::QuantizationParams &)
-    {
-        // assuming that float models are not quantized
-        return value;
-    }
-
-    static inline float Convert(int value, const InferenceModelInternal::QuantizationParams &)
-    {
-        // assuming that float models are not quantized
-        return static_cast<float>(value);
-    }
-};
-
-template <>
-struct ToFloat<uint8_t>
-{
-    static inline float Convert(uint8_t value,
-                                const InferenceModelInternal::QuantizationParams & quantizationParams)
-    {
-        return armnn::Dequantize(value,
-                                 quantizationParams.first,
-                                 quantizationParams.second);
-    }
-
-    static inline float Convert(int value,
-                                const InferenceModelInternal::QuantizationParams & quantizationParams)
-    {
-        return armnn::Dequantize(static_cast<uint8_t>(value),
-                                 quantizationParams.first,
-                                 quantizationParams.second);
-    }
-
-    static inline float Convert(float value,
-                                const InferenceModelInternal::QuantizationParams & quantizationParams)
-    {
-        return armnn::Dequantize(static_cast<uint8_t>(value),
-                                 quantizationParams.first,
-                                 quantizationParams.second);
-    }
-};
-
 template <typename TTestCaseDatabase, typename TModel>
 class ClassifierTestCase : public InferenceModelTestCase<TModel>
 {
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index 0112037bc3..04cae99132 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -51,48 +51,85 @@ ClassifierTestCase<TTestCaseDatabase, TModel>::ClassifierTestCase(
 {
 }
 
-template <typename TTestCaseDatabase, typename TModel>
-TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(const InferenceTestOptions& params)
+struct ClassifierResultProcessor : public boost::static_visitor<>
 {
-    auto& output = this->GetOutputs()[0];
-    const auto testCaseId = this->GetTestCaseId();
+    using ResultMap = std::map<float, int>;
 
-    std::map<float, int> resultMap;
+    ClassifierResultProcessor(float scale, int offset)
+        : m_Scale(scale)
+        , m_Offset(offset)
+    {}
+
+    void operator()(const std::vector<float>& values)
     {
-        int index = 0;
+        SortPredictions(values, [](float value)
+                                {
+                                    return value;
+                                });
+    }
 
-        boost::apply_visitor([&](auto&& value)
-                             {
-                                 for (const auto & o : value)
-                                 {
-                                     float prob = ToFloat<typename TModel::DataType>::Convert(o, m_QuantizationParams);
-                                     int classification = index++;
-
-                                     // Take the first class with each probability
-                                     // This avoids strange results when looping over batched results produced
-                                     // with identical test data.
-                                     std::map<float, int>::iterator lb = resultMap.lower_bound(prob);
-                                     if (lb == resultMap.end() ||
-                                         !resultMap.key_comp()(prob, lb->first)) {
-                                         // If the key is not already in the map, insert it.
-                                         resultMap.insert(lb, std::map<float, int>::value_type(prob, classification));
-                                     }
-                                 }
-                             },
-                             output);
+    void operator()(const std::vector<uint8_t>& values)
+    {
+        auto& scale = m_Scale;
+        auto& offset = m_Offset;
+        SortPredictions(values, [&scale, &offset](uint8_t value)
+                                {
+                                    return armnn::Dequantize(value, scale, offset);
+                                });
     }
 
+    void operator()(const std::vector<int>& values)
     {
-        BOOST_LOG_TRIVIAL(info) << "= Prediction values for test #" << testCaseId;
-        auto it = resultMap.rbegin();
-        for (int i=0; i<5 && it != resultMap.rend(); ++i)
+        BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
+    }
+
+    ResultMap& GetResultMap() { return m_ResultMap; }
+
+private:
+    template<typename Container, typename Delegate>
+    void SortPredictions(const Container& c, Delegate delegate)
+    {
+        int index = 0;
+        for (const auto& value : c)
         {
-            BOOST_LOG_TRIVIAL(info) << "Top(" << (i+1) << ") prediction is " << it->second <<
-                " with confidence: " << 100.0*(it->first) << "%";
-            ++it;
+            int classification = index++;
+            // Take the first class with each probability
+            // This avoids strange results when looping over batched results produced
+            // with identical test data.
+            ResultMap::iterator lb = m_ResultMap.lower_bound(value);
+
+            if (lb == m_ResultMap.end() || !m_ResultMap.key_comp()(value, lb->first))
+            {
+                // If the key is not already in the map, insert it.
+                m_ResultMap.insert(lb, ResultMap::value_type(delegate(value), classification));
+            }
         }
     }
 
+    ResultMap m_ResultMap;
+
+    float m_Scale=0.0f;
+    int m_Offset=0;
+};
+
+template <typename TTestCaseDatabase, typename TModel>
+TestCaseResult ClassifierTestCase<TTestCaseDatabase, TModel>::ProcessResult(const InferenceTestOptions& params)
+{
+    auto& output = this->GetOutputs()[0];
+    const auto testCaseId = this->GetTestCaseId();
+
+    ClassifierResultProcessor resultProcessor(m_QuantizationParams.first, m_QuantizationParams.second);
+    boost::apply_visitor(resultProcessor, output);
+
+    BOOST_LOG_TRIVIAL(info) << "= Prediction values for test #" << testCaseId;
+    auto it = resultProcessor.GetResultMap().rbegin();
+    for (int i=0; i<5 && it != resultProcessor.GetResultMap().rend(); ++i)
+    {
+        BOOST_LOG_TRIVIAL(info) << "Top(" << (i+1) << ") prediction is " << it->second <<
+            " with value: " << (it->first);
+        ++it;
+    }
+
     unsigned int prediction = 0;
     boost::apply_visitor([&](auto&& value)
-- 
cgit v1.2.1