author      alered01 <Alex.Redshaw@arm.com>        2020-05-07 14:58:29 +0100
committer   Alex Redshaw <Alex.Redshaw@arm.com>    2020-05-22 11:05:07 +0000
commit      a7227ac8fa45c9ea0da0e1ed66bb0c551c61095b (patch)
tree        b0d1af7bed0e1d2de1bb7f2e64b67c56d43c10dc /tests/InferenceModel.hpp
parent      985ef1f2baf052a4d845b4fc7b6019ee7cd5e846 (diff)
download    armnn-a7227ac8fa45c9ea0da0e1ed66bb0c551c61095b.tar.gz
Adding more performance metrics
* Implemented the CLTuning flow for ExecuteNetwork tests
* Added --tuning-path to specify the tuning file to use or create
* Added --tuning-level to specify the tuning level to use and to enable an extra tuning run that generates the tuning file
* Fixed an issue where the TuningLevel was being parsed incorrectly
* Added measurements for initialization, network parsing, network optimization, tuning, and shutdown (see the timing-helper sketch below)
* Added a flag to control the number of iterations inference is run for
Signed-off-by: alered01 <Alex.Redshaw@arm.com>
Change-Id: Ic739ff26e136e32aff9f0995217c1c3207008ca4
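The timing measurements listed above rely on two helpers pulled in through the new armnn/utility/Timer.hpp include: armnn::GetTimeNow() and armnn::GetTimeDuration(). That header is not part of this diff, but from the call sites it is presumably a thin wrapper around std::chrono, along these lines (a minimal sketch of the assumed shape, not the verbatim ArmNN source):

#include <chrono>

namespace armnn
{
// Presumed shape of the helpers used below: GetTimeNow() captures a
// high-resolution timestamp, GetTimeDuration() returns the milliseconds
// elapsed since that timestamp.
inline std::chrono::high_resolution_clock::time_point GetTimeNow()
{
    return std::chrono::high_resolution_clock::now();
}

inline std::chrono::duration<double, std::milli> GetTimeDuration(
    std::chrono::high_resolution_clock::time_point startTime)
{
    return std::chrono::duration<double, std::milli>(GetTimeNow() - startTime);
}
} // namespace armnn

This mirrors the file-local GetCurrentTime()/GetTimeDuration() helpers that the patch removes from InferenceModel.hpp, hoisted into a shared utility header so other tests can reuse them.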
Diffstat (limited to 'tests/InferenceModel.hpp')
-rw-r--r--   tests/InferenceModel.hpp   31
1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/tests/InferenceModel.hpp b/tests/InferenceModel.hpp
index 410bc7c04e..781cef4ed0 100644
--- a/tests/InferenceModel.hpp
+++ b/tests/InferenceModel.hpp
@@ -6,6 +6,8 @@
 #pragma once
 
 #include <armnn/ArmNN.hpp>
+#include <armnn/Logging.hpp>
+#include <armnn/utility/Timer.hpp>
 #include <armnn/BackendRegistry.hpp>
 #include <armnn/utility/Assert.hpp>
 
@@ -31,7 +33,6 @@
 #include <boost/variant.hpp>
 
 #include <algorithm>
-#include <chrono>
 #include <iterator>
 #include <fstream>
 #include <map>
@@ -399,8 +400,12 @@ public:
             throw armnn::Exception("Some backend IDs are invalid: " + invalidBackends);
         }
 
+        const auto parsing_start_time = armnn::GetTimeNow();
         armnn::INetworkPtr network = CreateNetworkImpl<IParser>::Create(params, m_InputBindings, m_OutputBindings);
 
+        ARMNN_LOG(info) << "Network parsing time: " << std::setprecision(2)
+                        << std::fixed << armnn::GetTimeDuration(parsing_start_time).count() << " ms\n";
+
         armnn::IOptimizedNetworkPtr optNet{nullptr, [](armnn::IOptimizedNetwork*){}};
         {
             ARMNN_SCOPED_HEAP_PROFILING("Optimizing");
@@ -410,7 +415,12 @@ public:
             options.m_ReduceFp32ToBf16 = params.m_EnableBf16TurboMode;
             options.m_Debug = params.m_PrintIntermediateLayers;
 
+            const auto optimization_start_time = armnn::GetTimeNow();
             optNet = armnn::Optimize(*network, params.m_ComputeDevices, m_Runtime->GetDeviceSpec(), options);
+
+            ARMNN_LOG(info) << "Optimization time: " << std::setprecision(2)
+                            << std::fixed << armnn::GetTimeDuration(optimization_start_time).count() << " ms\n";
+
             if (!optNet)
             {
                 throw armnn::Exception("Optimize returned nullptr");
@@ -494,13 +504,13 @@ public:
         }
 
         // Start timer to record inference time in EnqueueWorkload (in milliseconds)
-        const auto start_time = GetCurrentTime();
+        const auto start_time = armnn::GetTimeNow();
 
         armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetworkIdentifier,
                                                        MakeInputTensors(inputContainers),
                                                        MakeOutputTensors(outputContainers));
 
-        const auto end_time = GetCurrentTime();
+        const auto duration = armnn::GetTimeDuration(start_time);
 
         // if profiling is enabled print out the results
         if (profiler && profiler->IsProfilingEnabled())
@@ -514,7 +524,7 @@ public:
         }
         else
         {
-            return std::chrono::duration<double, std::milli>(end_time - start_time);
+            return duration;
         }
     }
 
@@ -584,17 +594,4 @@ private:
     {
         return armnnUtils::MakeOutputTensors(m_OutputBindings, outputDataContainers);
    }
-
-    std::chrono::high_resolution_clock::time_point GetCurrentTime()
-    {
-        return std::chrono::high_resolution_clock::now();
-    }
-
-    std::chrono::duration<double, std::milli> GetTimeDuration(
-        std::chrono::high_resolution_clock::time_point& start_time,
-        std::chrono::high_resolution_clock::time_point& end_time)
-    {
-        return std::chrono::duration<double, std::milli>(end_time - start_time);
-    }
-
 };
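With this change, Run() returns the EnqueueWorkload duration directly as a std::chrono::duration<double, std::milli>, which lets callers such as the ExecuteNetwork tests aggregate timings over the new iteration-count flag. The following self-contained sketch shows that consumption pattern; DummyModel and the hard-coded iteration count are illustrative stand-ins, not the actual InferenceModel or ExecuteNetwork interfaces:

#include <chrono>
#include <iostream>
#include <thread>

// Hypothetical stand-in for a model whose Run() returns the measured
// inference duration, mirroring the return type used by the patched
// InferenceModel::Run().
struct DummyModel
{
    std::chrono::duration<double, std::milli> Run()
    {
        const auto start = std::chrono::high_resolution_clock::now();
        std::this_thread::sleep_for(std::chrono::milliseconds(5)); // fake workload
        return std::chrono::duration<double, std::milli>(
            std::chrono::high_resolution_clock::now() - start);
    }
};

int main()
{
    DummyModel model;
    const unsigned int iterations = 10; // analogous to the new iteration-count flag
    double totalMs = 0.0;

    for (unsigned int i = 0; i < iterations; ++i)
    {
        totalMs += model.Run().count();
    }

    std::cout << "Average inference time: " << totalMs / iterations << " ms\n";
    return 0;
}

Returning the duration rather than two raw time points keeps the timing logic in one place and removes the need for the private GetCurrentTime()/GetTimeDuration() helpers that this patch deletes.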