From 2ebaac7a007cbfae7fff818e4d6c4c33562eea0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonny=20Sv=C3=A4rd?= Date: Tue, 10 May 2022 17:29:30 +0200 Subject: Refactor performance measurements Change 'Inference runtime' to measure CPU cycles for the TensorFlow Lite Micro interpreter.Invoke() call. Add 'Operator(s) runtime' print that prints a summary for cycles spent on all operators during an inference. (This is equivalent to the old reported 'Inference runtime') Move prints out of the EndEvent() function in ArmProfiler as it otherwise interferes with the inference cycle measurement. Change-Id: Ie11b5abb5b12a3bcf5a67841f04834d05dfd796d --- applications/inference_process/include/inference_process.hpp | 1 + 1 file changed, 1 insertion(+) (limited to 'applications/inference_process/include/inference_process.hpp') diff --git a/applications/inference_process/include/inference_process.hpp b/applications/inference_process/include/inference_process.hpp index 9635884..fc54ae0 100644 --- a/applications/inference_process/include/inference_process.hpp +++ b/applications/inference_process/include/inference_process.hpp @@ -52,6 +52,7 @@ struct InferenceJob { std::vector<DataPtr> input; std::vector<DataPtr> output; std::vector<DataPtr> expectedOutput; + uint64_t cpuCycles{0}; size_t numBytesToPrint; void *externalContext; -- cgit v1.2.1