summaryrefslogtreecommitdiff
path: root/source/use_case/object_detection/src/UseCaseHandler.cc
diff options
context:
space:
mode:
authorKshitij Sisodia <kshitij.sisodia@arm.com>2022-12-19 16:37:33 +0000
committerKshitij Sisodia <kshitij.sisodia@arm.com>2022-12-19 17:05:29 +0000
commit2ea46232a15aaf7600f1b92314612f4aa2fc6cd2 (patch)
tree7c05c514c3bbe932a067067b719d46ff16e5c2e7 /source/use_case/object_detection/src/UseCaseHandler.cc
parent9a97134ee00125c7a406cbf57c3ba8360df8f980 (diff)
downloadml-embedded-evaluation-kit-2ea46232a15aaf7600f1b92314612f4aa2fc6cd2.tar.gz
MLECO-3611: Formatting fixes for generated files.
Template files updated for generated files to adhere to coding guidelines and clang format configuration. There will still be unavoidable violations, but most of the others have been fixed. Change-Id: Ia03db40f8c62a369f2b07fe02eea65e41993a523 Signed-off-by: Kshitij Sisodia <kshitij.sisodia@arm.com>
Diffstat (limited to 'source/use_case/object_detection/src/UseCaseHandler.cc')
-rw-r--r--source/use_case/object_detection/src/UseCaseHandler.cc116
1 file changed, 68 insertions, 48 deletions
diff --git a/source/use_case/object_detection/src/UseCaseHandler.cc b/source/use_case/object_detection/src/UseCaseHandler.cc
index 084059e..9330187 100644
--- a/source/use_case/object_detection/src/UseCaseHandler.cc
+++ b/source/use_case/object_detection/src/UseCaseHandler.cc
@@ -1,6 +1,6 @@
/*
- * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates <open-source-office@arm.com>
- * SPDX-License-Identifier: Apache-2.0
+ * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates
+ * <open-source-office@arm.com> SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -15,11 +15,11 @@
* limitations under the License.
*/
#include "UseCaseHandler.hpp"
-#include "InputFiles.hpp"
-#include "YoloFastestModel.hpp"
-#include "UseCaseCommonUtils.hpp"
#include "DetectorPostProcessing.hpp"
#include "DetectorPreProcessing.hpp"
+#include "InputFiles.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "YoloFastestModel.hpp"
#include "hal.h"
#include "log_macros.h"
@@ -34,7 +34,8 @@ namespace app {
* @param[in] results Vector of detection results to be displayed.
* @return true if successful, false otherwise.
**/
- static bool PresentInferenceResult(const std::vector<object_detection::DetectionResult>& results);
+ static bool
+ PresentInferenceResult(const std::vector<object_detection::DetectionResult>& results);
/**
* @brief Draw boxes directly on the LCD for all detected objects.
@@ -43,11 +44,10 @@ namespace app {
* @param[in] imageStartY Y coordinate where the image starts on the LCD.
* @param[in] imgDownscaleFactor How much image has been downscaled on LCD.
**/
- static void DrawDetectionBoxes(
- const std::vector<object_detection::DetectionResult>& results,
- uint32_t imgStartX,
- uint32_t imgStartY,
- uint32_t imgDownscaleFactor);
+ static void DrawDetectionBoxes(const std::vector<object_detection::DetectionResult>& results,
+ uint32_t imgStartX,
+ uint32_t imgStartY,
+ uint32_t imgDownscaleFactor);
/* Object detection inference handler. */
bool ObjectDetectionHandler(ApplicationContext& ctx, uint32_t imgIndex, bool runAll)
@@ -55,8 +55,8 @@ namespace app {
auto& profiler = ctx.Get<Profiler&>("profiler");
constexpr uint32_t dataPsnImgDownscaleFactor = 1;
- constexpr uint32_t dataPsnImgStartX = 10;
- constexpr uint32_t dataPsnImgStartY = 35;
+ constexpr uint32_t dataPsnImgStartX = 10;
+ constexpr uint32_t dataPsnImgStartY = 35;
constexpr uint32_t dataPsnTxtInfStartX = 20;
constexpr uint32_t dataPsnTxtInfStartY = 28;
@@ -78,7 +78,7 @@ namespace app {
auto initialImgIdx = ctx.Get<uint32_t>("imgIndex");
- TfLiteTensor* inputTensor = model.GetInputTensor(0);
+ TfLiteTensor* inputTensor = model.GetInputTensor(0);
TfLiteTensor* outputTensor0 = model.GetOutputTensor(0);
TfLiteTensor* outputTensor1 = model.GetOutputTensor(1);
@@ -99,12 +99,14 @@ namespace app {
DetectorPreProcess preProcess = DetectorPreProcess(inputTensor, true, model.IsDataSigned());
std::vector<object_detection::DetectionResult> results;
- const object_detection::PostProcessParams postProcessParams {
- inputImgRows, inputImgCols, object_detection::originalImageSize,
- object_detection::anchor1, object_detection::anchor2
- };
- DetectorPostProcess postProcess = DetectorPostProcess(outputTensor0, outputTensor1,
- results, postProcessParams);
+ const object_detection::PostProcessParams postProcessParams{
+ inputImgRows,
+ inputImgCols,
+ object_detection::originalImageSize,
+ object_detection::anchor1,
+ object_detection::anchor2};
+ DetectorPostProcess postProcess =
+ DetectorPostProcess(outputTensor0, outputTensor1, results, postProcessParams);
do {
/* Ensure there are no results leftover from previous inference when running all. */
results.clear();
@@ -112,11 +114,11 @@ namespace app {
/* Strings for presentation/logging. */
std::string str_inf{"Running inference... "};
- const uint8_t* currImage = get_img_array(ctx.Get<uint32_t>("imgIndex"));
+ const uint8_t* currImage = GetImgArray(ctx.Get<uint32_t>("imgIndex"));
auto dstPtr = static_cast<uint8_t*>(inputTensor->data.uint8);
- const size_t copySz = inputTensor->bytes < IMAGE_DATA_SIZE ?
- inputTensor->bytes : IMAGE_DATA_SIZE;
+ const size_t copySz =
+ inputTensor->bytes < IMAGE_DATA_SIZE ? inputTensor->bytes : IMAGE_DATA_SIZE;
/* Run the pre-processing, inference and post-processing. */
if (!preProcess.DoPreProcess(currImage, copySz)) {
@@ -135,12 +137,13 @@ namespace app {
dataPsnImgDownscaleFactor);
/* Display message on the LCD - inference running. */
- hal_lcd_display_text(str_inf.c_str(), str_inf.size(),
- dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);
+ hal_lcd_display_text(
+ str_inf.c_str(), str_inf.size(), dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);
/* Run inference over this image. */
- info("Running inference on image %" PRIu32 " => %s\n", ctx.Get<uint32_t>("imgIndex"),
- get_filename(ctx.Get<uint32_t>("imgIndex")));
+ info("Running inference on image %" PRIu32 " => %s\n",
+ ctx.Get<uint32_t>("imgIndex"),
+ GetFilename(ctx.Get<uint32_t>("imgIndex")));
if (!RunInference(model, profiler)) {
printf_err("Inference failed.");
@@ -154,11 +157,12 @@ namespace app {
/* Erase. */
str_inf = std::string(str_inf.size(), ' ');
- hal_lcd_display_text(str_inf.c_str(), str_inf.size(),
- dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);
+ hal_lcd_display_text(
+ str_inf.c_str(), str_inf.size(), dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);
/* Draw boxes. */
- DrawDetectionBoxes(results, dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
+ DrawDetectionBoxes(
+ results, dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
#if VERIFY_TEST_OUTPUT
DumpTensor(modelOutput0);
@@ -171,14 +175,15 @@ namespace app {
profiler.PrintProfilingResult();
- IncrementAppCtxIfmIdx(ctx,"imgIndex");
+ IncrementAppCtxIfmIdx(ctx, "imgIndex");
} while (runAll && ctx.Get<uint32_t>("imgIndex") != initialImgIdx);
return true;
}
- static bool PresentInferenceResult(const std::vector<object_detection::DetectionResult>& results)
+ static bool
+ PresentInferenceResult(const std::vector<object_detection::DetectionResult>& results)
{
hal_lcd_set_text_color(COLOR_GREEN);
@@ -187,9 +192,14 @@ namespace app {
info("Total number of inferences: 1\n");
for (uint32_t i = 0; i < results.size(); ++i) {
- info("%" PRIu32 ") (%f) -> %s {x=%d,y=%d,w=%d,h=%d}\n", i,
- results[i].m_normalisedVal, "Detection box:",
- results[i].m_x0, results[i].m_y0, results[i].m_w, results[i].m_h );
+ info("%" PRIu32 ") (%f) -> %s {x=%d,y=%d,w=%d,h=%d}\n",
+ i,
+ results[i].m_normalisedVal,
+ "Detection box:",
+ results[i].m_x0,
+ results[i].m_y0,
+ results[i].m_w,
+ results[i].m_h);
}
return true;
@@ -202,24 +212,34 @@ namespace app {
{
uint32_t lineThickness = 1;
- for (const auto& result: results) {
+ for (const auto& result : results) {
/* Top line. */
- hal_lcd_display_box(imgStartX + result.m_x0/imgDownscaleFactor,
- imgStartY + result.m_y0/imgDownscaleFactor,
- result.m_w/imgDownscaleFactor, lineThickness, COLOR_GREEN);
+ hal_lcd_display_box(imgStartX + result.m_x0 / imgDownscaleFactor,
+ imgStartY + result.m_y0 / imgDownscaleFactor,
+ result.m_w / imgDownscaleFactor,
+ lineThickness,
+ COLOR_GREEN);
/* Bot line. */
- hal_lcd_display_box(imgStartX + result.m_x0/imgDownscaleFactor,
- imgStartY + (result.m_y0 + result.m_h)/imgDownscaleFactor - lineThickness,
- result.m_w/imgDownscaleFactor, lineThickness, COLOR_GREEN);
+ hal_lcd_display_box(imgStartX + result.m_x0 / imgDownscaleFactor,
+ imgStartY + (result.m_y0 + result.m_h) / imgDownscaleFactor -
+ lineThickness,
+ result.m_w / imgDownscaleFactor,
+ lineThickness,
+ COLOR_GREEN);
/* Left line. */
- hal_lcd_display_box(imgStartX + result.m_x0/imgDownscaleFactor,
- imgStartY + result.m_y0/imgDownscaleFactor,
- lineThickness, result.m_h/imgDownscaleFactor, COLOR_GREEN);
+ hal_lcd_display_box(imgStartX + result.m_x0 / imgDownscaleFactor,
+ imgStartY + result.m_y0 / imgDownscaleFactor,
+ lineThickness,
+ result.m_h / imgDownscaleFactor,
+ COLOR_GREEN);
/* Right line. */
- hal_lcd_display_box(imgStartX + (result.m_x0 + result.m_w)/imgDownscaleFactor - lineThickness,
- imgStartY + result.m_y0/imgDownscaleFactor,
- lineThickness, result.m_h/imgDownscaleFactor, COLOR_GREEN);
+ hal_lcd_display_box(imgStartX + (result.m_x0 + result.m_w) / imgDownscaleFactor -
+ lineThickness,
+ imgStartY + result.m_y0 / imgDownscaleFactor,
+ lineThickness,
+ result.m_h / imgDownscaleFactor,
+ COLOR_GREEN);
}
}