/*
 * Copyright (c) 2022 Arm Limited. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "UseCaseHandler.hpp"

#include "InputFiles.hpp"
#include "YoloFastestModel.hpp"
#include "UseCaseCommonUtils.hpp"
#include "DetectorPostProcessing.hpp"
#include "hal.h"
#include "log_macros.h"

#include <cinttypes>

namespace arm {
namespace app {

    /**
     * @brief           Presents inference results using the data presentation
     *                  object.
     * @param[in]       results            Vector of detection results to be displayed.
     * @return          true if successful, false otherwise.
     **/
    static bool PresentInferenceResult(const std::vector<object_detection::DetectionResult>& results);

    /**
     * @brief           Draw boxes directly on the LCD for all detected objects.
     * @param[in]       results            Vector of detection results to be displayed.
     * @param[in]       imgStartX          X coordinate where the image starts on the LCD.
     * @param[in]       imgStartY          Y coordinate where the image starts on the LCD.
     * @param[in]       imgDownscaleFactor How much the image has been downscaled on the LCD.
     **/
    static void DrawDetectionBoxes(const std::vector<object_detection::DetectionResult>& results,
                                   uint32_t imgStartX,
                                   uint32_t imgStartY,
                                   uint32_t imgDownscaleFactor);

    /* Object detection inference handler. */
    bool ObjectDetectionHandler(ApplicationContext& ctx, uint32_t imgIndex, bool runAll)
    {
        auto& profiler = ctx.Get<Profiler&>("profiler");

        constexpr uint32_t dataPsnImgDownscaleFactor = 1;
        constexpr uint32_t dataPsnImgStartX = 10;
        constexpr uint32_t dataPsnImgStartY = 35;

        constexpr uint32_t dataPsnTxtInfStartX = 150;
        constexpr uint32_t dataPsnTxtInfStartY = 40;

        hal_lcd_clear(COLOR_BLACK);

        auto& model = ctx.Get<Model&>("model");

        /* If the request has a valid size, set the image index. */
        if (imgIndex < NUMBER_OF_FILES) {
            if (!SetAppCtxIfmIdx(ctx, imgIndex, "imgIndex")) {
                return false;
            }
        }
        if (!model.IsInited()) {
            printf_err("Model is not initialised! Terminating processing.\n");
            return false;
        }

        auto curImIdx = ctx.Get<uint32_t>("imgIndex");

        TfLiteTensor* inputTensor = model.GetInputTensor(0);

        if (!inputTensor->dims) {
            printf_err("Invalid input tensor dims\n");
            return false;
        } else if (inputTensor->dims->size < 3) {
            printf_err("Input tensor dimension should be >= 3\n");
            return false;
        }

        TfLiteIntArray* inputShape = model.GetInputShape(0);

        const uint32_t nCols = inputShape->data[arm::app::YoloFastestModel::ms_inputColsIdx];
        const uint32_t nRows = inputShape->data[arm::app::YoloFastestModel::ms_inputRowsIdx];

        /* Get the post-processing object. */
        auto& postp = ctx.Get<object_detection::DetectorPostprocessing&>("postprocess");

        do {
            /* Strings for presentation/logging. */
            std::string str_inf{"Running inference... "};

            const uint8_t* curr_image = get_img_array(ctx.Get<uint32_t>("imgIndex"));

            /* Copy over the data and convert to grayscale. */
            auto* dstPtr = static_cast<uint8_t*>(inputTensor->data.uint8);
            const size_t copySz = inputTensor->bytes < IMAGE_DATA_SIZE ?
                                  inputTensor->bytes : IMAGE_DATA_SIZE;

            /* Convert to grayscale and populate the input tensor. */
            image::RgbToGrayscale(curr_image, dstPtr, copySz);

            /* Display the image on the LCD. */
            hal_lcd_display_image(
                (channelsImageDisplayed == 3) ? curr_image : dstPtr,
                nCols, nRows, channelsImageDisplayed,
                dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);

            /* If the data is signed. */
            if (model.IsDataSigned()) {
                image::ConvertImgToInt8(inputTensor->data.data, inputTensor->bytes);
            }

            /* Display message on the LCD - inference running. */
            hal_lcd_display_text(str_inf.c_str(), str_inf.size(),
                                 dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);

            /* Run inference over this image. */
            info("Running inference on image %" PRIu32 " => %s\n", ctx.Get<uint32_t>("imgIndex"),
                 get_filename(ctx.Get<uint32_t>("imgIndex")));

            if (!RunInference(model, profiler)) {
                return false;
            }

            /* Erase. */
            str_inf = std::string(str_inf.size(), ' ');
            hal_lcd_display_text(str_inf.c_str(), str_inf.size(),
                                 dataPsnTxtInfStartX, dataPsnTxtInfStartY, false);

            /* Detector post-processing. */
            std::vector<object_detection::DetectionResult> results;
            TfLiteTensor* modelOutput0 = model.GetOutputTensor(0);
            TfLiteTensor* modelOutput1 = model.GetOutputTensor(1);
            postp.RunPostProcessing(
                nRows,
                nCols,
                modelOutput0,
                modelOutput1,
                results);

            /* Draw boxes. */
            DrawDetectionBoxes(results, dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);

#if VERIFY_TEST_OUTPUT
            arm::app::DumpTensor(modelOutput0);
            arm::app::DumpTensor(modelOutput1);
#endif /* VERIFY_TEST_OUTPUT */

            if (!PresentInferenceResult(results)) {
                return false;
            }

            profiler.PrintProfilingResult();

            IncrementAppCtxIfmIdx(ctx, "imgIndex");

        } while (runAll && ctx.Get<uint32_t>("imgIndex") != curImIdx);

        return true;
    }

    static bool PresentInferenceResult(const std::vector<object_detection::DetectionResult>& results)
    {
        hal_lcd_set_text_color(COLOR_GREEN);

        /* Log the final results. */
        info("Final results:\n");
        info("Total number of inferences: 1\n");

        for (uint32_t i = 0; i < results.size(); ++i) {
            info("%" PRIu32 ") (%f) -> %s {x=%d,y=%d,w=%d,h=%d}\n", i,
                 results[i].m_normalisedVal, "Detection box:",
                 results[i].m_x0, results[i].m_y0, results[i].m_w, results[i].m_h);
        }

        return true;
    }

    static void DrawDetectionBoxes(const std::vector<object_detection::DetectionResult>& results,
                                   uint32_t imgStartX,
                                   uint32_t imgStartY,
                                   uint32_t imgDownscaleFactor)
    {
        uint32_t lineThickness = 1;

        for (const auto& result: results) {
            /* Top line. */
            hal_lcd_display_box(imgStartX + result.m_x0/imgDownscaleFactor,
                                imgStartY + result.m_y0/imgDownscaleFactor,
                                result.m_w/imgDownscaleFactor, lineThickness, COLOR_GREEN);
            /* Bottom line. */
            hal_lcd_display_box(imgStartX + result.m_x0/imgDownscaleFactor,
                                imgStartY + (result.m_y0 + result.m_h)/imgDownscaleFactor - lineThickness,
                                result.m_w/imgDownscaleFactor, lineThickness, COLOR_GREEN);
            /* Left line. */
            hal_lcd_display_box(imgStartX + result.m_x0/imgDownscaleFactor,
                                imgStartY + result.m_y0/imgDownscaleFactor,
                                lineThickness, result.m_h/imgDownscaleFactor, COLOR_GREEN);
            /* Right line. */
            hal_lcd_display_box(imgStartX + (result.m_x0 + result.m_w)/imgDownscaleFactor - lineThickness,
                                imgStartY + result.m_y0/imgDownscaleFactor,
                                lineThickness, result.m_h/imgDownscaleFactor, COLOR_GREEN);
        }
    }

} /* namespace app */
} /* namespace arm */
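
/* Usage sketch (illustrative, not part of the original source): ObjectDetectionHandler
 * is normally invoked from the use case's main loop once the ApplicationContext has been
 * populated with the model, profiler, post-processing object and "imgIndex". The context
 * name caseContext below is an assumption.
 *
 *   arm::app::ObjectDetectionHandler(caseContext,
 *                                    caseContext.Get<uint32_t>("imgIndex"),
 *                                    false);                 // single image
 *   arm::app::ObjectDetectionHandler(caseContext, 0, true);  // loop over all images
 */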