From 3107aa2152de9be8317e62da1d0327bcad6552e2 Mon Sep 17 00:00:00 2001
From: Isabella Gottardi
Date: Thu, 27 Jan 2022 16:39:37 +0000
Subject: MLECO-2873: Object detection usecase follow-up

Change-Id: Ic14e93a50fb7b3f3cfd9497bac1280794cc0fc15
Signed-off-by: Isabella Gottardi
---
 .../ExpectedObjectDetectionResults.cpp           |  66 --------------
 .../object_detection/InferenceTestYoloFastest.cc | 101 +++++++++++++--------
 .../object_detection/ObjectDetectionUCTest.cc    |  14 +--
 .../include/ExpectedObjectDetectionResults.hpp   |  26 ------
 4 files changed, 69 insertions(+), 138 deletions(-)
 delete mode 100644 tests/use_case/object_detection/ExpectedObjectDetectionResults.cpp
 delete mode 100644 tests/use_case/object_detection/include/ExpectedObjectDetectionResults.hpp

diff --git a/tests/use_case/object_detection/ExpectedObjectDetectionResults.cpp b/tests/use_case/object_detection/ExpectedObjectDetectionResults.cpp
deleted file mode 100644
index 2c69057..0000000
--- a/tests/use_case/object_detection/ExpectedObjectDetectionResults.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "ExpectedObjectDetectionResults.hpp"
-
-
-/*
-//Reference results
-Got 2 boxes
-0) (0.999246) -> Detection box: {x=89,y=17,w=41,h=56}
-1) (0.995367) -> Detection box: {x=27,y=81,w=48,h=53}
-Entering TestInference
-Got 1 boxes
-0) (0.998107) -> Detection box: {x=87,y=35,w=53,h=64}
-Entering TestInference
-Got 2 boxes
-0) (0.999244) -> Detection box: {x=105,y=73,w=58,h=66}
-1) (0.985984) -> Detection box: {x=34,y=40,w=70,h=95}
-Entering TestInference
-Got 2 boxes
-0) (0.993294) -> Detection box: {x=22,y=43,w=39,h=53}
-1) (0.992021) -> Detection box: {x=63,y=60,w=38,h=45}
-
-*/
-
-void get_expected_ut_results(std::vector<std::vector<arm::app::DetectionResult>> &expected_results)
-{
-
-    expected_results.resize(4);
-
-    std::vector<arm::app::DetectionResult> img_1(2);
-    std::vector<arm::app::DetectionResult> img_2(1);
-    std::vector<arm::app::DetectionResult> img_3(2);
-    std::vector<arm::app::DetectionResult> img_4(2);
-
-    img_1[0] = arm::app::DetectionResult(0.99,89,17,41,56);
-    img_1[1] = arm::app::DetectionResult(0.99,27,81,48,53);
-
-    img_2[0] = arm::app::DetectionResult(0.99,87,35,53,64);
-
-    img_3[0] = arm::app::DetectionResult(0.99,105,73,58,66);
-    img_3[1] = arm::app::DetectionResult(0.98,34,40,70,95);
-
-    img_4[0] = arm::app::DetectionResult(0.99,22,43,39,53);
-    img_4[1] = arm::app::DetectionResult(0.99,63,60,38,45);
-
-    expected_results[0] = img_1;
-    expected_results[1] = img_2;
-    expected_results[2] = img_3;
-    expected_results[3] = img_4;
-
-
-}
diff --git a/tests/use_case/object_detection/InferenceTestYoloFastest.cc b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
index e6ae573..e5a5efe 100644
--- a/tests/use_case/object_detection/InferenceTestYoloFastest.cc
+++ b/tests/use_case/object_detection/InferenceTestYoloFastest.cc
@@ -21,22 +21,52 @@
 #include "DetectorPostProcessing.hpp"
 #include "InputFiles.hpp"
 #include "UseCaseCommonUtils.hpp"
-#include "DetectionUseCaseUtils.hpp"
"ExpectedObjectDetectionResults.hpp" #include +void GetExpectedResults(std::vector> &expected_results) +{ + /* Img1 + 0) (0.999246) -> Detection box: {x=89,y=17,w=41,h=56} + 1) (0.995367) -> Detection box: {x=27,y=81,w=48,h=53} + */ + expected_results.push_back({ + arm::app::object_detection::DetectionResult(0.99,89,17,41,56), + arm::app::object_detection::DetectionResult(0.99,27,81,48,53) + }); + /* Img2 + 0) (0.998107) -> Detection box: {x=87,y=35,w=53,h=64} + */ + expected_results.push_back({ + arm::app::object_detection::DetectionResult(0.99,87,35,53,64) + }); + /* Img3 + 0) (0.999244) -> Detection box: {x=105,y=73,w=58,h=66} + 1) (0.985984) -> Detection box: {x=34,y=40,w=70,h=95} + */ + expected_results.push_back({ + arm::app::object_detection::DetectionResult(0.99,105,73,58,66), + arm::app::object_detection::DetectionResult(0.98,34,40,70,95) + }); + /* Img4 + 0) (0.993294) -> Detection box: {x=22,y=43,w=39,h=53} + 1) (0.992021) -> Detection box: {x=63,y=60,w=38,h=45} + */ + expected_results.push_back({ + arm::app::object_detection::DetectionResult(0.99,22,43,39,53), + arm::app::object_detection::DetectionResult(0.99,63,60,38,45) + }); +} bool RunInference(arm::app::Model& model, const uint8_t imageData[]) { TfLiteTensor* inputTensor = model.GetInputTensor(0); REQUIRE(inputTensor); - const size_t copySz = inputTensor->bytes < (INPUT_IMAGE_WIDTH*INPUT_IMAGE_HEIGHT) ? - inputTensor->bytes : - (INPUT_IMAGE_WIDTH*INPUT_IMAGE_HEIGHT); + const size_t copySz = inputTensor->bytes < IMAGE_DATA_SIZE ? + inputTensor->bytes : IMAGE_DATA_SIZE; - arm::app::RgbToGrayscale(imageData,inputTensor->data.uint8,INPUT_IMAGE_WIDTH,INPUT_IMAGE_HEIGHT); + image::RgbToGrayscale(imageData,inputTensor->data.uint8,copySz); if(model.IsDataSigned()){ convertImgIoInt8(inputTensor->data.data, copySz); @@ -46,51 +76,48 @@ bool RunInference(arm::app::Model& model, const uint8_t imageData[]) } template -void TestInference(int imageIdx, arm::app::Model& model, T tolerance) { - - info("Entering TestInference for image %d \n", imageIdx); +void TestInferenceDetectionResults(int imageIdx, arm::app::Model& model, T tolerance) { - std::vector results; + std::vector results; auto image = get_img_array(imageIdx); + TfLiteIntArray* inputShape = model.GetInputShape(0); + auto nCols = inputShape->data[arm::app::YoloFastestModel::ms_inputColsIdx]; + auto nRows = inputShape->data[arm::app::YoloFastestModel::ms_inputRowsIdx]; + REQUIRE(RunInference(model, image)); - TfLiteTensor* output_arr[2] = {nullptr,nullptr}; - output_arr[0] = model.GetOutputTensor(0); - output_arr[1] = model.GetOutputTensor(1); - - for (int i =0; i < 2; i++) { - REQUIRE(output_arr[i]); + std::vector output_arr{model.GetOutputTensor(0), model.GetOutputTensor(1)}; + for (size_t i =0; i < output_arr.size(); i++) { + REQUIRE(output_arr[i]); REQUIRE(tflite::GetTensorData(output_arr[i])); } - RunPostProcessing(NULL,output_arr,results); - - info("Got %ld boxes \n",results.size()); - - std::vector> expected_results; - get_expected_ut_results(expected_results); - - /*validate got the same number of boxes */ + arm::app::object_detection::DetectorPostprocessing postp; + postp.RunPostProcessing( + nullptr, + nRows, + nCols, + output_arr[0], + output_arr[1], + results); + + std::vector> expected_results; + GetExpectedResults(expected_results); + + /* Validate got the same number of boxes */ REQUIRE(results.size() == expected_results[imageIdx].size()); - - - for (int i=0; i < (int)results.size(); i++) { - - info("%" PRIu32 ") (%f) -> %s {x=%d,y=%d,w=%d,h=%d}\n", (int)i, - 
-            results[i].m_normalisedVal, "Detection box:",
-            results[i].m_x0, results[i].m_y0, results[i].m_w, results[i].m_h );
-        /*validate confidence and box dimensions */
-        REQUIRE(fabs(results[i].m_normalisedVal - expected_results[imageIdx][i].m_normalisedVal) < 0.1);
+
+    for (int i=0; i < (int)results.size(); i++) {
+        /* Validate confidence and box dimensions */
+        REQUIRE(std::abs(results[i].m_normalisedVal - expected_results[imageIdx][i].m_normalisedVal) < 0.1);
         REQUIRE(static_cast<int>(results[i].m_x0) == Approx(static_cast<int>((T)expected_results[imageIdx][i].m_x0)).epsilon(tolerance));
         REQUIRE(static_cast<int>(results[i].m_y0) == Approx(static_cast<int>((T)expected_results[imageIdx][i].m_y0)).epsilon(tolerance));
         REQUIRE(static_cast<int>(results[i].m_w) == Approx(static_cast<int>((T)expected_results[imageIdx][i].m_w)).epsilon(tolerance));
         REQUIRE(static_cast<int>(results[i].m_h) == Approx(static_cast<int>((T)expected_results[imageIdx][i].m_h)).epsilon(tolerance));
     }
-
-
 }
 
@@ -105,7 +132,7 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and YoloFastest", "[Yolo
         REQUIRE(model.IsInited());
 
         for (uint32_t i = 0 ; i < NUMBER_OF_FILES; ++i) {
-            TestInference(i, model, 1);
+            TestInferenceDetectionResults(i, model, 1);
         }
     }
 
@@ -118,7 +145,7 @@ TEST_CASE("Running inference with TensorFlow Lite Micro and YoloFastest", "[Yolo
             REQUIRE(model.Init());
             REQUIRE(model.IsInited());
 
-            TestInference(i, model, 1);
+            TestInferenceDetectionResults(i, model, 1);
         }
     }
 }
diff --git a/tests/use_case/object_detection/ObjectDetectionUCTest.cc b/tests/use_case/object_detection/ObjectDetectionUCTest.cc
index 0a0486e..2e63f36 100644
--- a/tests/use_case/object_detection/ObjectDetectionUCTest.cc
+++ b/tests/use_case/object_detection/ObjectDetectionUCTest.cc
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 #include "DetectionResult.hpp"
-//include "Detector.hpp"
+#include "DetectorPostProcessing.hpp"
 #include "hal.h"
 #include "YoloFastestModel.hpp"
 #include "UseCaseHandler.hpp"
@@ -25,7 +25,6 @@
 
 TEST_CASE("Model info")
 {
-printf("Entering Model info \n");
     /* Model wrapper object. */
     arm::app::YoloFastestModel model;
 
@@ -43,7 +42,6 @@ printf("Entering Model info \n");
 
 TEST_CASE("Inference by index")
 {
-printf("Entering Inference by index \n");
     hal_platform platform;
     data_acq_module data_acq;
     data_psn_module data_psn;
@@ -67,17 +65,15 @@ printf("Entering Inference by index \n");
     caseContext.Set("platform", platform);
     caseContext.Set("model", model);
     caseContext.Set("imgIndex", 0);
+    arm::app::object_detection::DetectorPostprocessing postp;
+    caseContext.Set("postprocess", postp);
 
     REQUIRE(arm::app::ObjectDetectionHandler(caseContext, 0, false));
-
-    auto results = caseContext.Get<std::vector<arm::app::DetectionResult>>("results");
-
 }
 
 
 TEST_CASE("Inference run all images")
 {
-    printf("Entering Inference run all images \n");
     hal_platform platform;
     data_acq_module data_acq;
     data_psn_module data_psn;
@@ -101,15 +97,15 @@ TEST_CASE("Inference run all images")
     caseContext.Set("platform", platform);
     caseContext.Set("model", model);
     caseContext.Set("imgIndex", 0);
+    arm::app::object_detection::DetectorPostprocessing postp;
+    caseContext.Set("postprocess", postp);
 
     REQUIRE(arm::app::ObjectDetectionHandler(caseContext, 0, true));
-
 }
 
 
 TEST_CASE("List all images")
 {
-printf("Entering List all images \n");
     hal_platform platform;
     data_acq_module data_acq;
     data_psn_module data_psn;
diff --git a/tests/use_case/object_detection/include/ExpectedObjectDetectionResults.hpp b/tests/use_case/object_detection/include/ExpectedObjectDetectionResults.hpp
deleted file mode 100644
index 09edc00..0000000
--- a/tests/use_case/object_detection/include/ExpectedObjectDetectionResults.hpp
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2022 Arm Limited. All rights reserved.
- * SPDX-License-Identifier: Apache-2.0
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef EXPECTED_OBJECT_DETECTION_RESULTS_H
-#define EXPECTED_OBJECT_DETECTION_RESULTS_H
-
-#include "DetectionResult.hpp"
-#include <vector>
-
-void get_expected_ut_results(std::vector<std::vector<arm::app::DetectionResult>> &expected_results);
-
-
-#endif /* EXPECTED_OBJECT_DETECTION_RESULTS_H */
-- 
cgit v1.2.1
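
For reference, the pattern this change migrates the tests to: detector post-processing is now driven through the arm::app::object_detection::DetectorPostprocessing class rather than the old free RunPostProcessing()/get_expected_ut_results() helpers, with the input geometry read from the model's input shape. The sketch below restates that calling sequence using only names that appear in the diff above; the helper function name and surrounding scaffolding are hypothetical, and the kit headers are assumed to be available on the include path.

    #include <vector>
    #include "YoloFastestModel.hpp"        /* assumed kit header, as included by the tests */
    #include "DetectorPostProcessing.hpp"  /* assumed kit header, as included by the tests */

    /* Hypothetical helper: run the YOLO Fastest post-processing on a model's two
     * output tensors and return the detected boxes, mirroring what
     * TestInferenceDetectionResults() does in the patch above. */
    static std::vector<arm::app::object_detection::DetectionResult>
    RunDetectorPostProcessing(arm::app::YoloFastestModel& model)
    {
        /* Input geometry comes from the model's input shape. */
        TfLiteIntArray* inputShape = model.GetInputShape(0);
        auto nCols = inputShape->data[arm::app::YoloFastestModel::ms_inputColsIdx];
        auto nRows = inputShape->data[arm::app::YoloFastestModel::ms_inputRowsIdx];

        std::vector<arm::app::object_detection::DetectionResult> results;

        /* A stateful post-processor object replaces the old free RunPostProcessing()
         * call; the tests pass nullptr for the image pointer argument. */
        arm::app::object_detection::DetectorPostprocessing postp;
        postp.RunPostProcessing(nullptr,
                                nRows,
                                nCols,
                                model.GetOutputTensor(0),
                                model.GetOutputTensor(1),
                                results);
        return results;
    }

The use-case handler tests follow the same idea: they construct a DetectorPostprocessing instance and place it in the case context under the "postprocess" key before calling ObjectDetectionHandler(), instead of reading a "results" vector back out of the context afterwards.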