Diffstat (limited to 'source/application/api/use_case/object_detection')
-rw-r--r--  source/application/api/use_case/object_detection/CMakeLists.txt                        40
-rw-r--r--  source/application/api/use_case/object_detection/include/DetectionResult.hpp           61
-rw-r--r--  source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp   125
-rw-r--r--  source/application/api/use_case/object_detection/include/DetectorPreProcessing.hpp     60
-rw-r--r--  source/application/api/use_case/object_detection/include/YoloFastestModel.hpp          56
-rw-r--r--  source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc        240
-rw-r--r--  source/application/api/use_case/object_detection/src/DetectorPreProcessing.cc          52
-rw-r--r--  source/application/api/use_case/object_detection/src/YoloFastestModel.cc               45
8 files changed, 679 insertions(+), 0 deletions(-)
diff --git a/source/application/api/use_case/object_detection/CMakeLists.txt b/source/application/api/use_case/object_detection/CMakeLists.txt
new file mode 100644
index 0000000..797ff55
--- /dev/null
+++ b/source/application/api/use_case/object_detection/CMakeLists.txt
@@ -0,0 +1,40 @@
+#----------------------------------------------------------------------------
+# Copyright (c) 2022 Arm Limited. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#----------------------------------------------------------------------------
+#########################################################
+# OBJECT DETECTION API library #
+#########################################################
+cmake_minimum_required(VERSION 3.15.6)
+
+set(OBJECT_DETECTION_API_TARGET object_detection_api)
+project(${OBJECT_DETECTION_API_TARGET}
+ DESCRIPTION "Object detection use case API library"
+ LANGUAGES C CXX)
+
+# Create static library
+add_library(${OBJECT_DETECTION_API_TARGET} STATIC
+ src/DetectorPreProcessing.cc
+ src/DetectorPostProcessing.cc
+ src/YoloFastestModel.cc)
+
+target_include_directories(${OBJECT_DETECTION_API_TARGET} PUBLIC include)
+
+target_link_libraries(${OBJECT_DETECTION_API_TARGET} PUBLIC common_api)
+
+message(STATUS "*******************************************************")
+message(STATUS "Library : " ${OBJECT_DETECTION_API_TARGET})
+message(STATUS "CMAKE_SYSTEM_PROCESSOR : " ${CMAKE_SYSTEM_PROCESSOR})
+message(STATUS "*******************************************************")
diff --git a/source/application/api/use_case/object_detection/include/DetectionResult.hpp b/source/application/api/use_case/object_detection/include/DetectionResult.hpp
new file mode 100644
index 0000000..aa74d90
--- /dev/null
+++ b/source/application/api/use_case/object_detection/include/DetectionResult.hpp
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DETECTION_RESULT_HPP
+#define DETECTION_RESULT_HPP
+
+
+namespace arm {
+namespace app {
+namespace object_detection {
+
+ /**
+ * @brief Class representing a single detection result.
+ */
+ class DetectionResult {
+ public:
+ /**
+ * @brief Constructor
+         * @param[in] normalisedVal Normalised result value.
+         * @param[in] x0            Top left corner x coordinate.
+         * @param[in] y0            Top left corner y coordinate.
+         * @param[in] w             Detection result width.
+         * @param[in] h             Detection result height.
+ **/
+ DetectionResult(double normalisedVal,int x0,int y0, int w,int h) :
+ m_normalisedVal(normalisedVal),
+ m_x0(x0),
+ m_y0(y0),
+ m_w(w),
+ m_h(h)
+ {
+ }
+
+ DetectionResult() = default;
+ ~DetectionResult() = default;
+
+ double m_normalisedVal{0.0};
+ int m_x0{0};
+ int m_y0{0};
+ int m_w{0};
+ int m_h{0};
+ };
+
+} /* namespace object_detection */
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* DETECTION_RESULT_HPP */
diff --git a/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp b/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp
new file mode 100644
index 0000000..30bc123
--- /dev/null
+++ b/source/application/api/use_case/object_detection/include/DetectorPostProcessing.hpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DETECTOR_POST_PROCESSING_HPP
+#define DETECTOR_POST_PROCESSING_HPP
+
+#include "ImageUtils.hpp"
+#include "DetectionResult.hpp"
+#include "YoloFastestModel.hpp"
+#include "BaseProcessing.hpp"
+
+#include <forward_list>
+
+namespace arm {
+namespace app {
+
+namespace object_detection {
+
+ struct Branch {
+ int resolution;
+ int numBox;
+ const float* anchor;
+ int8_t* modelOutput;
+ float scale;
+ int zeroPoint;
+ size_t size;
+ };
+
+ struct Network {
+ int inputWidth;
+ int inputHeight;
+ int numClasses;
+ std::vector<Branch> branches;
+ int topN;
+ };
+
+} /* namespace object_detection */
+
+ /**
+ * @brief Post-processing class for Object Detection use case.
+ * Implements methods declared by BasePostProcess and anything else needed
+ * to populate result vector.
+ */
+ class DetectorPostProcess : public BasePostProcess {
+ public:
+ /**
+ * @brief Constructor.
+ * @param[in] outputTensor0 Pointer to the TFLite Micro output Tensor at index 0.
+ * @param[in] outputTensor1 Pointer to the TFLite Micro output Tensor at index 1.
+ * @param[out] results Vector of detected results.
+ * @param[in] inputImgRows Number of rows in the input image.
+ * @param[in] inputImgCols Number of columns in the input image.
+ * @param[in] threshold Post-processing threshold.
+ * @param[in] nms Non-maximum Suppression threshold.
+ * @param[in] numClasses Number of classes.
+ * @param[in] topN Top N for each class.
+ **/
+ explicit DetectorPostProcess(TfLiteTensor* outputTensor0,
+ TfLiteTensor* outputTensor1,
+ std::vector<object_detection::DetectionResult>& results,
+ int inputImgRows,
+ int inputImgCols,
+ float threshold = 0.5f,
+ float nms = 0.45f,
+ int numClasses = 1,
+ int topN = 0);
+
+ /**
+ * @brief Should perform YOLO post-processing of the result of inference then
+ * populate Detection result data for any later use.
+ * @return true if successful, false otherwise.
+ **/
+ bool DoPostProcess() override;
+
+ private:
+ TfLiteTensor* m_outputTensor0; /* Output tensor index 0 */
+ TfLiteTensor* m_outputTensor1; /* Output tensor index 1 */
+ std::vector<object_detection::DetectionResult>& m_results; /* Single inference results. */
+ int m_inputImgRows; /* Number of rows for model input. */
+ int m_inputImgCols; /* Number of cols for model input. */
+ float m_threshold; /* Post-processing threshold. */
+ float m_nms; /* NMS threshold. */
+ int m_numClasses; /* Number of classes. */
+ int m_topN; /* TopN. */
+ object_detection::Network m_net; /* YOLO network object. */
+
+ /**
+ * @brief Insert the given Detection in the list.
+ * @param[in] detections List of detections.
+ * @param[in] det Detection to be inserted.
+ **/
+ void InsertTopNDetections(std::forward_list<image::Detection>& detections, image::Detection& det);
+
+ /**
+ * @brief Given a Network calculate the detection boxes.
+ * @param[in] net Network.
+ * @param[in] imageWidth Original image width.
+ * @param[in] imageHeight Original image height.
+ * @param[in] threshold Detections threshold.
+ * @param[out] detections Detection boxes.
+ **/
+ void GetNetworkBoxes(object_detection::Network& net,
+ int imageWidth,
+ int imageHeight,
+ float threshold,
+ std::forward_list<image::Detection>& detections);
+ };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* DETECTOR_POST_PROCESSING_HPP */
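For reference, a minimal usage sketch of the post-processing API declared above (illustrative only, not part of this patch). It assumes the model has already been initialised and run, and that the common Model base class provides the GetInputTensor()/GetOutputTensor() accessors; the function name is hypothetical.

#include "DetectorPostProcessing.hpp"
#include "YoloFastestModel.hpp"

#include <vector>

static bool RunDetectionExample(arm::app::YoloFastestModel& model,
                                std::vector<arm::app::object_detection::DetectionResult>& results)
{
    TfLiteTensor* inputTensor = model.GetInputTensor(0);

    /* The model input geometry drives the detection grid sizes used by post-processing. */
    const int inputImgRows = inputTensor->dims->data[arm::app::YoloFastestModel::ms_inputRowsIdx];
    const int inputImgCols = inputTensor->dims->data[arm::app::YoloFastestModel::ms_inputColsIdx];

    /* The YOLO Fastest graph exposes two output branches (coarse and fine grids). */
    arm::app::DetectorPostProcess postProcess(
        model.GetOutputTensor(0), model.GetOutputTensor(1), results,
        inputImgRows, inputImgCols,
        0.5f,   /* detection threshold */
        0.45f,  /* NMS threshold */
        1,      /* number of classes */
        0);     /* topN: 0 keeps all detections above the threshold */

    /* Decodes both branches, applies NMS and fills 'results' with boxes
     * in original-image coordinates. */
    return postProcess.DoPostProcess();
}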
diff --git a/source/application/api/use_case/object_detection/include/DetectorPreProcessing.hpp b/source/application/api/use_case/object_detection/include/DetectorPreProcessing.hpp
new file mode 100644
index 0000000..4936048
--- /dev/null
+++ b/source/application/api/use_case/object_detection/include/DetectorPreProcessing.hpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef DETECTOR_PRE_PROCESSING_HPP
+#define DETECTOR_PRE_PROCESSING_HPP
+
+#include "BaseProcessing.hpp"
+#include "Classifier.hpp"
+
+namespace arm {
+namespace app {
+
+ /**
+ * @brief Pre-processing class for Object detection use case.
+ * Implements methods declared by BasePreProcess and anything else needed
+ * to populate input tensors ready for inference.
+ */
+ class DetectorPreProcess : public BasePreProcess {
+
+ public:
+ /**
+ * @brief Constructor
+ * @param[in] inputTensor Pointer to the TFLite Micro input Tensor.
+ * @param[in] rgb2Gray Convert image from 3 channel RGB to 1 channel grayscale.
+ * @param[in] convertToInt8 Convert the image from uint8 to int8 range.
+ **/
+ explicit DetectorPreProcess(TfLiteTensor* inputTensor, bool rgb2Gray, bool convertToInt8);
+
+ /**
+ * @brief Should perform pre-processing of 'raw' input image data and load it into
+ * TFLite Micro input tensor ready for inference
+ * @param[in] input Pointer to the data that pre-processing will work on.
+ * @param[in] inputSize Size of the input data.
+ * @return true if successful, false otherwise.
+ **/
+ bool DoPreProcess(const void* input, size_t inputSize) override;
+
+ private:
+ TfLiteTensor* m_inputTensor;
+ bool m_rgb2Gray;
+ bool m_convertToInt8;
+ };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* DETECTOR_PRE_PROCESSING_HPP */
\ No newline at end of file
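A corresponding pre-processing sketch, again illustrative rather than part of the patch. It assumes the Model base class accessor GetInputTensor(); imgSrc and imgSz stand in for the raw uint8 image buffer and its size in bytes, and the two flags should be set according to the model's expected input format.

#include "DetectorPreProcessing.hpp"
#include "Model.hpp"

static bool PopulateInputExample(arm::app::Model& model, const uint8_t* imgSrc, size_t imgSz)
{
    /* rgb2Gray: true if the model expects a single grayscale channel.
     * convertToInt8: true if the uint8 image must be shifted into the int8 range
     * of a quantised input tensor. */
    arm::app::DetectorPreProcess preProcess(model.GetInputTensor(0),
                                            true /* rgb2Gray */,
                                            true /* convertToInt8 */);

    return preProcess.DoPreProcess(imgSrc, imgSz);
}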
diff --git a/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp b/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp
new file mode 100644
index 0000000..4c64433
--- /dev/null
+++ b/source/application/api/use_case/object_detection/include/YoloFastestModel.hpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef YOLO_FASTEST_MODEL_HPP
+#define YOLO_FASTEST_MODEL_HPP
+
+#include "Model.hpp"
+
+extern const int originalImageSize;
+extern const int channelsImageDisplayed;
+extern const float anchor1[];
+extern const float anchor2[];
+
+namespace arm {
+namespace app {
+
+ class YoloFastestModel : public Model {
+
+ public:
+ /* Indices for the expected model - based on input tensor shape */
+ static constexpr uint32_t ms_inputRowsIdx = 1;
+ static constexpr uint32_t ms_inputColsIdx = 2;
+ static constexpr uint32_t ms_inputChannelsIdx = 3;
+
+ protected:
+ /** @brief Gets the reference to op resolver interface class. */
+ const tflite::MicroOpResolver& GetOpResolver() override;
+
+ /** @brief Adds operations to the op resolver instance. */
+ bool EnlistOperations() override;
+
+ private:
+ /* Maximum number of individual operations that can be enlisted. */
+ static constexpr int ms_maxOpCnt = 8;
+
+ /* A mutable op resolver instance. */
+ tflite::MicroMutableOpResolver<ms_maxOpCnt> m_opResolver;
+ };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* YOLO_FASTEST_MODEL_HPP */
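The extern symbols declared above must be defined by the use case that links against this library. A sketch of what those definitions can look like; the numbers below are illustrative placeholders, the real values come from the trained model's input size and anchor configuration.

/* Example definitions (placeholder values for illustration only). */
extern const int originalImageSize      = 192;  /* e.g. 192x192 source frames */
extern const int channelsImageDisplayed = 3;    /* RGB channels in the displayed image */

/* Anchor (width, height) pairs for the two output branches, in input-image pixels. */
extern const float anchor1[] = {38, 77, 47, 97, 61, 126};
extern const float anchor2[] = {14, 26, 19, 37, 28, 55};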
diff --git a/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc b/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
new file mode 100644
index 0000000..fb1606a
--- /dev/null
+++ b/source/application/api/use_case/object_detection/src/DetectorPostProcessing.cc
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DetectorPostProcessing.hpp"
+#include "PlatformMath.hpp"
+
+#include <cmath>
+
+namespace arm {
+namespace app {
+
+ DetectorPostProcess::DetectorPostProcess(
+ TfLiteTensor* modelOutput0,
+ TfLiteTensor* modelOutput1,
+ std::vector<object_detection::DetectionResult>& results,
+ int inputImgRows,
+ int inputImgCols,
+ const float threshold,
+ const float nms,
+ int numClasses,
+ int topN)
+ : m_outputTensor0{modelOutput0},
+ m_outputTensor1{modelOutput1},
+ m_results{results},
+ m_inputImgRows{inputImgRows},
+ m_inputImgCols{inputImgCols},
+ m_threshold(threshold),
+ m_nms(nms),
+ m_numClasses(numClasses),
+ m_topN(topN)
+{
+ /* Init PostProcessing */
+ this->m_net =
+ object_detection::Network {
+ .inputWidth = inputImgCols,
+ .inputHeight = inputImgRows,
+ .numClasses = numClasses,
+ .branches = {
+ object_detection::Branch {
+ .resolution = inputImgCols/32,
+ .numBox = 3,
+ .anchor = anchor1,
+ .modelOutput = this->m_outputTensor0->data.int8,
+ .scale = (static_cast<TfLiteAffineQuantization*>(
+ this->m_outputTensor0->quantization.params))->scale->data[0],
+ .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
+ this->m_outputTensor0->quantization.params))->zero_point->data[0],
+ .size = this->m_outputTensor0->bytes
+ },
+ object_detection::Branch {
+ .resolution = inputImgCols/16,
+ .numBox = 3,
+ .anchor = anchor2,
+ .modelOutput = this->m_outputTensor1->data.int8,
+ .scale = (static_cast<TfLiteAffineQuantization*>(
+ this->m_outputTensor1->quantization.params))->scale->data[0],
+ .zeroPoint = (static_cast<TfLiteAffineQuantization*>(
+ this->m_outputTensor1->quantization.params))->zero_point->data[0],
+ .size = this->m_outputTensor1->bytes
+ }
+ },
+ .topN = m_topN
+ };
+ /* End init */
+}
+
+bool DetectorPostProcess::DoPostProcess()
+{
+ /* Start postprocessing */
+ int originalImageWidth = originalImageSize;
+ int originalImageHeight = originalImageSize;
+
+ std::forward_list<image::Detection> detections;
+ GetNetworkBoxes(this->m_net, originalImageWidth, originalImageHeight, m_threshold, detections);
+
+ /* Do nms */
+ CalculateNMS(detections, this->m_net.numClasses, m_nms);
+
+ for (auto& it: detections) {
+ float xMin = it.bbox.x - it.bbox.w / 2.0f;
+ float xMax = it.bbox.x + it.bbox.w / 2.0f;
+ float yMin = it.bbox.y - it.bbox.h / 2.0f;
+ float yMax = it.bbox.y + it.bbox.h / 2.0f;
+
+ if (xMin < 0) {
+ xMin = 0;
+ }
+ if (yMin < 0) {
+ yMin = 0;
+ }
+ if (xMax > originalImageWidth) {
+ xMax = originalImageWidth;
+ }
+ if (yMax > originalImageHeight) {
+ yMax = originalImageHeight;
+ }
+
+ float boxX = xMin;
+ float boxY = yMin;
+ float boxWidth = xMax - xMin;
+ float boxHeight = yMax - yMin;
+
+ for (int j = 0; j < this->m_net.numClasses; ++j) {
+ if (it.prob[j] > 0) {
+
+ object_detection::DetectionResult tmpResult = {};
+ tmpResult.m_normalisedVal = it.prob[j];
+ tmpResult.m_x0 = boxX;
+ tmpResult.m_y0 = boxY;
+ tmpResult.m_w = boxWidth;
+ tmpResult.m_h = boxHeight;
+
+ this->m_results.push_back(tmpResult);
+ }
+ }
+ }
+ return true;
+}
+
+void DetectorPostProcess::InsertTopNDetections(std::forward_list<image::Detection>& detections, image::Detection& det)
+{
+    /* The list is kept sorted in ascending objectness order: find the first entry
+     * whose objectness exceeds that of the new detection. */
+    std::forward_list<image::Detection>::iterator it;
+    std::forward_list<image::Detection>::iterator last_it;
+    for (it = detections.begin(); it != detections.end(); ++it) {
+        if (it->objectness > det.objectness) {
+            break;
+        }
+        last_it = it;
+    }
+    /* Insert after the last smaller entry and drop the current minimum,
+     * keeping the list size fixed at topN. */
+    if (it != detections.begin()) {
+        detections.emplace_after(last_it, det);
+        detections.pop_front();
+    }
+}
+
+void DetectorPostProcess::GetNetworkBoxes(
+ object_detection::Network& net,
+ int imageWidth,
+ int imageHeight,
+ float threshold,
+ std::forward_list<image::Detection>& detections)
+{
+ int numClasses = net.numClasses;
+ int num = 0;
+ auto det_objectness_comparator = [](image::Detection& pa, image::Detection& pb) {
+ return pa.objectness < pb.objectness;
+ };
+ for (size_t i = 0; i < net.branches.size(); ++i) {
+ int height = net.branches[i].resolution;
+ int width = net.branches[i].resolution;
+ int channel = net.branches[i].numBox*(5+numClasses);
+
+ for (int h = 0; h < net.branches[i].resolution; h++) {
+ for (int w = 0; w < net.branches[i].resolution; w++) {
+ for (int anc = 0; anc < net.branches[i].numBox; anc++) {
+
+ /* Objectness score */
+ int bbox_obj_offset = h * width * channel + w * channel + anc * (numClasses + 5) + 4;
+ float objectness = math::MathUtils::SigmoidF32(
+ (static_cast<float>(net.branches[i].modelOutput[bbox_obj_offset])
+ - net.branches[i].zeroPoint
+ ) * net.branches[i].scale);
+
+ if(objectness > threshold) {
+ image::Detection det;
+ det.objectness = objectness;
+ /* Get bbox prediction data for each anchor, each feature point */
+ int bbox_x_offset = bbox_obj_offset -4;
+ int bbox_y_offset = bbox_x_offset + 1;
+ int bbox_w_offset = bbox_x_offset + 2;
+ int bbox_h_offset = bbox_x_offset + 3;
+ int bbox_scores_offset = bbox_x_offset + 5;
+
+ det.bbox.x = (static_cast<float>(net.branches[i].modelOutput[bbox_x_offset])
+ - net.branches[i].zeroPoint) * net.branches[i].scale;
+ det.bbox.y = (static_cast<float>(net.branches[i].modelOutput[bbox_y_offset])
+ - net.branches[i].zeroPoint) * net.branches[i].scale;
+ det.bbox.w = (static_cast<float>(net.branches[i].modelOutput[bbox_w_offset])
+ - net.branches[i].zeroPoint) * net.branches[i].scale;
+ det.bbox.h = (static_cast<float>(net.branches[i].modelOutput[bbox_h_offset])
+ - net.branches[i].zeroPoint) * net.branches[i].scale;
+
+ float bbox_x, bbox_y;
+
+ /* Eliminate grid sensitivity trick involved in YOLOv4 */
+ bbox_x = math::MathUtils::SigmoidF32(det.bbox.x);
+ bbox_y = math::MathUtils::SigmoidF32(det.bbox.y);
+ det.bbox.x = (bbox_x + w) / width;
+ det.bbox.y = (bbox_y + h) / height;
+
+ det.bbox.w = std::exp(det.bbox.w) * net.branches[i].anchor[anc*2] / net.inputWidth;
+ det.bbox.h = std::exp(det.bbox.h) * net.branches[i].anchor[anc*2+1] / net.inputHeight;
+
+ for (int s = 0; s < numClasses; s++) {
+ float sig = math::MathUtils::SigmoidF32(
+ (static_cast<float>(net.branches[i].modelOutput[bbox_scores_offset + s]) -
+ net.branches[i].zeroPoint) * net.branches[i].scale
+ ) * objectness;
+ det.prob.emplace_back((sig > threshold) ? sig : 0);
+ }
+
+ /* Correct_YOLO_boxes */
+ det.bbox.x *= imageWidth;
+ det.bbox.w *= imageWidth;
+ det.bbox.y *= imageHeight;
+ det.bbox.h *= imageHeight;
+
+ if (num < net.topN || net.topN <=0) {
+ detections.emplace_front(det);
+ num += 1;
+ } else if (num == net.topN) {
+ detections.sort(det_objectness_comparator);
+ InsertTopNDetections(detections,det);
+ num += 1;
+ } else {
+ InsertTopNDetections(detections,det);
+ }
+ }
+ }
+ }
+ }
+ }
+    if (num > net.topN) {
+        num -= 1;
+    }
+}
+
+} /* namespace app */
+} /* namespace arm */
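For clarity, the offset arithmetic in GetNetworkBoxes() assumes each branch output is a row-major int8 tensor of shape [resolution, resolution, numBox * (5 + numClasses)], where each anchor's five leading values are x, y, w, h and objectness, followed by the per-class scores. A small helper mirroring that indexing is sketched below; it is illustrative only and not part of the patch.

/* Flat offset of the objectness score for grid cell (row, col) and anchor 'anc',
 * mirroring the bbox_obj_offset computation above. Illustrative helper only. */
static inline int ObjectnessOffset(int row, int col, int anc,
                                   int gridWidth, int numBox, int numClasses)
{
    const int channel = numBox * (5 + numClasses);    /* values stored per grid cell */
    return row * gridWidth * channel                  /* skip whole rows             */
         + col * channel                              /* skip cells in this row      */
         + anc * (5 + numClasses)                     /* skip earlier anchors        */
         + 4;                                         /* x, y, w, h come first       */
}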
diff --git a/source/application/api/use_case/object_detection/src/DetectorPreProcessing.cc b/source/application/api/use_case/object_detection/src/DetectorPreProcessing.cc
new file mode 100644
index 0000000..7212046
--- /dev/null
+++ b/source/application/api/use_case/object_detection/src/DetectorPreProcessing.cc
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "DetectorPreProcessing.hpp"
+#include "ImageUtils.hpp"
+#include "log_macros.h"
+
+namespace arm {
+namespace app {
+
+ DetectorPreProcess::DetectorPreProcess(TfLiteTensor* inputTensor, bool rgb2Gray, bool convertToInt8)
+ : m_inputTensor{inputTensor},
+ m_rgb2Gray{rgb2Gray},
+ m_convertToInt8{convertToInt8}
+ {}
+
+    bool DetectorPreProcess::DoPreProcess(const void* data, size_t inputSize) {
+        if (data == nullptr) {
+            printf_err("Data pointer is null");
+            return false;
+        }
+
+ auto input = static_cast<const uint8_t*>(data);
+
+ if (this->m_rgb2Gray) {
+ image::RgbToGrayscale(input, this->m_inputTensor->data.uint8, this->m_inputTensor->bytes);
+ } else {
+ std::memcpy(this->m_inputTensor->data.data, input, inputSize);
+ }
+ debug("Input tensor populated \n");
+
+ if (this->m_convertToInt8) {
+ image::ConvertImgToInt8(this->m_inputTensor->data.data, this->m_inputTensor->bytes);
+ }
+
+ return true;
+ }
+
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
diff --git a/source/application/api/use_case/object_detection/src/YoloFastestModel.cc b/source/application/api/use_case/object_detection/src/YoloFastestModel.cc
new file mode 100644
index 0000000..e293181
--- /dev/null
+++ b/source/application/api/use_case/object_detection/src/YoloFastestModel.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2022 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "YoloFastestModel.hpp"
+
+#include "log_macros.h"
+
+const tflite::MicroOpResolver& arm::app::YoloFastestModel::GetOpResolver()
+{
+ return this->m_opResolver;
+}
+
+bool arm::app::YoloFastestModel::EnlistOperations()
+{
+ this->m_opResolver.AddDepthwiseConv2D();
+ this->m_opResolver.AddConv2D();
+ this->m_opResolver.AddAdd();
+ this->m_opResolver.AddResizeNearestNeighbor();
+    /* These ops are needed for the unit tests to pass; they are not needed on the FVP. */
+ this->m_opResolver.AddPad();
+ this->m_opResolver.AddMaxPool2D();
+ this->m_opResolver.AddConcatenation();
+
+ if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
+ info("Added %s support to op resolver\n",
+ tflite::GetString_ETHOSU());
+ } else {
+ printf_err("Failed to add Arm NPU support to op resolver.");
+ return false;
+ }
+ return true;
+}