Diffstat (limited to 'samples/ObjectDetection/src')
-rw-r--r--  samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp    140
-rw-r--r--  samples/ObjectDetection/src/CmdArgsParser.cpp             70
-rw-r--r--  samples/ObjectDetection/src/CvVideoFileWriter.cpp         38
-rw-r--r--  samples/ObjectDetection/src/CvVideoFrameReader.cpp        98
-rw-r--r--  samples/ObjectDetection/src/CvWindowOutput.cpp            33
-rw-r--r--  samples/ObjectDetection/src/ImageUtils.cpp                 6
-rw-r--r--  samples/ObjectDetection/src/Main.cpp                      54
-rw-r--r--  samples/ObjectDetection/src/ObjectDetectionPipeline.cpp (renamed from samples/ObjectDetection/src/NetworkPipeline.cpp)   16
-rw-r--r--  samples/ObjectDetection/src/SSDResultDecoder.cpp            6
-rw-r--r--  samples/ObjectDetection/src/YoloResultDecoder.cpp           8
10 files changed, 57 insertions(+), 412 deletions(-)
diff --git a/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp b/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp
deleted file mode 100644
index cb4c0c9f84..0000000000
--- a/samples/ObjectDetection/src/ArmnnNetworkExecutor.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ArmnnNetworkExecutor.hpp"
-#include "Types.hpp"
-
-#include <random>
-#include <string>
-
-namespace od
-{
-
-armnn::DataType ArmnnNetworkExecutor::GetInputDataType() const
-{
- return m_inputBindingInfo.second.GetDataType();
-}
-
-ArmnnNetworkExecutor::ArmnnNetworkExecutor(std::string& modelPath,
- std::vector<armnn::BackendId>& preferredBackends)
-: m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions()))
-{
- // Import the TensorFlow lite model.
- armnnTfLiteParser::ITfLiteParserPtr parser = armnnTfLiteParser::ITfLiteParser::Create();
- armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile(modelPath.c_str());
-
- std::vector<std::string> inputNames = parser->GetSubgraphInputTensorNames(0);
-
- m_inputBindingInfo = parser->GetNetworkInputBindingInfo(0, inputNames[0]);
-
- m_outputLayerNamesList = parser->GetSubgraphOutputTensorNames(0);
-
- std::vector<armnn::BindingPointInfo> outputBindings;
- for(const std::string& name : m_outputLayerNamesList)
- {
- m_outputBindingInfo.push_back(std::move(parser->GetNetworkOutputBindingInfo(0, name)));
- }
-
- std::vector<std::string> errorMessages;
- // optimize the network.
- armnn::IOptimizedNetworkPtr optNet = Optimize(*network,
- preferredBackends,
- m_Runtime->GetDeviceSpec(),
- armnn::OptimizerOptions(),
- armnn::Optional<std::vector<std::string>&>(errorMessages));
-
- if (!optNet)
- {
- const std::string errorMessage{"ArmnnNetworkExecutor: Failed to optimize network"};
- ARMNN_LOG(error) << errorMessage;
- throw armnn::Exception(errorMessage);
- }
-
- // Load the optimized network onto the m_Runtime device
- std::string errorMessage;
- if (armnn::Status::Success != m_Runtime->LoadNetwork(m_NetId, std::move(optNet), errorMessage))
- {
- ARMNN_LOG(error) << errorMessage;
- }
-
- //pre-allocate memory for output (the size of it never changes)
- for (int it = 0; it < m_outputLayerNamesList.size(); ++it)
- {
- const armnn::DataType dataType = m_outputBindingInfo[it].second.GetDataType();
- const armnn::TensorShape& tensorShape = m_outputBindingInfo[it].second.GetShape();
-
- InferenceResult oneLayerOutResult;
- switch (dataType)
- {
- case armnn::DataType::Float32:
- {
- oneLayerOutResult.resize(tensorShape.GetNumElements(), 0);
- break;
- }
- default:
- {
- errorMessage = "ArmnnNetworkExecutor: unsupported output tensor data type";
- ARMNN_LOG(error) << errorMessage << " " << log_as_int(dataType);
- throw armnn::Exception(errorMessage);
- }
- }
-
- m_OutputBuffer.emplace_back(oneLayerOutResult);
-
- // Make ArmNN output tensors
- m_OutputTensors.reserve(m_OutputBuffer.size());
- for (size_t it = 0; it < m_OutputBuffer.size(); ++it)
- {
- m_OutputTensors.emplace_back(std::make_pair(
- m_outputBindingInfo[it].first,
- armnn::Tensor(m_outputBindingInfo[it].second,
- m_OutputBuffer.at(it).data())
- ));
- }
- }
-
-}
-
-void ArmnnNetworkExecutor::PrepareTensors(const void* inputData, const size_t dataBytes)
-{
- assert(m_inputBindingInfo.second.GetNumBytes() >= dataBytes);
- m_InputTensors.clear();
- m_InputTensors = {{ m_inputBindingInfo.first, armnn::ConstTensor(m_inputBindingInfo.second, inputData)}};
-}
-
-bool ArmnnNetworkExecutor::Run(const void* inputData, const size_t dataBytes, InferenceResults& outResults)
-{
- /* Prepare tensors if they are not ready */
- ARMNN_LOG(debug) << "Preparing tensors...";
- this->PrepareTensors(inputData, dataBytes);
- ARMNN_LOG(trace) << "Running inference...";
-
- armnn::Status ret = m_Runtime->EnqueueWorkload(m_NetId, m_InputTensors, m_OutputTensors);
-
- std::stringstream inferenceFinished;
- inferenceFinished << "Inference finished with code {" << log_as_int(ret) << "}\n";
-
- ARMNN_LOG(trace) << inferenceFinished.str();
-
- if (ret == armnn::Status::Failure)
- {
- ARMNN_LOG(error) << "Failed to perform inference.";
- }
-
- outResults.reserve(m_outputLayerNamesList.size());
- outResults = m_OutputBuffer;
-
- return (armnn::Status::Success == ret);
-}
-
-Size ArmnnNetworkExecutor::GetImageAspectRatio()
-{
- const auto shape = m_inputBindingInfo.second.GetShape();
- assert(shape.GetNumDimensions() == 4);
- armnnUtils::DataLayoutIndexed nhwc(armnn::DataLayout::NHWC);
- return Size(shape[nhwc.GetWidthIndex()],
- shape[nhwc.GetHeightIndex()]);
-}
-}// namespace od
\ No newline at end of file
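Note: the executor deleted here is superseded by the shared, templated common::ArmnnNetworkExecutor<float> that the remaining diffs below switch to. A minimal usage sketch, assuming the templated class keeps the deleted class's interface (constructor taking a model path and backend list, Run(), GetImageAspectRatio()); the model path and input shape are hypothetical:

    // Sketch only: names follow the deleted interface above and the common::
    // replacement referenced in the diffs below.
    #include "ArmnnNetworkExecutor.hpp"

    #include <string>
    #include <vector>

    int main()
    {
        std::string modelPath = "detector.tflite";                     // hypothetical model
        std::vector<armnn::BackendId> backends = {"CpuAcc", "CpuRef"};

        common::ArmnnNetworkExecutor<float> executor(modelPath, backends);

        // Float32 NHWC input buffer sized to the network's input tensor
        // (300x300x3 assumed here).
        std::vector<float> input(300 * 300 * 3, 0.0f);

        common::InferenceResults<float> results;
        bool ok = executor.Run(input.data(), input.size() * sizeof(float), results);
        return ok ? 0 : 1;
    }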
diff --git a/samples/ObjectDetection/src/CmdArgsParser.cpp b/samples/ObjectDetection/src/CmdArgsParser.cpp
deleted file mode 100644
index b8c74bc10f..0000000000
--- a/samples/ObjectDetection/src/CmdArgsParser.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "CmdArgsParser.hpp"
-#include <iostream>
-/*
- * Checks that a particular option was specified by the user
- */
-bool CheckOptionSpecified(const std::map<std::string, std::string>& options, const std::string& option)
-{
- auto it = options.find(option);
- return it!=options.end();
-}
-
-/*
- * Retrieves the user provided option
- */
-std::string GetSpecifiedOption(const std::map<std::string, std::string>& options, const std::string& option)
-{
- if (CheckOptionSpecified(options, option)){
- return options.at(option);
- }
- else
- {
- throw std::invalid_argument("Required option: " + option + " not defined.");
- }
-}
-
-/*
- * Parses all the command line options provided by the user and stores in a map.
- */
-int ParseOptions(std::map<std::string, std::string>& options, std::map<std::string, std::string>& acceptedOptions,
- char *argv[], int argc)
-{
- for (int i = 1; i < argc; ++i)
- {
- std::string currentOption = std::string(argv[i]);
- auto it = acceptedOptions.find(currentOption);
- if (it != acceptedOptions.end())
- {
- if (i + 1 < argc && std::string(argv[i + 1]).rfind("--", 0) != 0)
- {
- std::string value = argv[++i];
- options.insert({it->first, value});
- }
- else if (std::string(argv[i]) == HELP)
- {
- std::cout << "Available options" << std::endl;
- for (auto & acceptedOption : acceptedOptions)
- {
- std::cout << acceptedOption.first << " : " << acceptedOption.second << std::endl;
- }
- return 2;
- }
- else
- {
- std::cerr << std::string(argv[i]) << " option requires one argument." << std::endl;
- return 1;
- }
- }
- else
- {
- std::cerr << "Unrecognised option: " << std::string(argv[i]) << std::endl;
- return 1;
- }
- }
- return 0;
-}
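The parser deleted here moves out of this sample; Main.cpp below now defines only its accepted-option map. A sketch of driving ParseOptions/GetSpecifiedOption, matching the signatures above (the option strings are hypothetical):

    #include "CmdArgsParser.hpp"

    #include <iostream>
    #include <map>
    #include <string>

    int main(int argc, char* argv[])
    {
        std::map<std::string, std::string> acceptedOptions = {
            {"--video-file-path", "[REQUIRED] Path to the video file"},
            {"--help",            "Prints the available options"}
        };
        std::map<std::string, std::string> options;

        int rc = ParseOptions(options, acceptedOptions, argv, argc);
        if (rc != 0)
        {
            return rc; // 1: bad/incomplete option, 2: help was printed
        }

        // Throws std::invalid_argument if the required option was not given.
        std::cout << GetSpecifiedOption(options, "--video-file-path") << std::endl;
        return 0;
    }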
diff --git a/samples/ObjectDetection/src/CvVideoFileWriter.cpp b/samples/ObjectDetection/src/CvVideoFileWriter.cpp
deleted file mode 100644
index ab80b95d49..0000000000
--- a/samples/ObjectDetection/src/CvVideoFileWriter.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "CvVideoFileWriter.hpp"
-
-namespace od
-{
-
-void CvVideoFileWriter::Init(const std::string& outputVideo, int encoding, double fps, int width, int height)
-{
- m_ready = m_cvWriter.open(outputVideo, cv::CAP_FFMPEG,
- encoding,
- fps,
- cv::Size(width, height), true);
-}
-
-
-void CvVideoFileWriter::WriteFrame(std::shared_ptr<cv::Mat>& frame)
-{
- if(m_cvWriter.isOpened())
- {
- cv::cvtColor(*frame, *frame, cv::COLOR_RGB2BGR);
- m_cvWriter.write(*frame);
- }
-}
-
-bool CvVideoFileWriter::IsReady() const
-{
- return m_ready;
-}
-
-void CvVideoFileWriter::Close()
-{
- m_cvWriter.release();
-}
-}// namespace od
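A usage sketch for the writer deleted here, based on the interface above; the output path, FOURCC, fps, and frame size are hypothetical, and frame is assumed to be a std::shared_ptr<cv::Mat> holding RGB data (WriteFrame converts to BGR itself):

    od::CvVideoFileWriter writer;
    writer.Init("out.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), 30.0, 1280, 720);
    if (writer.IsReady())
    {
        writer.WriteFrame(frame);   // frame: std::shared_ptr<cv::Mat>, RGB order
    }
    writer.Close();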
diff --git a/samples/ObjectDetection/src/CvVideoFrameReader.cpp b/samples/ObjectDetection/src/CvVideoFrameReader.cpp
deleted file mode 100644
index 09b5050973..0000000000
--- a/samples/ObjectDetection/src/CvVideoFrameReader.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-
-#include "CvVideoFrameReader.hpp"
-
-namespace od
-{
-
-std::shared_ptr<cv::Mat> CvVideoFrameReader::ReadFrame()
-{
- // opencv copies data anyway
- cv::Mat captureFrame;
- m_capture.read(captureFrame);
- return std::make_shared<cv::Mat>(std::move(captureFrame));
-}
-
-bool CvVideoFrameReader::IsExhausted(const std::shared_ptr<cv::Mat>& frame) const
-{
- assert(frame!=nullptr);
- return frame->empty();
-}
-
-void CvVideoFrameReader::CheckIsOpen(const std::string& source)
-{
- if (!m_capture.isOpened())
- {
- throw std::runtime_error("Failed to open video capture for the source = " + source);
- }
-}
-
-void CvVideoFrameReader::Init(const std::string& source)
-{
- m_capture.open(source);
- CheckIsOpen(source);
-}
-
-int CvVideoFrameReader::GetSourceWidth() const
-{
- return static_cast<int>(lround(m_capture.get(cv::CAP_PROP_FRAME_WIDTH)));
-}
-
-int CvVideoFrameReader::GetSourceHeight() const
-{
- return static_cast<int>(lround(m_capture.get(cv::CAP_PROP_FRAME_HEIGHT)));
-}
-
-double CvVideoFrameReader::GetSourceFps() const
-{
- return m_capture.get(cv::CAP_PROP_FPS);
-}
-
-bool CvVideoFrameReader::ConvertToRGB()
-{
- m_capture.set(cv::CAP_PROP_CONVERT_RGB, 1.0);
- return static_cast<bool>(m_capture.get(cv::CAP_PROP_CONVERT_RGB));
-}
-
-std::string CvVideoFrameReader::GetSourceEncoding() const
-{
- char fourccStr[5];
- auto fourcc = (int)m_capture.get(cv::CAP_PROP_FOURCC);
- sprintf(fourccStr,"%c%c%c%c",fourcc & 0xFF, (fourcc >> 8) & 0xFF, (fourcc >> 16) & 0xFF, (fourcc >> 24) & 0xFF);
- return fourccStr;
-}
-
-int CvVideoFrameReader::GetSourceEncodingInt() const
-{
- return (int)m_capture.get(cv::CAP_PROP_FOURCC);
-}
-
-int CvVideoFrameReader::GetFrameCount() const
-{
- return static_cast<int>(lround(m_capture.get(cv::CAP_PROP_FRAME_COUNT)));
-};
-
-std::shared_ptr<cv::Mat> CvVideoFrameReaderRgbWrapper::ReadFrame()
-{
- auto framePtr = m_reader->ReadFrame();
- if (!IsExhausted(framePtr))
- {
- cv::cvtColor(*framePtr, *framePtr, cv::COLOR_BGR2RGB);
- }
- return framePtr;
-}
-
-bool CvVideoFrameReaderRgbWrapper::IsExhausted(const std::shared_ptr<cv::Mat>& frame) const
-{
- return m_reader->IsExhausted(frame);
-}
-
-CvVideoFrameReaderRgbWrapper::CvVideoFrameReaderRgbWrapper(std::unique_ptr<od::CvVideoFrameReader> reader):
- m_reader(std::move(reader))
-{}
-
-}// namespace od
\ No newline at end of file
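A read-loop sketch against the reader deleted here, mirroring the wrapping logic visible in the old Main.cpp hunks below (od::IFrameReader is the interface type the old code assigned into; the source path is hypothetical):

    auto reader = std::make_unique<od::CvVideoFrameReader>();
    reader->Init("input.mp4");   // hypothetical source; throws if it cannot be opened

    // Wrap with the BGR->RGB converter only when the capture backend
    // cannot deliver converted frames itself.
    std::unique_ptr<od::IFrameReader<cv::Mat>> readerPtr;
    if (!reader->ConvertToRGB())
    {
        readerPtr = std::make_unique<od::CvVideoFrameReaderRgbWrapper>(std::move(reader));
    }
    else
    {
        readerPtr = std::move(reader);
    }

    std::shared_ptr<cv::Mat> frame = readerPtr->ReadFrame();
    while (!readerPtr->IsExhausted(frame))
    {
        // ... process *frame (RGB order) ...
        frame = readerPtr->ReadFrame();
    }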
diff --git a/samples/ObjectDetection/src/CvWindowOutput.cpp b/samples/ObjectDetection/src/CvWindowOutput.cpp
deleted file mode 100644
index a32147b19a..0000000000
--- a/samples/ObjectDetection/src/CvWindowOutput.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "CvWindowOutput.hpp"
-
-namespace od
-{
-
-void CvWindowOutput::Init(const std::string& windowName)
-{
- m_windowName = windowName;
- cv::namedWindow(m_windowName, cv::WINDOW_AUTOSIZE);
-}
-
-void CvWindowOutput::WriteFrame(std::shared_ptr<cv::Mat>& frame)
-{
- cv::cvtColor(*frame, *frame, cv::COLOR_RGB2BGR);
- cv::imshow( m_windowName, *frame);
- cv::waitKey(30);
-}
-
-void CvWindowOutput::Close()
-{
- cv::destroyWindow(m_windowName);
-}
-
-bool CvWindowOutput::IsReady() const
-{
- return true;
-}
-}// namespace od
\ No newline at end of file
diff --git a/samples/ObjectDetection/src/ImageUtils.cpp b/samples/ObjectDetection/src/ImageUtils.cpp
index 9a3ed17b63..05b8a66c05 100644
--- a/samples/ObjectDetection/src/ImageUtils.cpp
+++ b/samples/ObjectDetection/src/ImageUtils.cpp
@@ -15,7 +15,7 @@ static cv::Scalar GetScalarColorCode(std::tuple<int, int, int> color)
}
void AddInferenceOutputToFrame(od::DetectedObjects& decodedResults, cv::Mat& inputFrame,
- std::vector<std::tuple<std::string, od::BBoxColor>>& labels)
+ std::vector<std::tuple<std::string, common::BBoxColor>>& labels)
{
for(const od::DetectedObject& object : decodedResults)
{
@@ -86,7 +86,7 @@ void AddInferenceOutputToFrame(od::DetectedObjects& decodedResults, cv::Mat& inp
}
-void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const od::Size& aspectRatio)
+void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const common::Size& aspectRatio)
{
if(&dest != &frame)
{
@@ -119,7 +119,7 @@ void PadFrame(const cv::Mat& src, cv::Mat& dest, const int bottom, const int rig
}
}
-void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const od::Size& destSize)
+void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const common::Size& destSize)
{
ResizeFrame(frame, cache, destSize);
PadFrame(cache, dest,destSize.m_Height - cache.rows,destSize.m_Width - cache.cols);
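These hunks only swap od:: aliases for the shared common:: ones; the letterbox behaviour is unchanged. A call-site sketch, assuming the executor exposes the network input size via GetImageAspectRatio() as the deleted executor above did:

    cv::Mat processed, cache;
    common::Size netSize = executor.GetImageAspectRatio();   // input tensor width x height
    ResizeWithPad(frame, processed, cache, netSize);
    // processed is now netSize.m_Width x netSize.m_Height: resized to fit,
    // then padded on the bottom and right.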
diff --git a/samples/ObjectDetection/src/Main.cpp b/samples/ObjectDetection/src/Main.cpp
index 10abb65cce..e057981550 100644
--- a/samples/ObjectDetection/src/Main.cpp
+++ b/samples/ObjectDetection/src/Main.cpp
@@ -6,7 +6,7 @@
#include "CvVideoFrameReader.hpp"
#include "CvWindowOutput.hpp"
#include "CvVideoFileWriter.hpp"
-#include "NetworkPipeline.hpp"
+#include "ObjectDetectionPipeline.hpp"
#include "CmdArgsParser.hpp"
#include <fstream>
@@ -14,6 +14,30 @@
#include <map>
#include <random>
+const std::string MODEL_NAME = "--model-name";
+const std::string VIDEO_FILE_PATH = "--video-file-path";
+const std::string MODEL_FILE_PATH = "--model-file-path";
+const std::string OUTPUT_VIDEO_FILE_PATH = "--output-video-file-path";
+const std::string LABEL_PATH = "--label-path";
+const std::string PREFERRED_BACKENDS = "--preferred-backends";
+const std::string HELP = "--help";
+
+/*
+ * The accepted options for this Object detection executable
+ */
+static std::map<std::string, std::string> CMD_OPTIONS = {
+ {VIDEO_FILE_PATH, "[REQUIRED] Path to the video file to run object detection on"},
+ {MODEL_FILE_PATH, "[REQUIRED] Path to the Object Detection model to use"},
+    {LABEL_PATH, "[REQUIRED] Path to the label set for the provided model file. "
+                 "Label file should be an ordered list, separated by new lines."},
+    {MODEL_NAME, "[REQUIRED] The name of the model being used. Accepted options: YOLO_V3_TINY, SSD_MOBILE"},
+    {OUTPUT_VIDEO_FILE_PATH, "[OPTIONAL] Path to the output video file with detections added in. "
+                             "If specified, the output is saved to disk; otherwise it is displayed on screen."},
+    {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
+                         " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
+                         " Defaults to CpuAcc,CpuRef"}
+};
+
/*
* Reads the user supplied backend preference, splits it by comma, and returns an ordered vector
*/
@@ -34,10 +58,10 @@ std::vector<armnn::BackendId> GetPreferredBackendList(const std::string& preferr
/*
* Assigns a color to each label in the label set
*/
-std::vector<std::tuple<std::string, od::BBoxColor>> AssignColourToLabel(const std::string& pathToLabelFile)
+std::vector<std::tuple<std::string, common::BBoxColor>> AssignColourToLabel(const std::string& pathToLabelFile)
{
std::ifstream in(pathToLabelFile);
- std::vector<std::tuple<std::string, od::BBoxColor>> labels;
+ std::vector<std::tuple<std::string, common::BBoxColor>> labels;
std::string str;
std::default_random_engine generator;
@@ -47,7 +71,7 @@ std::vector<std::tuple<std::string, od::BBoxColor>> AssignColourToLabel(const st
{
if(!str.empty())
{
- od::BBoxColor c{
+ common::BBoxColor c{
.colorCode = std::make_tuple(distribution(generator),
distribution(generator),
distribution(generator))
@@ -60,13 +84,13 @@ std::vector<std::tuple<std::string, od::BBoxColor>> AssignColourToLabel(const st
return labels;
}
-std::tuple<std::unique_ptr<od::IFrameReader<cv::Mat>>,
- std::unique_ptr<od::IFrameOutput<cv::Mat>>>
+std::tuple<std::unique_ptr<common::IFrameReader<cv::Mat>>,
+ std::unique_ptr<common::IFrameOutput<cv::Mat>>>
GetFrameSourceAndSink(const std::map<std::string, std::string>& options) {
- std::unique_ptr<od::IFrameReader<cv::Mat>> readerPtr;
+ std::unique_ptr<common::IFrameReader<cv::Mat>> readerPtr;
- std::unique_ptr<od::CvVideoFrameReader> reader = std::make_unique<od::CvVideoFrameReader>();
+ std::unique_ptr<common::CvVideoFrameReader> reader = std::make_unique<common::CvVideoFrameReader>();
reader->Init(GetSpecifiedOption(options, VIDEO_FILE_PATH));
auto enc = reader->GetSourceEncodingInt();
@@ -75,7 +99,7 @@ std::tuple<std::unique_ptr<od::IFrameReader<cv::Mat>>,
auto h = reader->GetSourceHeight();
if (!reader->ConvertToRGB())
{
- readerPtr = std::move(std::make_unique<od::CvVideoFrameReaderRgbWrapper>(std::move(reader)));
+ readerPtr = std::move(std::make_unique<common::CvVideoFrameReaderRgbWrapper>(std::move(reader)));
}
else
{
@@ -85,14 +109,14 @@ std::tuple<std::unique_ptr<od::IFrameReader<cv::Mat>>,
if(CheckOptionSpecified(options, OUTPUT_VIDEO_FILE_PATH))
{
std::string outputVideo = GetSpecifiedOption(options, OUTPUT_VIDEO_FILE_PATH);
- auto writer = std::make_unique<od::CvVideoFileWriter>();
+ auto writer = std::make_unique<common::CvVideoFileWriter>();
writer->Init(outputVideo, enc, fps, w, h);
return std::make_tuple<>(std::move(readerPtr), std::move(writer));
}
else
{
- auto writer = std::make_unique<od::CvWindowOutput>();
+ auto writer = std::make_unique<common::CvWindowOutput>();
writer->Init("Processed Video");
return std::make_tuple<>(std::move(readerPtr), std::move(writer));
}
@@ -109,7 +133,7 @@ int main(int argc, char *argv[])
}
// Create the network options
- od::ODPipelineOptions pipelineOptions;
+ common::PipelineOptions pipelineOptions;
pipelineOptions.m_ModelFilePath = GetSpecifiedOption(options, MODEL_FILE_PATH);
pipelineOptions.m_ModelName = GetSpecifiedOption(options, MODEL_NAME);
@@ -127,8 +151,8 @@ int main(int argc, char *argv[])
od::IPipelinePtr objectDetectionPipeline = od::CreatePipeline(pipelineOptions);
auto inputAndOutput = GetFrameSourceAndSink(options);
- std::unique_ptr<od::IFrameReader<cv::Mat>> reader = std::move(std::get<0>(inputAndOutput));
- std::unique_ptr<od::IFrameOutput<cv::Mat>> sink = std::move(std::get<1>(inputAndOutput));
+ std::unique_ptr<common::IFrameReader<cv::Mat>> reader = std::move(std::get<0>(inputAndOutput));
+ std::unique_ptr<common::IFrameOutput<cv::Mat>> sink = std::move(std::get<1>(inputAndOutput));
if (!sink->IsReady())
{
@@ -136,7 +160,7 @@ int main(int argc, char *argv[])
return 1;
}
- od::InferenceResults results;
+ common::InferenceResults<float> results;
std::shared_ptr<cv::Mat> frame = reader->ReadFrame();
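The hunk ends just before the processing loop. A sketch of how that loop plausibly ties the pieces together, assuming the PreProcessing/Inference/PostProcessing interface shown in ObjectDetectionPipeline.cpp below and labels as returned by AssignColourToLabel:

    while (!reader->IsExhausted(frame))
    {
        cv::Mat processed;
        objectDetectionPipeline->PreProcessing(*frame, processed);
        objectDetectionPipeline->Inference(processed, results);
        objectDetectionPipeline->PostProcessing(results,
            [&](od::DetectedObjects detections)
            {
                AddInferenceOutputToFrame(detections, *frame, labels);
            });
        sink->WriteFrame(frame);
        frame = reader->ReadFrame();
    }
    sink->Close();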
diff --git a/samples/ObjectDetection/src/NetworkPipeline.cpp b/samples/ObjectDetection/src/ObjectDetectionPipeline.cpp
index 7f05882fc4..077caa40cb 100644
--- a/samples/ObjectDetection/src/NetworkPipeline.cpp
+++ b/samples/ObjectDetection/src/ObjectDetectionPipeline.cpp
@@ -3,23 +3,23 @@
// SPDX-License-Identifier: MIT
//
-#include "NetworkPipeline.hpp"
+#include "ObjectDetectionPipeline.hpp"
#include "ImageUtils.hpp"
namespace od
{
-ObjDetectionPipeline::ObjDetectionPipeline(std::unique_ptr<ArmnnNetworkExecutor> executor,
+ObjDetectionPipeline::ObjDetectionPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
std::unique_ptr<IDetectionResultDecoder> decoder) :
m_executor(std::move(executor)),
m_decoder(std::move(decoder)){}
-void od::ObjDetectionPipeline::Inference(const cv::Mat& processed, InferenceResults& result)
+void od::ObjDetectionPipeline::Inference(const cv::Mat& processed, common::InferenceResults<float>& result)
{
m_executor->Run(processed.data, processed.total() * processed.elemSize(), result);
}
-void ObjDetectionPipeline::PostProcessing(InferenceResults& inferenceResult,
+void ObjDetectionPipeline::PostProcessing(common::InferenceResults<float>& inferenceResult,
const std::function<void (DetectedObjects)>& callback)
{
DetectedObjects detections = m_decoder->Decode(inferenceResult, m_inputImageSize,
@@ -37,7 +37,7 @@ void ObjDetectionPipeline::PreProcessing(const cv::Mat& frame, cv::Mat& processe
ResizeWithPad(frame, processed, m_processedFrame, m_executor->GetImageAspectRatio());
}
-MobileNetSSDv1::MobileNetSSDv1(std::unique_ptr<ArmnnNetworkExecutor> executor,
+MobileNetSSDv1::MobileNetSSDv1(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
float objectThreshold) :
ObjDetectionPipeline(std::move(executor),
std::make_unique<SSDResultDecoder>(objectThreshold))
@@ -53,7 +53,7 @@ void MobileNetSSDv1::PreProcessing(const cv::Mat& frame, cv::Mat& processed)
}
}
-YoloV3Tiny::YoloV3Tiny(std::unique_ptr<ArmnnNetworkExecutor> executor,
+YoloV3Tiny::YoloV3Tiny(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
float NMSThreshold, float ClsThreshold, float ObjectThreshold) :
ObjDetectionPipeline(std::move(executor),
std::move(std::make_unique<YoloResultDecoder>(NMSThreshold,
@@ -70,9 +70,9 @@ void YoloV3Tiny::PreProcessing(const cv::Mat& frame, cv::Mat& processed)
}
}
-IPipelinePtr CreatePipeline(od::ODPipelineOptions& config)
+IPipelinePtr CreatePipeline(common::PipelineOptions& config)
{
- auto executor = std::make_unique<od::ArmnnNetworkExecutor>(config.m_ModelFilePath, config.m_backends);
+ auto executor = std::make_unique<common::ArmnnNetworkExecutor<float>>(config.m_ModelFilePath, config.m_backends);
if (config.m_ModelName == "SSD_MOBILE")
{
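A sketch of calling the changed factory, assuming common::PipelineOptions carries the fields Main.cpp sets above (m_ModelFilePath, m_ModelName, m_backends); the paths are hypothetical:

    common::PipelineOptions pipelineOptions;
    pipelineOptions.m_ModelFilePath = "ssd_mobilenet_v1.tflite";   // hypothetical path
    pipelineOptions.m_ModelName     = "SSD_MOBILE";                // or "YOLO_V3_TINY"
    pipelineOptions.m_backends      = {"CpuAcc", "CpuRef"};

    od::IPipelinePtr pipeline = od::CreatePipeline(pipelineOptions);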
diff --git a/samples/ObjectDetection/src/SSDResultDecoder.cpp b/samples/ObjectDetection/src/SSDResultDecoder.cpp
index a3319212e5..6dfd1abf84 100644
--- a/samples/ObjectDetection/src/SSDResultDecoder.cpp
+++ b/samples/ObjectDetection/src/SSDResultDecoder.cpp
@@ -12,9 +12,9 @@
namespace od
{
-DetectedObjects SSDResultDecoder::Decode(const InferenceResults& networkResults,
- const Size& outputFrameSize,
- const Size& resizedFrameSize,
+DetectedObjects SSDResultDecoder::Decode(const common::InferenceResults<float>& networkResults,
+ const common::Size& outputFrameSize,
+ const common::Size& resizedFrameSize,
const std::vector<std::string>& labels)
{
// SSD network outputs 4 tensors: bounding boxes, labels, probabilities, number of detections.
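Per the comment in this hunk, the network emits four output tensors. A sketch of how the decoder plausibly indexes networkResults, assuming the standard TFLite SSD post-process output order:

    // InferenceResults<float> behaves as a vector of per-tensor float vectors.
    const std::vector<float>& boxes         = networkResults[0];   // [N,4] ymin,xmin,ymax,xmax
    const std::vector<float>& classIndices  = networkResults[1];
    const std::vector<float>& scores        = networkResults[2];
    const std::vector<float>& numDetections = networkResults[3];   // single element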
diff --git a/samples/ObjectDetection/src/YoloResultDecoder.cpp b/samples/ObjectDetection/src/YoloResultDecoder.cpp
index ffbf7cb68d..f177802f8a 100644
--- a/samples/ObjectDetection/src/YoloResultDecoder.cpp
+++ b/samples/ObjectDetection/src/YoloResultDecoder.cpp
@@ -13,9 +13,9 @@
namespace od
{
-DetectedObjects YoloResultDecoder::Decode(const InferenceResults& networkResults,
- const Size& outputFrameSize,
- const Size& resizedFrameSize,
+DetectedObjects YoloResultDecoder::Decode(const common::InferenceResults<float>& networkResults,
+ const common::Size& outputFrameSize,
+ const common::Size& resizedFrameSize,
const std::vector<std::string>& labels)
{
@@ -33,7 +33,7 @@ DetectedObjects YoloResultDecoder::Decode(const InferenceResults& networkResults
DetectedObjects detectedObjects;
DetectedObjects resultsAfterNMS;
- for (const InferenceResult& result : networkResults)
+ for (const common::InferenceResult<float>& result : networkResults)
{
for (unsigned int i = 0; i < m_numBoxes; ++i)
{