path: root/samples/ObjectDetection/include
author      Éanna Ó Catháin <eanna.ocathain@arm.com>    2021-04-07 14:35:25 +0100
committer   Jim Flynn <jim.flynn@arm.com>               2021-05-07 09:11:52 +0000
commit      c6ab02a626e15b4a12fc09ecd844eb8b95380c3c (patch)
tree        9912ed9cdb89cdb24483b22d6621ae30049ae321 /samples/ObjectDetection/include
parent      e813d67f86df41a238ff79b5c554ef5027f56576 (diff)
download    armnn-c6ab02a626e15b4a12fc09ecd844eb8b95380c3c.tar.gz
MLECO-1252 ASR sample application using the public ArmNN C++ API.
Change-Id: I98cd505b8772a8c8fa88308121bc94135bb45068
Signed-off-by: Éanna Ó Catháin <eanna.ocathain@arm.com>
Diffstat (limited to 'samples/ObjectDetection/include')
-rw-r--r--  samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp        80
-rw-r--r--  samples/ObjectDetection/include/CmdArgsParser.hpp                50
-rw-r--r--  samples/ObjectDetection/include/CvVideoFileWriter.hpp            61
-rw-r--r--  samples/ObjectDetection/include/CvVideoFrameReader.hpp          108
-rw-r--r--  samples/ObjectDetection/include/CvWindowOutput.hpp               53
-rw-r--r--  samples/ObjectDetection/include/IDetectionResultDecoder.hpp       6
-rw-r--r--  samples/ObjectDetection/include/IFrameOutput.hpp                  48
-rw-r--r--  samples/ObjectDetection/include/IFrameReader.hpp                  45
-rw-r--r--  samples/ObjectDetection/include/ImageUtils.hpp                     6
-rw-r--r--  samples/ObjectDetection/include/ObjectDetectionPipeline.hpp (renamed from samples/ObjectDetection/include/NetworkPipeline.hpp)  16
-rw-r--r--  samples/ObjectDetection/include/SSDResultDecoder.hpp               6
-rw-r--r--  samples/ObjectDetection/include/Types.hpp                         50
-rw-r--r--  samples/ObjectDetection/include/YoloResultDecoder.hpp              6
13 files changed, 20 insertions, 515 deletions
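
The hunks below delete the sample-local executor, frame I/O and type headers and repoint the remaining ObjectDetection headers at shared common:: equivalents. As a rough orientation, a minimal sketch of the executor instantiation the updated headers imply, assuming the shared template keeps the two-argument constructor (model path plus preferred backends) of the deleted od::ArmnnNetworkExecutor; the shared header's location is an assumption, since it sits outside this diff:

    #include <memory>
    #include <string>
    #include <vector>
    #include <armnn/BackendId.hpp>
    #include "ArmnnNetworkExecutor.hpp"   // assumed shared (common) header, not part of this diff

    std::string modelPath = "detection_model.tflite";               // hypothetical model file
    std::vector<armnn::BackendId> backends = {"CpuAcc", "CpuRef"};  // preference order
    auto executor = std::make_unique<common::ArmnnNetworkExecutor<float>>(modelPath, backends);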
diff --git a/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp b/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp
deleted file mode 100644
index c75b68bbe1..0000000000
--- a/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp
+++ /dev/null
@@ -1,80 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "Types.hpp"
-
-#include "armnn/ArmNN.hpp"
-#include "armnnTfLiteParser/ITfLiteParser.hpp"
-#include "armnnUtils/DataLayoutIndexed.hpp"
-#include <armnn/Logging.hpp>
-
-#include <string>
-#include <vector>
-
-namespace od
-{
-/**
-* @brief Used to load in a network through ArmNN and run inference on it against a given backend.
-*
-*/
-class ArmnnNetworkExecutor
-{
-private:
- armnn::IRuntimePtr m_Runtime;
- armnn::NetworkId m_NetId{};
- mutable InferenceResults m_OutputBuffer;
- armnn::InputTensors m_InputTensors;
- armnn::OutputTensors m_OutputTensors;
- std::vector<armnnTfLiteParser::BindingPointInfo> m_outputBindingInfo;
-
- std::vector<std::string> m_outputLayerNamesList;
-
- armnnTfLiteParser::BindingPointInfo m_inputBindingInfo;
-
- void PrepareTensors(const void* inputData, const size_t dataBytes);
-
- template <typename Enumeration>
- auto log_as_int(Enumeration value)
- -> typename std::underlying_type<Enumeration>::type
- {
- return static_cast<typename std::underlying_type<Enumeration>::type>(value);
- }
-
-public:
- ArmnnNetworkExecutor() = delete;
-
- /**
- * @brief Initializes the network with the given input data. Parsed through TfLiteParser and optimized for a
- * given backend.
- *
- * Note that the output layers names order in m_outputLayerNamesList affects the order of the feature vectors
- * in output of the Run method.
- *
- * @param[in] modelPath - Relative path to the model file
- * @param[in] backends - The list of preferred backends to run inference on
- */
- ArmnnNetworkExecutor(std::string& modelPath,
- std::vector<armnn::BackendId>& backends);
-
- /**
- * @brief Returns the aspect ratio of the associated model in the order of width, height.
- */
- Size GetImageAspectRatio();
-
- armnn::DataType GetInputDataType() const;
-
- /**
- * @brief Runs inference on the provided input data, and stores the results in the provided InferenceResults object.
- *
- * @param[in] inputData - input frame data
- * @param[in] dataBytes - input data size in bytes
- * @param[out] results - Vector of DetectionResult objects used to store the output result.
- */
- bool Run(const void* inputData, const size_t dataBytes, InferenceResults& outResults);
-
-};
-}// namespace od \ No newline at end of file
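
For reference, a short usage sketch against the interface declared in the deleted header above; the buffer sizing is illustrative, as the real sample derives it from the model's input tensor:

    void RunExecutorExample()
    {
        std::string modelPath = "detection_model.tflite";               // hypothetical model file
        std::vector<armnn::BackendId> backends = {"CpuAcc", "CpuRef"};
        od::ArmnnNetworkExecutor executor(modelPath, backends);

        od::Size inputSize = executor.GetImageAspectRatio();            // width and height expected by the model
        std::vector<float> inputData(inputSize.m_Width * inputSize.m_Height * 3);  // illustrative RGB float buffer

        od::InferenceResults results;                                    // one vector<float> per output layer
        executor.Run(inputData.data(), inputData.size() * sizeof(float), results);
    }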
diff --git a/samples/ObjectDetection/include/CmdArgsParser.hpp b/samples/ObjectDetection/include/CmdArgsParser.hpp
deleted file mode 100644
index 6c22e6ff6d..0000000000
--- a/samples/ObjectDetection/include/CmdArgsParser.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-#include <string>
-#include <map>
-#include <iostream>
-
-const std::string MODEL_NAME = "--model-name";
-const std::string VIDEO_FILE_PATH = "--video-file-path";
-const std::string MODEL_FILE_PATH = "--model-file-path";
-const std::string OUTPUT_VIDEO_FILE_PATH = "--output-video-file-path";
-const std::string LABEL_PATH = "--label-path";
-const std::string PREFERRED_BACKENDS = "--preferred-backends";
-const std::string HELP = "--help";
-
-/*
- * The accepted options for this Object detection executable
- */
-static std::map<std::string, std::string> CMD_OPTIONS = {
- {VIDEO_FILE_PATH, "[REQUIRED] Path to the video file to run object detection on"},
- {MODEL_FILE_PATH, "[REQUIRED] Path to the Object Detection model to use"},
- {LABEL_PATH, "[REQUIRED] Path to the label set for the provided model file. "
-                 "Label file should just be an ordered list, separated by new line."},
- {MODEL_NAME, "[REQUIRED] The name of the model being used. Accepted options: YOLO_V3_TINY, SSD_MOBILE"},
- {OUTPUT_VIDEO_FILE_PATH, "[OPTIONAL] Path to the output video file with detections added in. "
- "If specified will save file to disk, else displays the output to screen"},
- {PREFERRED_BACKENDS, "[OPTIONAL] Takes the preferred backends in preference order, separated by comma."
- " For example: CpuAcc,GpuAcc,CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]."
- " Defaults to CpuAcc,CpuRef"}
-};
-
-/*
- * Checks that a particular option was specified by the user
- */
-bool CheckOptionSpecified(const std::map<std::string, std::string>& options, const std::string& option);
-
-
-/*
- * Retrieves the user provided option
- */
-std::string GetSpecifiedOption(const std::map<std::string, std::string>& options, const std::string& option);
-
-
-/*
- * Parses all the command line options provided by the user and stores in a map.
- */
-int ParseOptions(std::map<std::string, std::string>& options, std::map<std::string, std::string>& acceptedOptions,
- char *argv[], int argc); \ No newline at end of file
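
A sketch of how a main() could drive the functions declared in the deleted parser header above; the zero-on-success return convention of ParseOptions is an assumption, as its implementation is not part of this diff:

    int main(int argc, char* argv[])
    {
        std::map<std::string, std::string> options;
        if (ParseOptions(options, CMD_OPTIONS, argv, argc) != 0)   // assumed: non-zero means parse failure or --help
        {
            return 1;
        }

        std::string videoPath = GetSpecifiedOption(options, VIDEO_FILE_PATH);
        std::string modelPath = GetSpecifiedOption(options, MODEL_FILE_PATH);

        if (CheckOptionSpecified(options, OUTPUT_VIDEO_FILE_PATH))
        {
            // write annotated frames to the given file instead of a window
            std::string outputPath = GetSpecifiedOption(options, OUTPUT_VIDEO_FILE_PATH);
        }
        return 0;
    }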
diff --git a/samples/ObjectDetection/include/CvVideoFileWriter.hpp b/samples/ObjectDetection/include/CvVideoFileWriter.hpp
deleted file mode 100644
index ea1501b68e..0000000000
--- a/samples/ObjectDetection/include/CvVideoFileWriter.hpp
+++ /dev/null
@@ -1,61 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "IFrameOutput.hpp"
-#include <opencv2/opencv.hpp>
-
-namespace od
-{
-
-class CvVideoFileWriter : public IFrameOutput<cv::Mat> {
-public:
- /**
- * @brief Default constructor.
- *
- * Underlying open cv video writer object will be instantiated.
- */
- CvVideoFileWriter() = default;
-
- ~CvVideoFileWriter() override = default;
-
- /**
- * @brief Initialises video file writer.
- *
- * Opens opencv writer with given params. FFMPEG backend is used.
- *
- * @param outputVideo path to the video file.
- * @param encoding cv::CAP_PROP_FOURCC code.
- * @param fps target frame rate.
- * @param width target frame width.
- * @param height target frame height.
- *
- */
- void Init(const std::string& outputVideo, int encoding, double fps, int width, int height);
-
- /**
- * Writes frame to the file using opencv writer.
- *
- * @param frame data to write.
- */
- void WriteFrame(std::shared_ptr<cv::Mat>& frame) override;
-
- /**
- * Releases opencv writer.
- */
- void Close() override;
-
- /**
- * Checks if opencv writer was successfully opened.
-     * @return true if underlying writer is ready to be used, false otherwise.
- */
- bool IsReady() const override;
-
-private:
- cv::VideoWriter m_cvWriter{};
- bool m_ready = false;
-};
-}// namespace od \ No newline at end of file
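
A usage sketch of the deleted writer class above; the fourcc code and frame geometry are illustrative, as the sample normally takes them from CvVideoFrameReader:

    void WriteDemoVideo()   // hypothetical helper, for illustration only
    {
        od::CvVideoFileWriter writer;
        writer.Init("detections.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), 30.0, 1280, 720);

        if (writer.IsReady())
        {
            auto frame = std::make_shared<cv::Mat>(720, 1280, CV_8UC3, cv::Scalar::all(0));  // placeholder black frame
            writer.WriteFrame(frame);
            writer.Close();
        }
    }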
diff --git a/samples/ObjectDetection/include/CvVideoFrameReader.hpp b/samples/ObjectDetection/include/CvVideoFrameReader.hpp
deleted file mode 100644
index 081f92620e..0000000000
--- a/samples/ObjectDetection/include/CvVideoFrameReader.hpp
+++ /dev/null
@@ -1,108 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#pragma once
-
-
-#include "IFrameReader.hpp"
-#include <opencv2/opencv.hpp>
-
-namespace od
-{
-
-class CvVideoFrameReader :
- public IFrameReader<cv::Mat>
-{
-public:
- /**
- * @brief Default constructor.
- *
- * Underlying open cv video capture object will be instantiated.
- */
- CvVideoFrameReader() = default;
-
- ~CvVideoFrameReader() override = default;
-
- /**
- *@brief Initialises reader to capture frames from video file.
- *
- * @param source path to the video file or image sequence.
- *
- * @throws std::runtime_error if init failed
- */
- void Init(const std::string& source);
-
- std::shared_ptr <cv::Mat> ReadFrame() override;
-
- bool IsExhausted(const std::shared_ptr <cv::Mat>& frame) const override;
-
- /**
- * Returns effective video frame width supported by the source/set by the user.
- * Must be called after Init method.
- * @return frame width
- */
- int GetSourceWidth() const;
-
- /**
- * Returns effective video frame height supported by the source/set by the user.
- * Must be called after Init method.
- * @return frame height
- */
- int GetSourceHeight() const;
-
- /**
- * Returns effective fps value supported by the source/set by the user.
- * @return fps value
- */
- double GetSourceFps() const;
-
- /**
- * Will query OpenCV to convert images to RGB
- * Copy is actually default behaviour, but the set function needs to be called
- * in order to know whether OpenCV supports conversion from our source format.
- * @return boolean,
- * true: OpenCV returns RGB
- * false: OpenCV returns the fourcc format from GetSourceEncoding
- */
- bool ConvertToRGB();
-
- /**
- * Returns 4-character code of codec.
- * @return codec name
- */
- std::string GetSourceEncoding() const;
-
- /**
- * Get the fourcc int from its string name.
- * @return codec int
- */
- int GetSourceEncodingInt() const;
-
- int GetFrameCount() const;
-
-private:
- cv::VideoCapture m_capture;
-
- void CheckIsOpen(const std::string& source);
-};
-
-class CvVideoFrameReaderRgbWrapper :
- public IFrameReader<cv::Mat>
-{
-public:
- CvVideoFrameReaderRgbWrapper() = delete;
- CvVideoFrameReaderRgbWrapper(const CvVideoFrameReaderRgbWrapper& o) = delete;
- CvVideoFrameReaderRgbWrapper(CvVideoFrameReaderRgbWrapper&& o) = delete;
-
- CvVideoFrameReaderRgbWrapper(std::unique_ptr<od::CvVideoFrameReader> reader);
-
- std::shared_ptr<cv::Mat> ReadFrame() override;
-
- bool IsExhausted(const std::shared_ptr<cv::Mat>& frame) const override;
-
-private:
- std::unique_ptr<od::CvVideoFrameReader> m_reader;
-};
-
-}// namespace od \ No newline at end of file
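
A typical read loop against the deleted reader interface above, as a sketch only:

    void ReadAllFrames()   // hypothetical helper, for illustration only
    {
        od::CvVideoFrameReader reader;
        reader.Init("input.mp4");   // throws std::runtime_error if the source cannot be opened

        while (true)
        {
            std::shared_ptr<cv::Mat> frame = reader.ReadFrame();
            if (reader.IsExhausted(frame))
            {
                break;
            }
            // frames stay in the source format unless ConvertToRGB() reported RGB support
        }
    }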
diff --git a/samples/ObjectDetection/include/CvWindowOutput.hpp b/samples/ObjectDetection/include/CvWindowOutput.hpp
deleted file mode 100644
index 317327ba62..0000000000
--- a/samples/ObjectDetection/include/CvWindowOutput.hpp
+++ /dev/null
@@ -1,53 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "IFrameOutput.hpp"
-#include <opencv2/opencv.hpp>
-
-namespace od
-{
-
-class CvWindowOutput : public IFrameOutput<cv::Mat> {
-public:
-
- CvWindowOutput() = default;
-
- ~CvWindowOutput() override = default;
-
- /**
- * @brief Creates a named window.
- *
- * Uses opencv to create a window with given name.
- *
- * @param windowName opencv window name.
- *
- */
- void Init(const std::string& windowName);
-
- /**
- * Writes frame to the window.
- *
- * @param frame data to write.
- */
- void WriteFrame(std::shared_ptr<cv::Mat>& frame) override;
-
- /**
- * Releases all windows.
- */
- void Close() override;
-
- /**
- * Always true.
- * @return true.
- */
- bool IsReady() const override;
-
-private:
- std::string m_windowName;
-
-};
-}// namespace od \ No newline at end of file
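
The window output mirrors the file writer; a minimal sketch:

    void ShowBlankFrame()   // hypothetical helper, for illustration only
    {
        auto frame = std::make_shared<cv::Mat>(480, 640, CV_8UC3, cv::Scalar::all(0));  // placeholder frame
        od::CvWindowOutput window;
        window.Init("Object Detection");   // creates the named OpenCV window
        window.WriteFrame(frame);          // renders the frame into that window
        window.Close();                    // destroys all OpenCV windows
    }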
diff --git a/samples/ObjectDetection/include/IDetectionResultDecoder.hpp b/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
index c0a29df33f..a8a3cbb23a 100644
--- a/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
+++ b/samples/ObjectDetection/include/IDetectionResultDecoder.hpp
@@ -30,9 +30,9 @@ public:
*
* @return Vector of decoded detected objects.
*/
- virtual DetectedObjects Decode(const InferenceResults& results,
- const Size& outputFrameSize,
- const Size& resizedFrameSize,
+ virtual DetectedObjects Decode(const common::InferenceResults<float>& results,
+ const common::Size& outputFrameSize,
+ const common::Size& resizedFrameSize,
const std::vector<std::string>& labels) = 0;
};
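
A caller-side sketch of the updated Decode signature; it assumes common::Size can be brace-initialised with width and height like the removed od::Size, and that the decoder interface lives in namespace od like the rest of the sample:

    od::DetectedObjects DecodeExample(od::IDetectionResultDecoder& decoder,
                                      const common::InferenceResults<float>& results)
    {
        common::Size outputFrameSize{1920, 1080};                    // original frame size
        common::Size resizedFrameSize{300, 300};                     // size fed to the network
        std::vector<std::string> labels = {"person", "car", "dog"};  // illustrative label set

        return decoder.Decode(results, outputFrameSize, resizedFrameSize, labels);
    }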
diff --git a/samples/ObjectDetection/include/IFrameOutput.hpp b/samples/ObjectDetection/include/IFrameOutput.hpp
deleted file mode 100644
index c8b4fe5a47..0000000000
--- a/samples/ObjectDetection/include/IFrameOutput.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <cstddef>
-#include <memory>
-
-namespace od
-{
-/**
- * @brief Frames output interface
- *
- * @tparam FrameDataT frame container data type
- */
- template<typename FrameDataT> class IFrameOutput
- {
-
- public:
- /**
- * @brief Writes frame to the selected output
- *
- * @param frame container
- */
- virtual void WriteFrame(std::shared_ptr <FrameDataT>& frame) = 0;
-
- /**
- * @brief Closes the frame output
- */
- virtual void Close() = 0;
-
- /**
- * @brief Checks if the frame sink is ready to write.
- *
- * @return True if frame sink is ready, False otherwise
- */
- virtual bool IsReady() const = 0;
-
- /**
- * @brief Default destructor
- */
- virtual ~IFrameOutput() = default;
-
- };
-
-}// namespace od
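
The deleted interface above is small enough that a custom sink takes only a few lines; a hypothetical frame-counting sink, for illustration:

    class CountingFrameOutput : public od::IFrameOutput<cv::Mat>
    {
    public:
        void WriteFrame(std::shared_ptr<cv::Mat>& frame) override { if (frame) { ++m_count; } }
        void Close() override {}
        bool IsReady() const override { return true; }
        size_t FramesSeen() const { return m_count; }

    private:
        size_t m_count = 0;
    };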
diff --git a/samples/ObjectDetection/include/IFrameReader.hpp b/samples/ObjectDetection/include/IFrameReader.hpp
deleted file mode 100644
index d371b7d2a5..0000000000
--- a/samples/ObjectDetection/include/IFrameReader.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <cstddef>
-#include <memory>
-
-namespace od
-{
-/**
- * @brief Frame source reader interface
- *
- * @tparam FrameDataT frame container data type
- */
-template<typename FrameDataT> class IFrameReader
-{
-
-public:
- /**
- * @brief Reads the next frame from the source
- *
- * @return pointer to the frame container
- */
- virtual std::shared_ptr <FrameDataT> ReadFrame() = 0;
-
- /**
- * @brief Checks if the frame source has more frames to read.
- *
- * @param[in] frame the pointer to the last frame captured with the ReadFrame method could be used in
- * implementation specific logic to check frames source state.
- * @return True if frame source was exhausted, False otherwise
- */
- virtual bool IsExhausted(const std::shared_ptr <FrameDataT>& frame) const = 0;
-
- /**
- * @brief Default destructor
- */
- virtual ~IFrameReader() = default;
-
-};
-
-}// namespace od \ No newline at end of file
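
Together with IFrameOutput above, the reader interface supports a generic frame pump; a sketch that works with any reader/output pair built on these interfaces:

    void PumpFrames(od::IFrameReader<cv::Mat>& reader, od::IFrameOutput<cv::Mat>& output)
    {
        while (output.IsReady())
        {
            std::shared_ptr<cv::Mat> frame = reader.ReadFrame();
            if (reader.IsExhausted(frame))
            {
                break;
            }
            output.WriteFrame(frame);
        }
        output.Close();
    }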
diff --git a/samples/ObjectDetection/include/ImageUtils.hpp b/samples/ObjectDetection/include/ImageUtils.hpp
index 07e2b839f9..9bae568755 100644
--- a/samples/ObjectDetection/include/ImageUtils.hpp
+++ b/samples/ObjectDetection/include/ImageUtils.hpp
@@ -21,7 +21,7 @@ const cv::InterpolationFlags DefaultResizeFlag = cv::INTER_NEAREST;
*/
void AddInferenceOutputToFrame(od::DetectedObjects& decodedResults,
cv::Mat& inputFrame,
- std::vector<std::tuple<std::string, od::BBoxColor>>& labels);
+ std::vector<std::tuple<std::string, common::BBoxColor>>& labels);
/**
* @brief Function to resize a frame while keeping aspect ratio.
@@ -30,7 +30,7 @@ void AddInferenceOutputToFrame(od::DetectedObjects& decodedResults,
* @param[out] dest the frame we want to resize into.
* @param[in] aspectRatio aspect ratio to use when resizing.
*/
-void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const od::Size& aspectRatio);
+void ResizeFrame(const cv::Mat& frame, cv::Mat& dest, const common::Size& aspectRatio);
/**
* @brief Function to pad a frame.
@@ -49,7 +49,7 @@ void PadFrame(const cv::Mat& src, cv::Mat& dest, int bottom, int right);
* @param cache operation requires intermediate data container.
* @param destSize size of the destination frame
*/
-void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const od::Size& destSize);
+void ResizeWithPad(const cv::Mat& frame, cv::Mat& dest, cv::Mat& cache, const common::Size& destSize);
/**
* @brief Function to retrieve the cv::scalar color from a RGB tuple.
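
A sketch of the updated ResizeWithPad call; it assumes common::Size can be brace-initialised with width and height like the removed od::Size:

    void PrepareNetworkInput()   // hypothetical helper, for illustration only
    {
        cv::Mat frame = cv::imread("input.jpg");   // hypothetical input image
        cv::Mat processed;
        cv::Mat cache;                             // intermediate buffer reused between calls
        common::Size networkInputSize{300, 300};
        ResizeWithPad(frame, processed, cache, networkInputSize);
    }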
diff --git a/samples/ObjectDetection/include/NetworkPipeline.hpp b/samples/ObjectDetection/include/ObjectDetectionPipeline.hpp
index c3408b494e..38de65b007 100644
--- a/samples/ObjectDetection/include/NetworkPipeline.hpp
+++ b/samples/ObjectDetection/include/ObjectDetectionPipeline.hpp
@@ -27,7 +27,7 @@ public:
* @param executor - unique pointer to inference runner
* @param decoder - unique pointer to inference results decoder
*/
- ObjDetectionPipeline(std::unique_ptr<ArmnnNetworkExecutor> executor,
+ ObjDetectionPipeline(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
std::unique_ptr<IDetectionResultDecoder> decoder);
/**
@@ -48,7 +48,7 @@ public:
* @param[in] processed - input inference data. Data type should be aligned with input tensor.
* @param[out] result - raw floating point inference results.
*/
- virtual void Inference(const cv::Mat& processed, InferenceResults& result);
+ virtual void Inference(const cv::Mat& processed, common::InferenceResults<float>& result);
/**
* @brief Standard inference results post-processing implementation.
@@ -58,13 +58,13 @@ public:
* @param[in] inferenceResult - inference results to be decoded.
* @param[in] callback - a function to be called after successful inference results decoding.
*/
- virtual void PostProcessing(InferenceResults& inferenceResult,
+ virtual void PostProcessing(common::InferenceResults<float>& inferenceResult,
const std::function<void (DetectedObjects)>& callback);
protected:
- std::unique_ptr<ArmnnNetworkExecutor> m_executor;
+ std::unique_ptr<common::ArmnnNetworkExecutor<float>> m_executor;
std::unique_ptr<IDetectionResultDecoder> m_decoder;
- Size m_inputImageSize{};
+ common::Size m_inputImageSize{};
cv::Mat m_processedFrame;
};
@@ -85,7 +85,7 @@ public:
* @param ClsThreshold[in] - class probability threshold for decoding step
* @param ObjectThreshold[in] - detected object score threshold for decoding step
*/
- YoloV3Tiny(std::unique_ptr<ArmnnNetworkExecutor> executor,
+ YoloV3Tiny(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
float NMSThreshold, float ClsThreshold, float ObjectThreshold);
/**
@@ -116,7 +116,7 @@ public:
 * @param[in] executor - unique pointer to inference runner
 * @param[in] objectThreshold - detected object score threshold for decoding step
*/
- MobileNetSSDv1(std::unique_ptr<ArmnnNetworkExecutor> executor,
+ MobileNetSSDv1(std::unique_ptr<common::ArmnnNetworkExecutor<float>> executor,
float objectThreshold);
/**
@@ -143,6 +143,6 @@ using IPipelinePtr = std::unique_ptr<od::ObjDetectionPipeline>;
*
* @return unique pointer to object detection pipeline.
*/
-IPipelinePtr CreatePipeline(od::ODPipelineOptions& config);
+IPipelinePtr CreatePipeline(common::PipelineOptions& config);
}// namespace od \ No newline at end of file
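
A sketch of building and driving the pipeline after this rename; the field names on common::PipelineOptions are assumed to mirror the removed od::ODPipelineOptions, and the frame is assumed to be already pre-processed for the network:

    void RunPipelineOnce(const cv::Mat& processed)   // frame already resized/padded for the network
    {
        common::PipelineOptions config;
        config.m_ModelName = "SSD_MOBILE";
        config.m_ModelFilePath = "ssd_mobilenet_v1.tflite";   // hypothetical model file
        config.m_backends = {"CpuAcc", "CpuRef"};

        od::IPipelinePtr pipeline = od::CreatePipeline(config);

        common::InferenceResults<float> results;
        pipeline->Inference(processed, results);
        pipeline->PostProcessing(results, [](od::DetectedObjects detections)
        {
            // draw, log or encode the decoded detections here
        });
    }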
diff --git a/samples/ObjectDetection/include/SSDResultDecoder.hpp b/samples/ObjectDetection/include/SSDResultDecoder.hpp
index 65afb8d376..4c703c18fc 100644
--- a/samples/ObjectDetection/include/SSDResultDecoder.hpp
+++ b/samples/ObjectDetection/include/SSDResultDecoder.hpp
@@ -21,9 +21,9 @@ public:
*/
SSDResultDecoder(float ObjectThreshold);
- DetectedObjects Decode(const InferenceResults& results,
- const Size& outputFrameSize,
- const Size& resizedFrameSize,
+ DetectedObjects Decode(const common::InferenceResults<float>& results,
+ const common::Size& outputFrameSize,
+ const common::Size& resizedFrameSize,
const std::vector<std::string>& labels) override;
private:
diff --git a/samples/ObjectDetection/include/Types.hpp b/samples/ObjectDetection/include/Types.hpp
deleted file mode 100644
index 801cff392a..0000000000
--- a/samples/ObjectDetection/include/Types.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-//
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <cstddef>
-#include <cstdint>
-#include <vector>
-#include <tuple>
-#include <armnn/BackendId.hpp>
-
-namespace od
-{
-
-struct Size
-{
-
- uint32_t m_Width;
- uint32_t m_Height;
-
- Size() : Size(0, 0) {}
-
- Size(uint32_t width, uint32_t height) :
- m_Width{width}, m_Height{height} {}
-
- Size(const Size& other)
- : Size(other.m_Width, other.m_Height) {}
-
- ~Size() = default;
-
- Size &operator=(const Size& other) = default;
-};
-
-struct BBoxColor
-{
- std::tuple<int, int, int> colorCode;
-};
-
-struct ODPipelineOptions
-{
- std::string m_ModelName;
- std::string m_ModelFilePath;
- std::vector<armnn::BackendId> m_backends;
-};
-
-using InferenceResult = std::vector<float>;
-using InferenceResults = std::vector<InferenceResult>;
-} \ No newline at end of file
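
The removed aliases map directly onto the shared, element-type-parameterised form used in the hunks above (the shared definition itself is outside this diff):

    // Removed sample-local aliases:
    using InferenceResult  = std::vector<float>;
    using InferenceResults = std::vector<InferenceResult>;

    // Shared replacement referenced by the updated headers:
    common::InferenceResults<float> results;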
diff --git a/samples/ObjectDetection/include/YoloResultDecoder.hpp b/samples/ObjectDetection/include/YoloResultDecoder.hpp
index 98435e3cc9..ae6cb5e710 100644
--- a/samples/ObjectDetection/include/YoloResultDecoder.hpp
+++ b/samples/ObjectDetection/include/YoloResultDecoder.hpp
@@ -26,9 +26,9 @@ public:
*/
YoloResultDecoder(float NMSThreshold, float ClsThreshold, float ObjectThreshold);
- DetectedObjects Decode(const InferenceResults& results,
- const Size& outputFrameSize,
- const Size& resizedFrameSize,
+ DetectedObjects Decode(const common::InferenceResults<float>& results,
+ const common::Size& outputFrameSize,
+ const common::Size& resizedFrameSize,
const std::vector <std::string>& labels) override;
private:
float m_NmsThreshold;