path: root/samples/ObjectDetection/include/ArmnnNetworkExecutor.hpp
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Types.hpp"

#include "armnn/ArmNN.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include "armnnUtils/DataLayoutIndexed.hpp"
#include <armnn/Logging.hpp>

#include <string>
#include <vector>

namespace od
{
/**
* @brief Used to load a network through Arm NN and run inference on it against a given backend.
*/
class ArmnnNetworkExecutor
{
private:
    armnn::IRuntimePtr m_Runtime;
    armnn::NetworkId m_NetId{};
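    // Internal buffer that receives the raw inference output, reused across Run() calls
    // (hence mutable) and wrapped by m_OutputTensors.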
    mutable InferenceResults m_OutputBuffer;
    armnn::InputTensors     m_InputTensors;
    armnn::OutputTensors    m_OutputTensors;
    std::vector<armnnTfLiteParser::BindingPointInfo> m_outputBindingInfo;

    std::vector<std::string> m_outputLayerNamesList;

    armnnTfLiteParser::BindingPointInfo m_inputBindingInfo;

    /**
    * @brief Wraps the given input data and the internal output buffers into m_InputTensors
    *        and m_OutputTensors, ready for inference.
    */
    void PrepareTensors(const void* inputData, const size_t dataBytes);

    // Casts an enum value to its underlying integral type so that it can be logged.
    template <typename Enumeration>
    auto log_as_int(Enumeration value)
    -> typename std::underlying_type<Enumeration>::type
    {
        return static_cast<typename std::underlying_type<Enumeration>::type>(value);
    }

public:
    ArmnnNetworkExecutor() = delete;

    /**
    * @brief Initializes the network from the given model file. The model is parsed through
    *        TfLiteParser and optimized for a given backend.
    *
    * Note that the order of the output layer names in m_outputLayerNamesList affects the
    * order of the feature vectors in the output of the Run method.
    *
    * @param[in] modelPath - Relative path to the model file
    * @param[in] backends - The list of preferred backends to run inference on
    */
    ArmnnNetworkExecutor(std::string& modelPath,
                         std::vector<armnn::BackendId>& backends);

    /**
    * @brief Returns the aspect ratio of the associated model's input as a Size, in the
    *        order of width, height.
    */
    Size GetImageAspectRatio();

    /**
    * @brief Returns the data type expected by the model's input tensor.
    */
    armnn::DataType GetInputDataType() const;

    /**
    * @brief Runs inference on the provided input data and stores the results in the
    *        provided InferenceResults object.
    *
    * @param[in] inputData - Input frame data
    * @param[in] dataBytes - Input data size in bytes
    * @param[out] outResults - InferenceResults object used to store the output; holds one
    *                          feature vector per output layer
    */
    bool Run(const void* inputData, const size_t dataBytes, InferenceResults& outResults);

};
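
// A minimal usage sketch (not part of the original header). The model path, backend list
// and frame buffer below are illustrative assumptions:
//
//     std::string modelPath = "detector.tflite";                 // hypothetical model file
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
//                                                armnn::Compute::CpuRef };
//     od::ArmnnNetworkExecutor executor(modelPath, backends);
//
//     od::InferenceResults results;
//     // frameData/frameBytes are assumed to hold one preprocessed input frame.
//     if (executor.Run(frameData, frameBytes, results))
//     {
//         // results now holds one feature vector per output layer.
//     }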
}// namespace od