path: root/tests/ExecuteNetwork/ExecuteNetworkParams.cpp
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ExecuteNetworkParams.hpp"

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include <InferenceModel.hpp>
#include <armnn/Logging.hpp>

#include <fmt/format.h>

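// Determines from the model-format string (e.g. "tflite-binary" or "onnx-text")
// whether the model file is binary or text; throws if the format names neither.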
bool IsModelBinary(const std::string& modelFormat)
{
    // Parse model binary flag from the model-format string we got from the command-line
    if (modelFormat.find("binary") != std::string::npos)
    {
        return true;
    }
    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
    {
        return false;
    }
    else
    {
        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
                                                          "Please include 'binary' or 'text'",
                                                          modelFormat));
    }
}

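// Checks that support for the requested model format was compiled into this
// binary, throwing InvalidArgumentException if the required parser is missing.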
void CheckModelFormat(const std::string& modelFormat)
{
    // Check that the parser required for this format was compiled into the binary
    if (modelFormat.find("armnn") != std::string::npos)
    {
#if defined(ARMNN_SERIALIZER)
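        // Built with the Arm NN serializer: the armnn format is supported.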
#else
        throw armnn::InvalidArgumentException("Can't run model in armnn format without a "
                                              "built with serialization support.");
#endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
#if defined(ARMNN_ONNX_PARSER)
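        // Built with the Onnx parser: the onnx format is supported.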
#else
        throw armnn::InvalidArgumentException("Can't run model in onnx format without a "
                                              "built with Onnx parser support.");
#endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
#if defined(ARMNN_TF_LITE_PARSER)
        if (!IsModelBinary(modelFormat))
        {
            throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. Only 'binary' "
                                                              "format supported for tflite files",
                                                              modelFormat));
        }
#elif defined(ARMNN_TFLITE_DELEGATE)
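        // Built with the TfLite delegate, which accepts tflite files directly.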
#else
        throw armnn::InvalidArgumentException("Can't run model in tflite format without a "
                                              "built with Tensorflow Lite parser support.");
#endif
    }
    else
    {
        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
                                                          "Please include 'tflite' or 'onnx'",
                                                          modelFormat));
    }
}

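// Validates the OpenCL tuning options: tuning level 0 reads an existing tuning
// file from tuningPath, levels 1-3 generate one. Tuning has no effect unless
// GpuAcc is among the requested compute devices, so a warning is logged if not.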
void CheckClTuningParameter(const int& tuningLevel,
                            const std::string& tuningPath,
                            const std::vector<armnn::BackendId>& computeDevices)
{
    if (!tuningPath.empty())
    {
        if (tuningLevel == 0)
        {
            ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
            if (!ValidatePath(tuningPath, true))
            {
                throw armnn::InvalidArgumentException("The tuning path is not valid");
            }
        }
        else if ((1 <= tuningLevel) && (tuningLevel <= 3))
        {
            ARMNN_LOG(info) << "Starting execution to generate a cl tuning file: " << tuningPath << "\n"
                            << "Tuning level in use: " << tuningLevel << "\n";
        }
        else if ((tuningLevel < 0) || (tuningLevel > 3))
        {
            throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.",
                                                              tuningLevel));
        }

        // Ensure that GpuAcc is enabled. Otherwise no tuning data is used or generated.
        // Only warn if it is not enabled.
        auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
        if (it == computeDevices.end())
        {
            ARMNN_LOG(warning) << "To use Cl Tuning the compute device GpuAcc needs to be active.";
        }
    }
}

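// Checks all command-line parameters for consistency, logging a fatal error
// (or throwing, when throwExc is set) for any invalid combination.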
void ExecuteNetworkParams::ValidateParams()
{
    // Set to true to throw an exception rather than log the error with ARMNN_LOG
    bool throwExc = false;

    try
    {
        if (m_DynamicBackendsPath == "")
        {
            // Check compute devices are valid unless they are dynamically loaded at runtime
            std::string invalidBackends;
            if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
            {
                ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
                                 << invalidBackends;
            }
        }

        CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);

        if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
        {
            ARMNN_LOG(fatal) << "BFloat16 and Float16 turbo mode cannot be enabled at the same time.";
        }

        m_IsModelBinary = IsModelBinary(m_ModelFormat);

        CheckModelFormat(m_ModelFormat);

        // Check input tensor shapes
        if ((m_InputTensorShapes.size() != 0) &&
            (m_InputTensorShapes.size() != m_InputNames.size()))
        {
            ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements. ";
        }

        if (m_InputTensorDataFilePaths.size() != 0)
        {
            if (!ValidatePaths(m_InputTensorDataFilePaths, true))
            {
                ARMNN_LOG(fatal) << "One or more input data file paths are not valid. ";
            }

            if (!m_Concurrent && m_InputTensorDataFilePaths.size() != m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements. ";
            }

            if (m_InputTensorDataFilePaths.size() < m_SimultaneousIterations * m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "There is not enough input data for "
                                 << m_SimultaneousIterations << " executions.";
            }
            if (m_InputTensorDataFilePaths.size() > m_SimultaneousIterations * m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "There is more input data than required for "
                                 << m_SimultaneousIterations << " executions.";
            }
        }

        if ((m_OutputTensorFiles.size() != 0) &&
            (m_OutputTensorFiles.size() != m_OutputNames.size()))
        {
            ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements. ";
        }

        if ((m_OutputTensorFiles.size() != 0)
            && m_OutputTensorFiles.size() != m_SimultaneousIterations * m_OutputNames.size())
        {
            ARMNN_LOG(fatal) << "There is not enough output data for " << m_SimultaneousIterations << " execution.";
        }

        if (m_InputTypes.size() == 0)
        {
            // Default the type of all inputs to "float"
            m_InputTypes.assign(m_InputNames.size(), "float");
        }
        else if (m_InputTypes.size() != m_InputNames.size())
        {
            ARMNN_LOG(fatal) << "input-name and input-type must have the same number of elements.";
        }

        if (m_OutputTypes.size() == 0)
        {
            // Default the type of all outputs to "float"
            m_OutputTypes.assign(m_OutputNames.size(), "float");
        }
        else if (m_OutputTypes.size() != m_OutputNames.size())
        {
            ARMNN_LOG(fatal) << "output-name and output-type must have the same number of elements.";
        }

        // Check that threshold time is not less than zero
        if (m_ThresholdTime < 0)
        {
            ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
        }
    }
    catch (std::string& exc)
    {
        if (throwExc)
        {
            throw armnn::InvalidArgumentException(exc);
        }
        else
        {
            std::cout << exc;
            exit(EXIT_FAILURE);
        }
    }

    // Warn if ExecuteNetwork will generate dummy input data
    if (m_GenerateTensorData)
    {
        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
    }
}