path: root/tests/ExecuteNetwork/ExecuteNetworkParams.hpp
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/BackendId.hpp>
#include <armnn/Tensor.hpp>

#if defined(ARMNN_TFLITE_DELEGATE)
#include <DelegateOptions.hpp>
#endif

/// Holds all parameters necessary to execute a network.
/// See ExecuteNetworkProgramOptions.cpp for a description of each parameter.
struct ExecuteNetworkParams
{
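    /// Selects which runtime executes the model: the Arm NN TfLite parser,
    /// the Arm NN TfLite delegate (classic or opaque), or the stock TfLite
    /// interpreter. (Descriptive comment; semantics assumed from the
    /// enumerator names.)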
    enum class TfLiteExecutor
    {
        ArmNNTfLiteParser,
        ArmNNTfLiteDelegate,
        TfliteInterpreter,
        ArmNNTfLiteOpaqueDelegate,
    };

    bool                              m_AllowExpandedDims;
    std::string                       m_CachedNetworkFilePath;
    std::vector<armnn::BackendId>     m_ComputeDevices;
    bool                              m_Concurrent;
    bool                              m_DequantizeOutput;
    std::string                       m_DynamicBackendsPath;
    bool                              m_EnableBf16TurboMode;
    bool                              m_EnableFastMath = false;
    bool                              m_EnableFp16TurboMode;
    bool                              m_EnableLayerDetails = false;
    bool                              m_EnableProfiling;
    bool                              m_GenerateTensorData;
    bool                              m_InferOutputShape = false;
    bool                              m_EnableDelegate = false;
    bool                              m_IsModelBinary;
    std::vector<std::string>          m_InputNames;
    std::vector<std::string>          m_InputTensorDataFilePaths;
    std::vector<armnn::TensorShape>   m_InputTensorShapes;
    size_t                            m_Iterations;
    std::string                       m_ModelPath;
    unsigned int                      m_NumberOfThreads;
    bool                              m_OutputDetailsToStdOut;
    bool                              m_OutputDetailsOnlyToStdOut;
    std::vector<std::string>          m_OutputNames;
    std::vector<std::string>          m_OutputTensorFiles;
    bool                              m_ParseUnsupported = false;
    bool                              m_PrintIntermediate;
    bool                              m_PrintIntermediateOutputsToFile;
    bool                              m_DontPrintOutputs;
    bool                              m_QuantizeInput;
    bool                              m_SaveCachedNetwork;
    size_t                            m_SubgraphId;
    double                            m_ThresholdTime;
    int                               m_TuningLevel;
    std::string                       m_TuningPath;
    std::string                       m_MLGOTuningFilePath;
    TfLiteExecutor                    m_TfLiteExecutor;
    size_t                            m_ThreadPoolSize;
    bool                              m_ImportInputsIfAligned;
    bool                              m_ReuseBuffers;
    std::string                       m_ComparisonFile;
    std::vector<armnn::BackendId>     m_ComparisonComputeDevices;
    bool                              m_CompareWithTflite;
    // Validates that the parameters given to ExecuteNetwork are mutually consistent.
    void ValidateParams();

#if defined(ARMNN_TFLITE_DELEGATE)
    /// A utility method that populates a DelegateOptions object from this ExecuteNetworkParams.
    armnnDelegate::DelegateOptions ToDelegateOptions() const;
#endif

};
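
// Illustrative usage (a minimal sketch, kept in comments): one possible way a
// caller could populate and validate these parameters before running a model.
// The field values below are assumptions chosen for demonstration only;
// ValidateParams() is expected to reject inconsistent combinations (see
// ExecuteNetworkProgramOptions.cpp).
//
//     ExecuteNetworkParams params;
//     params.m_ModelPath      = "model.tflite";               // hypothetical model file
//     params.m_ComputeDevices = { armnn::Compute::CpuAcc };
//     params.m_Iterations     = 1;
//     params.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
//     params.ValidateParams();
// #if defined(ARMNN_TFLITE_DELEGATE)
//     armnnDelegate::DelegateOptions options = params.ToDelegateOptions();
// #endif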