//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#pragma once

#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "ActivationFunctor.h"

#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
#include <CpuExecutor.h>

#include "Utils.hpp"

#include <map>
#include <memory>
#include <set>
#include <vector>

namespace armnn_driver
{

class ConstTensorPin;
class LayerInputHandle;

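// The overall result of a model conversion: success, a failure to map the
// model's memory pools, or an unsupported feature encountered along the way.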
enum class ConversionResult
{
    Success,
    ErrorMappingPools,
    UnsupportedFeature
};

// A helper class performing the conversion from an Android NN driver Model
// representation to an armnn::INetwork object.
class ModelToINetworkConverter
{
public:
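    // Conversion is attempted during construction; use GetConversionResult()
    // to check whether it succeeded.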
    ModelToINetworkConverter(armnn::Compute compute, const Model& model,
        const std::set<unsigned int>& forcedUnsupportedOperations);

    ConversionResult GetConversionResult() const { return m_ConversionResult; }

    // Returns the ArmNN INetwork corresponding to the input model if the conversion succeeded; nullptr otherwise.
    armnn::INetwork* GetINetwork() const { return m_Network.get(); }

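    // Returns whether the operation at the given index within the model is
    // supported by this converter.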
    bool IsOperationSupported(uint32_t operationIndex) const;

private:
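    // Performs the actual conversion, populating m_Network and recording the
    // outcome in m_ConversionResult and m_OperationSupported.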
    void Convert();

    bool ConvertOperation(const Operation& operation);

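    // One conversion handler per supported Android NN operation type; each
    // returns false if the operation could not be converted.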
    bool ConvertAdd(const Operation& operation);

    bool ConvertAveragePool2d(const Operation& operation);

    bool ConvertConcatenation(const Operation& operation);

    bool ConvertConv2d(const Operation& operation);

    bool ConvertDepthwiseConv2d(const Operation& operation);

    bool ConvertFloor(const Operation& operation);

    bool ConvertFullyConnected(const Operation& operation);

    bool ConvertLogistic(const Operation& operation);

    bool ConvertLocalResponseNormalization(const Operation& operation);

    bool ConvertL2Normalization(const Operation& operation);

    bool ConvertL2Pool2d(const Operation& operation);

    bool ConvertMaxPool2d(const Operation& operation);

    bool ConvertMul(const Operation& operation);

    bool ConvertReLu(const Operation& operation);

    bool ConvertReLu1(const Operation& operation);

    bool ConvertReLu6(const Operation& operation);

    bool ConvertSoftmax(const Operation& operation);

    bool ConvertTanH(const Operation& operation);

    bool ConvertReshape(const Operation& operation);

    bool ConvertResizeBilinear(const Operation& operation);

    bool ConvertToActivation(const Operation& operation, const char* operationName,
        const armnn::ActivationDescriptor& activationDesc);

    bool ConvertPooling2d(const Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);

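    // Helpers for accessing an operation's operands and reading their values.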
    const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;

    const Operand* GetInputOperand(const Operation& operation, uint32_t inputIndex) const;

    const Operand* GetOutputOperand(const Operation& operation, uint32_t outputIndex) const;

    template<typename T>
    bool GetInputScalar(const Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;

    bool GetInputInt32(const Operation& operation, uint32_t inputIndex, int32_t& outValue) const;

    bool GetInputFloat32(const Operation& operation, uint32_t inputIndex, float& outValue) const;

    bool GetInputActivationFunction(const Operation& operation, uint32_t inputIndex,
        ActivationFn& outActivationFunction) const;

    bool GetInputPaddingScheme(const Operation& operation, uint32_t inputIndex,
        android::nn::PaddingScheme& outPaddingScheme) const;

    LayerInputHandle ConvertToLayerInputHandle(const Operation& operation, uint32_t inputIndex);

    ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation& operation, uint32_t inputIndex,
        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
        const armnn::TensorShape* overrideTensorShape = nullptr);

    ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
        const armnn::TensorShape* overrideTensorShape = nullptr);

    bool GetTensorInt32Values(const Operand& operand, std::vector<int32_t>& outValues) const;

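    // Appends an ArmNN activation layer matching the requested activation
    // function (if any) after prevLayer, returning the last layer in the chain.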
    armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo, ActivationFn activation,
                                                armnn::IConnectableLayer* prevLayer);

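    // Registers the given layer's output slot as the producer of the
    // operation's outputIndex-th output operand, tracking it for later lookups.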
    bool SetupAndTrackLayerOutputSlot(const Operation& operation, uint32_t outputIndex,
                                      armnn::IConnectableLayer& layer);

    // Input data
    armnn::Compute                    m_Compute;
    const Model&                      m_Model;
    const std::set<unsigned int>&     m_ForcedUnsupportedOperations;

    // Output data
    armnn::INetworkPtr                m_Network;
    ConversionResult                  m_ConversionResult;
    std::map<uint32_t, bool>          m_OperationSupported;

    // Working/intermediate data
    std::vector<armnn::IOutputSlot*>           m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo>  m_MemPools;
};

} // namespace armnn_driver
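
// Example usage (a minimal sketch based on this header alone; where 'model'
// comes from and the choice of armnn::Compute::CpuAcc are assumptions):
//
//     std::set<unsigned int> forcedUnsupported; // operation indices to treat as unsupported
//     armnn_driver::ModelToINetworkConverter converter(armnn::Compute::CpuAcc, model, forcedUnsupported);
//
//     if (converter.GetConversionResult() == armnn_driver::ConversionResult::Success)
//     {
//         armnn::INetwork* network = converter.GetINetwork();
//         // pass the network on to armnn::Optimize() and an armnn::IRuntime for execution
//     }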