//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <array>
#include <functional>
#include <memory>
#include "BackendId.hpp"
#include "Exceptions.hpp"

namespace armnn
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;

/// @enum Status enumeration
/// @var Status::Success
/// @var Status::Failure
enum class Status
{
    Success = 0,
    Failure = 1
};

enum class DataType
{
    Float16 = 0,
    Float32 = 1,
    QuantisedAsymm8 = 2,
    Signed32 = 3,
    Boolean = 4,
    QuantisedSymm16 = 5,
    QuantizedSymm8PerAxis = 6,
    QuantisedSymm8 = 7
};
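
// Illustrative sketch (hypothetical helper, not part of the Arm NN API): the
// 8-bit quantised types above are typically interpreted through an affine
// transform, real = scale * (quantised - offset), with offset fixed at 0 for
// the symmetric variants. In Arm NN the scale and offset travel with the
// tensor metadata rather than with this enum.
inline float ExampleDequantize(unsigned char quantisedValue, float scale, int offset)
{
    return scale * (static_cast<int>(quantisedValue) - offset);
}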

enum class DataLayout
{
    NCHW = 1,
    NHWC = 2
};
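
// Illustrative sketch (hypothetical helper, not part of the Arm NN API): the
// two layouts differ only in how a (batch, channel, height, width) coordinate
// is flattened into a linear buffer index.
inline unsigned int ExampleFlatIndex(DataLayout layout,
                                     unsigned int n, unsigned int c,
                                     unsigned int h, unsigned int w,
                                     unsigned int channels, unsigned int height, unsigned int width)
{
    return layout == DataLayout::NCHW
        ? ((n * channels + c) * height + h) * width + w  // width varies fastest, channels slowest
        : ((n * height + h) * width + w) * channels + c; // channels vary fastest
}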

enum class ActivationFunction
{
    Sigmoid     = 0,
    TanH        = 1,
    Linear      = 2,
    ReLu        = 3,
    BoundedReLu = 4, ///< min(a, max(b, input)), where a is the upper bound and b the lower bound
    SoftReLu    = 5,
    LeakyReLu   = 6,
    Abs         = 7,
    Sqrt        = 8,
    Square      = 9
};
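
// Illustrative sketch (hypothetical helper): evaluating BoundedReLu as defined
// in the comment above, i.e. clamping the input to [b, a]. In Arm NN proper,
// a and b are supplied through the activation layer's parameters rather than
// through this enum.
inline float ExampleBoundedReLu(float input, float a, float b)
{
    const float lower = input > b ? input : b; // max(b, input)
    return lower < a ? lower : a;              // min(a, ...)
}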

enum class PoolingAlgorithm
{
    Max     = 0,
    Average = 1,
    L2      = 2
};

enum class ResizeMethod
{
    Bilinear        = 0,
    NearestNeighbor = 1
};

///
/// The padding method modifies the output of pooling layers.
/// In both supported methods, the padding values themselves are
/// ignored (they are not treated as zeroes, which would make a
/// difference when max pooling a tensor with negative values).
/// The difference between IgnoreValue and Exclude is that the
/// former counts the padding fields in the divisor of Average
/// and L2 pooling, while the latter does not.
///
enum class PaddingMethod
{
    /// The padding fields count, but are ignored
    IgnoreValue = 0,
    /// The padding fields don't count and are ignored
    Exclude     = 1
};
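
// Illustrative sketch (hypothetical helper, not Arm NN's implementation) of
// the divisor difference described above, for a single Average pooling window:
// 'windowSize' counts every field in the window including padding, while
// 'validCount' counts only the fields that fall inside the input.
inline float ExampleAverageDivisor(PaddingMethod method,
                                   float windowSum,
                                   unsigned int windowSize,
                                   unsigned int validCount)
{
    // IgnoreValue: padded fields add nothing to the sum but still count in the
    // divisor. Exclude: they do not count at all.
    return windowSum / (method == PaddingMethod::IgnoreValue ? windowSize : validCount);
}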

enum class NormalizationAlgorithmChannel
{
    Across = 0,
    Within = 1
};

enum class NormalizationAlgorithmMethod
{
    /// Krichevsky 2012: Local Brightness Normalization
    LocalBrightness = 0,
    /// Jarret 2009: Local Contrast Normalization
    LocalContrast = 1
};

enum class OutputShapeRounding
{
    Floor       = 0,
    Ceiling     = 1
};

/// Each backend should implement an IBackend.
class IBackend
{
protected:
    IBackend() {}
    virtual ~IBackend() {}

public:
    virtual const BackendId& GetId() const = 0;
};

using IBackendSharedPtr = std::shared_ptr<IBackend>;
using IBackendUniquePtr = std::unique_ptr<IBackend, void(*)(IBackend* backend)>;
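
// Illustrative sketch (hypothetical backend, not shipped with Arm NN): a
// minimal IBackend implementation, and one way to satisfy the function-pointer
// deleter of IBackendUniquePtr. This assumes BackendId is constructible from a
// string literal, as provided by BackendId.hpp.
class ExampleBackend final : public IBackend
{
public:
    const BackendId& GetId() const override
    {
        static const BackendId id("ExampleBackend");
        return id;
    }
};

inline IBackendUniquePtr MakeExampleBackend()
{
    // The cast is needed because IBackend's destructor is protected; a
    // captureless lambda converts to the required function pointer type.
    return IBackendUniquePtr(new ExampleBackend(),
                             [](IBackend* backend) { delete static_cast<ExampleBackend*>(backend); });
}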

/// Device specific knowledge to be passed to the optimizer.
class IDeviceSpec
{
protected:
    IDeviceSpec() {}
    virtual ~IDeviceSpec() {}
public:
    virtual const BackendIdSet& GetSupportedBackends() const = 0;
};

/// Type of identifiers for bindable layers (inputs, outputs).
using LayerBindingId = int;

class PermutationVector
{
public:
    using ValueType = unsigned int;
    using SizeType = unsigned int;
    using ArrayType = std::array<ValueType, MaxNumOfTensorDimensions>;
    using ConstIterator = typename ArrayType::const_iterator;

    /// @param dimMappings - Indicates how to translate tensor elements from a given source into the target destination,
    /// when source and target potentially have different memory layouts.
    ///
    /// E.g. for a 4-d tensor laid out in memory with the format (Batch Element, Height, Width, Channels),
    /// which is to be passed as an input to ArmNN, each source dimension is mapped to the corresponding
    /// ArmNN dimension. The Batch dimension remains the same (0 -> 0). The source Height dimension is mapped
    /// to the location of the ArmNN Height dimension (1 -> 2). Similar arguments are made for the Width and
    /// Channels (2 -> 3 and 3 -> 1). This will lead to @ref m_DimMappings pointing to the following array:
    /// [ 0, 2, 3, 1 ].
    ///
    /// Note that the mapping should be reversed if considering the case of ArmNN 4-d outputs (Batch Element,
    /// Channels, Height, Width) being written to a destination with the format mentioned above. We now have
    /// 0 -> 0, 2 -> 1, 3 -> 2, 1 -> 3, which, when reordered, leads to the following @ref m_DimMappings contents:
    /// [ 0, 3, 1, 2 ].
    ///
    PermutationVector(const ValueType *dimMappings, SizeType numDimMappings);

    PermutationVector(std::initializer_list<ValueType> dimMappings);

    ValueType operator[](SizeType i) const { return m_DimMappings.at(i); }

    SizeType GetSize() const { return m_NumDimMappings; }

    ConstIterator begin() const { return m_DimMappings.begin(); }
    ConstIterator end() const { return m_DimMappings.end(); }

    /// Returns true if both vectors have the same size and identical dimension mappings.
    bool IsEqual(const PermutationVector& other) const
    {
        if (m_NumDimMappings != other.m_NumDimMappings) return false;
        for (unsigned int i = 0; i < m_NumDimMappings; ++i)
        {
            if (m_DimMappings[i] != other.m_DimMappings[i]) return false;
        }
        return true;
    }

    /// Returns true if applying this permutation after @p other (or vice versa) restores the original order.
    bool IsInverse(const PermutationVector& other) const
    {
        bool isInverse = (GetSize() == other.GetSize());
        for (SizeType i = 0; isInverse && (i < GetSize()); ++i)
        {
            isInverse = (m_DimMappings[other.m_DimMappings[i]] == i);
        }
        return isInverse;
    }

private:
    ArrayType m_DimMappings;
    /// Number of valid entries in @ref m_DimMappings
    SizeType m_NumDimMappings;
};
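
// Illustrative usage of PermutationVector, following the NHWC <-> NCHW example
// in the class comment above (the constructors are defined in the library's
// source files, not in this header):
inline bool ExamplePermutationRoundTrip()
{
    const PermutationVector nhwcToArmnn({ 0, 2, 3, 1 }); // source dim -> ArmNN dim
    const PermutationVector armnnToNhwc({ 0, 3, 1, 2 }); // the reversed mapping
    return nhwcToArmnn.IsInverse(armnnToNhwc);           // expected: true
}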

/// Define LayerGuid type.
using LayerGuid = unsigned int;

class ITensorHandle;

/// Define the type of callback for the Debug layer to call
/// @param guid - guid of layer connected to the input of the Debug layer
/// @param slotIndex - index of the output slot connected to the input of the Debug layer
/// @param tensorHandle - TensorHandle for the input tensor to the Debug layer
using DebugCallbackFunction = std::function<void(LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)>;
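
// Illustrative sketch: any callable with the matching signature can serve as a
// DebugCallbackFunction, e.g. a lambda that inspects which layer output was
// observed. How the callback is registered with the runtime is outside the
// scope of this header.
inline DebugCallbackFunction ExampleMakeDebugCallback()
{
    return [](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensorHandle)
    {
        // A real callback would typically map and inspect the tensor data here.
        (void) guid;
        (void) slotIndex;
        (void) tensorHandle;
    };
}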

} // namespace armnn