aboutsummaryrefslogtreecommitdiff
path: root/src/backends/backendsCommon/LayerSupportBase.cpp
blob: 69b980d56f72fcda1c035ca3e3a295e0b4e80fc7 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/Deprecated.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>

#include <backendsCommon/LayerSupportBase.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <sstream>

namespace
{

/// Shared fallback used by every Is*Supported query that this base class does
/// not implement: reports the layer as unsupported and, when the caller asked
/// for a reason, formats a diagnostic of the form
/// "<func> is not implemented [<file>:<line>]".
///
/// @param func                 Name of the calling query (typically __func__).
/// @param file                 Source file of the caller (typically __FILE__).
/// @param line                 Source line of the caller (typically __LINE__).
/// @param reasonIfUnsupported  Optional out-string; filled only when present.
/// @return Always false.
bool DefaultLayerSupport(const char* func,
                         const char* file,
                         unsigned int line,
                         armnn::Optional<std::string&> reasonIfUnsupported)
{
    // NOTE: We only need to compose and return the reason if the optional
    // parameter is not empty - building the message is otherwise wasted work.
    if (reasonIfUnsupported)
    {
        std::stringstream message;
        message << func << " is not implemented [" << file << ":" << line << "]";

        reasonIfUnsupported.value() = message.str();
    }

    return false;
}

} // anonymous namespace

namespace armnn
{

bool LayerSupportBase::IsLayerSupported(const LayerType& type,
                                        const std::vector<TensorInfo>& infos,
                                        const BaseDescriptor& descriptor,
                                        const Optional<LstmInputParamsInfo>&,
                                        const Optional<QuantizedLstmInputParamsInfo>&,
                                        Optional<std::string&> reasonIfUnsupported) const
{
    switch(type)
    {
        case LayerType::MemCopy:
            return IsMemCopySupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::MemImport:
            return IsMemImportSupported(infos[0], infos[1], reasonIfUnsupported);
        case LayerType::StandIn:
        {
            auto desc = *(PolymorphicDowncast<const StandInDescriptor*>(&descriptor));

            if (infos.size() != (desc.m_NumInputs + desc.m_NumOutputs))
            {
                throw InvalidArgumentException("Number of StandIn layer TensorInfos does not equal "
                                               "the combined number of input and output slots assigned "
                                               "to the StandIn descriptor");
            }

            std::vector<const TensorInfo*> inputInfos;
            for (uint32_t i = 0; i < desc.m_NumInputs; i++)
            {
                inputInfos.push_back(&infos[i]);
            }
            std::vector<const TensorInfo*> outputInfos;
            for (uint32_t i = desc.m_NumInputs; i < infos.size(); i++)
            {
                outputInfos.push_back(&infos[i]);
            }

            return IsStandInSupported(inputInfos,
                                      outputInfos,
                                      desc,
                                      reasonIfUnsupported);
        }
        default:
            return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
    }
}

bool LayerSupportBase::IsDetectionPostProcessSupported(const TensorInfo&, // boxEncodings
                                                       const TensorInfo&, // scores
                                                       const TensorInfo&, // anchors
                                                       const TensorInfo&, // detectionBoxes
                                                       const TensorInfo&, // detectionClasses
                                                       const TensorInfo&, // detectionScores
                                                       const TensorInfo&, // numDetections
                                                       const DetectionPostProcessDescriptor&, //descriptor
                                                       Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

/// The base class unconditionally reports MemCopy as supported, regardless of
/// the input/output TensorInfos.
bool LayerSupportBase::IsMemCopySupported(const armnn::TensorInfo& /*input*/,
                                          const armnn::TensorInfo& /*output*/,
                                          armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
{
    return true;
}

/// The base class unconditionally reports MemImport as supported, regardless
/// of the input/output TensorInfos.
bool LayerSupportBase::IsMemImportSupported(const armnn::TensorInfo& /*input*/,
                                            const armnn::TensorInfo& /*output*/,
                                            armnn::Optional<std::string &> /*reasonIfUnsupported*/) const
{
    return true;
}

bool LayerSupportBase::IsMergeSupported(const TensorInfo&, // input0
                                        const TensorInfo&, // input1
                                        const TensorInfo&, // output
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo&, // input
                                                const TensorInfo&, // previousCellStateIn
                                                const TensorInfo&, // previousOutputIn
                                                const TensorInfo&, // cellStateOut
                                                const TensorInfo&, // output
                                                const QuantizedLstmInputParamsInfo&, // paramsInfo
                                                Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

bool LayerSupportBase::IsShapeSupported(const TensorInfo&, // input
                                        const TensorInfo&, // output
                                        Optional<std::string&> reasonIfUnsupported) const
{
    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
}

/// StandIn layers are placeholders with no executable implementation, so no
/// backend can run them: always reports unsupported with a dedicated reason.
bool LayerSupportBase::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
                                          const std::vector<const TensorInfo*>& outputs,
                                          const StandInDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
{
    IgnoreUnused(inputs, outputs, descriptor);

    // Populate the reason only when the caller supplied somewhere to put it.
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "StandIn layer is not executable via backends";
    }

    return false;
}

} // namespace armnn