aboutsummaryrefslogtreecommitdiff
path: root/Utils.hpp
blob: f68747b076a07cd39f9a94ba8e8176ff0f0c2524 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once
#include <armnn/ArmNN.hpp>

#include <CpuExecutor.h>
#include <HalInterfaces.h>
#include <NeuralNetworks.h>
#include <Utils.h>

#include <fstream>
#include <iomanip>
#include <sstream>
#include <string>
#include <vector>

namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
#endif

#ifdef ARMNN_ANDROID_NN_V1_3
namespace V1_3 = ::android::hardware::neuralnetworks::V1_3;
#endif

namespace armnn_driver
{

#ifdef ARMNN_ANDROID_R
using DataLocation = ::android::nn::hal::DataLocation;
#endif

/// Returns the graph to compile/execute for a given HAL model.
/// For HAL 1.0 - 1.2 the model IS the graph, so it is returned unchanged;
/// the V1_3 overload below returns the model's main subgraph instead.
/// These overloads let version-agnostic template code (e.g. GetModelSummary)
/// access inputIndexes/operations/operands uniformly across HAL versions.
inline const V1_0::Model&    getMainModel(const V1_0::Model& model) { return model; }
inline const V1_1::Model&    getMainModel(const V1_1::Model& model) { return model; }

#if defined (ARMNN_ANDROID_NN_V1_2) || defined (ARMNN_ANDROID_NN_V1_3)
inline const V1_2::Model&    getMainModel(const V1_2::Model& model) { return model; }
#endif

#ifdef ARMNN_ANDROID_NN_V1_3
inline const V1_3::Subgraph& getMainModel(const V1_3::Model& model) { return model.main; }
#endif

extern const armnn::PermutationVector g_DontPermute;

/// Exception thrown when the driver encounters an operand whose type it
/// cannot handle. The offending type is kept in m_type so that the catch
/// site can include it in diagnostics.
template <typename OperandType>
class UnsupportedOperand: public std::runtime_error
{
public:
    /// @param type the unsupported operand type, retained for reporting.
    /// explicit: an operand type should never silently convert to an exception.
    explicit UnsupportedOperand(const OperandType type)
        : std::runtime_error("Operand type is unsupported")
        , m_type(type)
    {}

    OperandType m_type;
};

/// Swizzles tensor data in @a input according to the dimension mappings.
/// @a output must point to a buffer large enough for the permuted tensor
/// (declaration only; implemented in the corresponding .cpp).
void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
                                     const armnn::PermutationVector& mappings);

/// Returns a pointer to a specific location in a pool
void* GetMemoryFromPool(V1_0::DataLocation location,
                        const std::vector<android::nn::RunTimePoolInfo>& memPools);

/// Builds an armnn::TensorInfo from a HAL 1.0 operand.
/// Can throw UnsupportedOperand
armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand);

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
/// HAL 1.2 overload of the above; may also throw UnsupportedOperand.
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand);
#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
/// HAL 1.3 overload of the above; may also throw UnsupportedOperand.
armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand);
#endif

/// Returns a short human-readable description of the operand, used by
/// GetModelSummary when listing a model's inputs and outputs.
std::string GetOperandSummary(const V1_0::Operand& operand);

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
/// HAL 1.2 overload of GetOperandSummary.
std::string GetOperandSummary(const V1_2::Operand& operand);
#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
/// HAL 1.3 overload of GetOperandSummary.
std::string GetOperandSummary(const V1_3::Operand& operand);
#endif

/// Returns a multi-line, human-readable summary of @a model: the counts of
/// inputs/operations/outputs/operands, followed by one line each listing the
/// input operands, the operation types, and the output operands.
/// Works for any HAL model version via the getMainModel overload set.
template <typename HalModel>
std::string GetModelSummary(const HalModel& model)
{
    // Resolve the main (sub)graph once, instead of calling getMainModel on
    // every single access as before.
    const auto& mainModel = getMainModel(model);

    std::stringstream result;

    result << mainModel.inputIndexes.size() << " input(s), "
           << mainModel.operations.size() << " operation(s), "
           << mainModel.outputIndexes.size() << " output(s), "
           << mainModel.operands.size() << " operand(s) "
           << std::endl;

    result << "Inputs: ";
    for (uint32_t inputIndex : mainModel.inputIndexes)
    {
        result << GetOperandSummary(mainModel.operands[inputIndex]) << ", ";
    }
    result << std::endl;

    result << "Operations: ";
    for (const auto& operation : mainModel.operations)
    {
        // Stream the std::string directly; the previous .c_str() conversion
        // was redundant.
        result << toString(operation.type) << ", ";
    }
    result << std::endl;

    result << "Outputs: ";
    for (uint32_t outputIndex : mainModel.outputIndexes)
    {
        result << GetOperandSummary(mainModel.operands[outputIndex]) << ", ";
    }
    result << std::endl;

    return result.str();
}

/// Dumps the given tensor under @a dumpDir, keyed by request and tensor name
/// (declaration only; file format is defined by the implementation).
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const armnn::ConstTensor& tensor);

/// Writes the profiler's JSON results to @a dumpDir for the given network,
/// but only when @a gpuProfilingEnabled is set (per the name; implemented elsewhere).
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler);

/// Exports the optimized network as a GraphViz .dot file into @a dumpDir;
/// returns the path/name of the created file (NOTE(review): return semantics
/// inferred from RenameExportedFiles usage - confirm against the .cpp).
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir);

/// Serializes @a network into @a dumpDir; returns the written file's name
/// (NOTE(review): inferred from RenameExportedFiles usage - confirm).
std::string SerializeNetwork(const armnn::INetwork& network, const std::string& dumpDir);

/// Renames previously exported serialized/dot files so their names include
/// the network id they belong to.
void RenameExportedFiles(const std::string& existingSerializedFileName,
                         const std::string& existingDotFileName,
                         const std::string& dumpDir,
                         const armnn::NetworkId networkId);

/// Renames a single exported file in @a dumpDir, applying @a extension and
/// the network id.
void RenameFile(const std::string& existingName,
                const std::string& extension,
                const std::string& dumpDir,
                const armnn::NetworkId networkId);

/// Checks if a tensor info represents a dynamic tensor
bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);

/// Checks for ArmNN support of dynamic tensors.
bool AreDynamicTensorsSupported(void);

/// Returns a timestamp string suitable for building dump file names.
std::string GetFileTimestamp();

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
/// Converts an armnn::TensorInfo's shape into the Android NN OutputShape
/// representation. Scalars are reported as zero-dimensional tensors (empty
/// dimension vector), which is what Android expects. isSufficient is always
/// set to true.
inline V1_2::OutputShape ComputeShape(const armnn::TensorInfo& info)
{
    V1_2::OutputShape outputShape;

    const armnn::TensorShape inputShape = info.GetShape();
    if (inputShape.GetDimensionality() != armnn::Dimensionality::Scalar)
    {
        const unsigned int dimCount = inputShape.GetNumDimensions();
        android::hardware::hidl_vec<uint32_t> dims;
        dims.resize(dimCount);
        for (unsigned int i = 0u; i < dimCount; ++i)
        {
            dims[i] = inputShape[i];
        }
        outputShape.dimensions = dims;
    }
    else
    {
        // Android will expect scalars as a zero dimensional tensor
        outputShape.dimensions = android::hardware::hidl_vec<uint32_t>{};
    }

    outputShape.isSufficient = true;

    return outputShape;
}
#endif

void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);

} // namespace armnn_driver