1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
|
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#pragma once
#include "ArmnnDriver.hpp"
#include <NeuralNetworks.h>
#include <armnn/ArmNN.hpp>
#include <CpuExecutor.h>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
namespace armnn_driver
{
extern const armnn::PermutationVector g_DontPermute;
/// Exception thrown when an Android NN operand has a type this driver cannot
/// handle. The offending type is stored in m_type so the catch site can report it.
class UnsupportedOperand : public std::runtime_error
{
public:
    /// @param type The unsupported operand type; kept in m_type for inspection.
    // explicit: a bare OperandType must never silently convert to an exception.
    explicit UnsupportedOperand(const OperandType type)
        : std::runtime_error("Operand type is unsupported")
        , m_type(type)
    {}

    OperandType m_type;
};
/// Swizzles tensor data in @a input according to the dimension mappings.
/// @a tensor describes the tensor being permuted; @a output receives the
/// rearranged copy of the data in @a input (see implementation for layout details).
void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
const armnn::PermutationVector& mappings);
/// Returns a pointer to a specific location in a pool.
/// @a location selects the pool (and the offset within it) from @a memPools.
void* GetMemoryFromPool(DataLocation location,
const std::vector<android::nn::RunTimePoolInfo>& memPools);
/// Converts an Android NN @a operand into an armnn::TensorInfo.
/// Can throw UnsupportedOperand when the operand's type is not handled.
armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand);
/// Returns a short human-readable description of @a operand, used when
/// building the per-model summary in GetModelSummary.
std::string GetOperandSummary(const Operand& operand);
/// Builds a human-readable, multi-line summary of @a model for logging:
/// first a line of input/operation/output/operand counts, then one line each
/// listing the input operand summaries, the operation type names, and the
/// output operand summaries.
/// @tparam Model Any NN model type exposing inputIndexes, outputIndexes,
///         operations (with a .type streamable via toString) and operands.
template <typename Model>
std::string GetModelSummary(const Model& model)
{
    std::stringstream result;

    result << model.inputIndexes.size() << " input(s), " << model.operations.size() << " operation(s), " <<
        model.outputIndexes.size() << " output(s), " << model.operands.size() << " operand(s)" << '\n';

    result << "Inputs: ";
    for (auto inputIndex : model.inputIndexes)
    {
        result << GetOperandSummary(model.operands[inputIndex]) << ", ";
    }
    result << '\n';

    result << "Operations: ";
    for (const auto& operation : model.operations)
    {
        // .c_str() retained deliberately: toString's return type is not visible
        // here and may not provide an operator<< of its own.
        result << toString(operation.type).c_str() << ", ";
    }
    result << '\n';

    result << "Outputs: ";
    for (auto outputIndex : model.outputIndexes)
    {
        result << GetOperandSummary(model.operands[outputIndex]) << ", ";
    }
    result << '\n';

    return result.str();
}
/// Dumps the contents of @a tensor for debugging. @a dumpDir names the target
/// directory; @a requestName and @a tensorName identify the dump (see
/// implementation for the exact file naming and format).
void DumpTensor(const std::string& dumpDir,
const std::string& requestName,
const std::string& tensorName,
const armnn::ConstTensor& tensor);
/// Writes the JSON profiling output for network @a networkId from @a profiler
/// into @a dumpDir, but only when @a gpuProfilingEnabled is set.
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
const std::string& dumpDir,
armnn::NetworkId networkId,
const armnn::IProfiler* profiler);
/// Exports @a optimizedNetwork as a GraphViz dot file into @a dumpDir.
/// @a model is the source Android NN model the network was built from.
void ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
const std::string& dumpDir,
const ::android::hardware::neuralnetworks::V1_0::Model& model);
}
|