1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
|
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "TensorUtils.hpp"
#include <backendsCommon/ITensorHandle.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnnUtils
{
// Builds a 4D TensorShape from batch/channel/height/width counts, ordering the
// dimensions according to the requested data layout.
// Throws armnn::InvalidArgumentException for an unrecognised layout value.
armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
                                  unsigned int numberOfChannels,
                                  unsigned int height,
                                  unsigned int width,
                                  const armnn::DataLayout dataLayout)
{
    if (dataLayout == armnn::DataLayout::NCHW)
    {
        return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
    }
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
    }
    throw armnn::InvalidArgumentException("Unknown data layout ["
                                          + std::to_string(static_cast<int>(dataLayout)) +
                                          "]", CHECK_LOCATION());
}
// Builds a 4D TensorInfo of the given data type from batch/channel/height/width
// counts, ordering the dimensions according to the requested data layout.
// Throws armnn::InvalidArgumentException for an unrecognised layout value.
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
                                unsigned int numberOfChannels,
                                unsigned int height,
                                unsigned int width,
                                const armnn::DataLayout dataLayout,
                                const armnn::DataType dataType)
{
    if (dataLayout == armnn::DataLayout::NCHW)
    {
        return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, dataType);
    }
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, dataType);
    }
    throw armnn::InvalidArgumentException("Unknown data layout ["
                                          + std::to_string(static_cast<int>(dataLayout)) +
                                          "]", CHECK_LOCATION());
}
// Maps the tensor handle and returns the (min, max) of its elements as floats.
// The handle is unmapped before returning.
//
// Fix: the original read tensor_data[0] without checking the element count,
// which is an out-of-bounds read for an empty tensor. The size is now queried
// (via the handle's shape) before mapping, and an empty tensor is rejected
// with the file's usual exception idiom instead of invoking UB.
//
// NOTE(review): assumes the mapped buffer holds float data — TODO confirm
// callers only pass float tensors.
std::pair<float, float> FindMinMax(armnn::ITensorHandle* tensorHandle)
{
    // Check the size before mapping so an empty tensor never leaves the
    // handle mapped and never dereferences the buffer.
    const unsigned int numElements = tensorHandle->GetShape().GetNumElements();
    if (numElements == 0)
    {
        throw armnn::InvalidArgumentException("Cannot find min/max of an empty tensor",
                                              CHECK_LOCATION());
    }

    auto tensorData = static_cast<const float*>(tensorHandle->Map(true));

    // Seed min/max with the first element, then scan the rest.
    float min = tensorData[0];
    float max = tensorData[0];
    for (unsigned int i = 1; i < numElements; ++i)
    {
        if (tensorData[i] < min)
        {
            min = tensorData[i];
        }
        // min <= max always holds, so a value below min can never be the new
        // max — else-if is safe and saves a comparison.
        else if (tensorData[i] > max)
        {
            max = tensorData[i];
        }
    }

    tensorHandle->Unmap();
    return std::make_pair(min, max);
}
// Returns a copy of tensorShape with an extra dimension of size 1 inserted at
// the given axis. A negative axis counts back from the end of the OUTPUT
// shape, so for an N-D input the valid range is [-(N+1), N].
// Throws armnn::InvalidArgumentException if the axis is out of that range.
armnn::TensorShape ExpandDims(const armnn::TensorShape& tensorShape, int axis)
{
    const unsigned int inputDimensions = tensorShape.GetNumDimensions();
    const unsigned int outputDimensions = inputDimensions + 1;

    // Validate against the output rank: negative axes index the output shape.
    const int lowestAxis = -boost::numeric_cast<int>(outputDimensions);
    const int highestAxis = boost::numeric_cast<int>(inputDimensions);
    if (axis < lowestAxis || axis > highestAxis)
    {
        throw armnn::InvalidArgumentException(
            boost::str(boost::format("Invalid expansion axis %1% for %2%D input tensor. %3%") %
                       axis %
                       inputDimensions %
                       CHECK_LOCATION().AsString()));
    }

    // Normalise a negative axis to its non-negative equivalent.
    if (axis < 0)
    {
        axis += boost::numeric_cast<int>(outputDimensions);
    }

    // Copy the existing dimensions, then splice in the new unit dimension.
    std::vector<unsigned int> expandedShape;
    expandedShape.reserve(outputDimensions);
    for (unsigned int dim = 0; dim < inputDimensions; ++dim)
    {
        expandedShape.push_back(tensorShape[dim]);
    }
    expandedShape.insert(expandedShape.begin() + axis, 1);

    return armnn::TensorShape(outputDimensions, expandedShape.data());
}
}
|