1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
|
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "SpaceToBatchNd.hpp"
#include <backendsCommon/DataLayoutIndexed.hpp>
namespace armnn
{
// Computes the flattened element offset of coordinate (b, h, w, c) within a
// 4D tensor of the given shape, honouring the tensor's data layout
// (NHWC or NCHW as reported by dataLayout).
unsigned int GetOffset(const TensorShape& shape,
                       unsigned int b,
                       unsigned int h,
                       unsigned int w,
                       unsigned int c,
                       const DataLayoutIndexed& dataLayout)
{
    const unsigned int heightDim   = shape[dataLayout.GetHeightIndex()];
    const unsigned int widthDim    = shape[dataLayout.GetWidthIndex()];
    const unsigned int channelsDim = shape[dataLayout.GetChannelsIndex()];

    if (dataLayout.GetDataLayout() != DataLayout::NHWC)
    {
        // NCHW: offset = ((b * C + c) * H + h) * W + w
        return ((b * channelsDim + c) * heightDim + h) * widthDim + w;
    }

    // NHWC: offset = ((b * H + h) * W + w) * C + c
    return ((b * heightDim + h) * widthDim + w) * channelsDim + c;
}
template<typename T>
void SpaceToBatchNd(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
const SpaceToBatchNdDescriptor& params,
const T* inputData,
T* outputData)
{
DataLayoutIndexed dataLayout = params.m_DataLayout;
const TensorShape& inputShape = inputInfo.GetShape();
const TensorShape& outputShape = outputInfo.GetShape();
const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
const unsigned int inputBatchSize = inputShape[0];
const unsigned int inputHeight = inputShape[dataLayout.GetHeightIndex()];
const unsigned int inputWidth = inputShape[dataLayout.GetWidthIndex()];
const unsigned int outputBatchSize = outputShape[0];
const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
const unsigned int blockHeight = params.m_BlockShape[0];
const unsigned int blockWidth = params.m_BlockShape[1];
const unsigned int paddingTop = params.m_PadList[0].first;
const unsigned int paddingLeft = params.m_PadList[1].first;
for (unsigned int outB = 0; outB < outputBatchSize; outB++)
{
unsigned int inB = outB % inputBatchSize;
unsigned int shiftW = (outB / inputBatchSize) % blockWidth;
unsigned int shiftH = (outB / inputBatchSize) / blockWidth;
for (unsigned int outH = 0; outH < outputHeight; outH++)
{
for (unsigned int outW = 0; outW < outputWidth; outW++)
{
if (outH * blockHeight + shiftH < paddingTop ||
outH * blockHeight + shiftH >= paddingTop + inputHeight ||
outW * blockWidth + shiftW < paddingLeft ||
outW * blockWidth + shiftW >= paddingLeft + inputWidth)
{
for (unsigned int c = 0; c < channels; c++)
{
unsigned int outOffset = GetOffset(outputShape,
outB,
outH,
outW,
c,
dataLayout);
outputData[outOffset] = 0;
}
}
else
{
for (unsigned int c = 0; c < channels; c++)
{
unsigned int inOffset = GetOffset(inputShape,
inB,
(outH * blockHeight + shiftH) - paddingTop,
(outW * blockWidth + shiftW) - paddingLeft,
c,
dataLayout);
unsigned int outOffset = GetOffset(outputShape,
outB,
outH,
outW,
c,
dataLayout);
outputData[outOffset] = inputData[inOffset];
}
}
}
}
}
}
// Explicit instantiations for the element types handled by this
// implementation: float32 and quantized uint8.
template void SpaceToBatchNd<float>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
const SpaceToBatchNdDescriptor& params,
const float* inputData,
float* outData);
template void SpaceToBatchNd<uint8_t>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
const SpaceToBatchNdDescriptor& params,
const uint8_t* inputData,
uint8_t* outData);
} //namespace armnn
|