26 case PoolingAlgorithm::Max:
28 return std::numeric_limits<float>::lowest();
30 case PoolingAlgorithm::Average:
31 case PoolingAlgorithm::L2:
42 using Accumulator = std::function<void(float & accu, float value)>;
48 case PoolingAlgorithm::Max:
50 return [](
float & accu,
float value) {
57 case PoolingAlgorithm::Average:
59 return [](
float & accu,
float value) {
64 case PoolingAlgorithm::L2:
66 return [](
float & accu,
float value) {
67 accu += (value*value);
78 using Executor = std::function<void(float & accumulated, float kernelSize)>;
84 case PoolingAlgorithm::Max:
86 return [](
float & ,
float ) {};
89 case PoolingAlgorithm::Average:
91 return [](
float & accumulated,
float kernelSize) {
92 accumulated /= kernelSize;
96 case PoolingAlgorithm::L2:
98 return [](
float & accumulated,
float kernelSize) {
99 accumulated = sqrtf(accumulated / kernelSize);
110 bool OnPaddingOnly(
int start,
int end,
int maxRange)
112 if (end <= 0 || start > maxRange)
123 bool ClampRange(
int & start,
int & end,
int maxRange)
125 if (start < 0 || end > maxRange)
127 start = std::min(std::max(start, 0), maxRange);
128 end = std::min(std::max(end, 0), maxRange);
168 float defaultInitializer = DefaultInitializer(params.
m_PoolType);
170 Accumulator accumulate = GetAccumulator(params.
m_PoolType);
171 Executor execute = GetExecutor(params.
m_PoolType);
183 for (
int n = 0; n < batchSize; n++)
185 for (
int c = 0; c < channels; c++)
187 for (
int yOutput = 0; yOutput < heightOutput; yOutput++)
190 int hstart = (yOutput * strideY) - padTop;
191 int hend = hstart + poolHeight;
194 hend = std::min(hend, heightInput + padBottom);
196 int height = hend - hstart;
197 bool hclamped = ClampRange(hstart, hend, heightInput);
199 for (
int xOutput = 0; xOutput < widthOutput; xOutput++)
201 int wstart = (xOutput * strideX) - padLeft;
202 int wend = wstart + poolWidth;
206 wend = std::min(wend, widthInput + padRight);
208 float result = defaultInitializer;
216 if (OnPaddingOnly(hstart, hend, heightInput) ||
217 OnPaddingOnly(wstart, wend, widthInput))
225 outputIndex = n * heightOutput * widthOutput * channels +
226 yOutput * widthOutput * channels +
232 outputIndex = n * heightOutput * widthOutput * channels +
233 c * heightOutput * widthOutput +
234 yOutput * widthOutput +
238 rOutputEncoder[
static_cast<unsigned int>(outputIndex)];
239 rOutputEncoder.
Set(result);
243 bool clamped = hclamped |= ClampRange(wstart, wend, widthInput);
252 for (
auto yInput = hstart; yInput < hend; yInput++)
254 for (
auto xInput = wstart; xInput < wend; xInput++)
260 inputIndex = n * heightInput * widthInput * channels +
261 yInput * widthInput * channels +
268 inputIndex = n * heightInput * widthInput * channels +
269 c * heightInput * widthInput +
270 yInput * widthInput +
274 accumulate(result, decodedInputVec[static_cast<unsigned int>(inputIndex)]);
278 execute(result, poolAreaSize);
284 outputIndex = n * heightOutput * widthOutput * channels +
285 yOutput * widthOutput * channels +
291 outputIndex = n * heightOutput * widthOutput * channels +
292 c * heightOutput * widthOutput +
293 yOutput * widthOutput +
297 rOutputEncoder[
static_cast<unsigned int>(outputIndex)];
298 rOutputEncoder.
Set(result);
uint32_t m_PadBottom
Padding bottom value in the height dimension.
unsigned int GetWidthIndex() const
const TensorShape & GetShape() const
uint32_t m_PadLeft
Padding left value in the width dimension.
uint32_t m_PoolWidth
Pooling width value.
virtual std::vector< float > DecodeTensor(const TensorShape &tensorShape, bool isDepthwise=false)=0
virtual void Set(IType right)=0
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
Copyright (c) 2021 ARM Limited and Contributors.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
unsigned int GetHeightIndex() const
uint32_t m_PoolHeight
Pooling height value.
uint32_t m_PadRight
Padding right value in the width dimension.
Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout...
armnn::DataLayout GetDataLayout() const
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
std::enable_if_t< std::is_unsigned< Source >::value &&std::is_unsigned< Dest >::value, Dest > numeric_cast(Source source)
A Pooling2dDescriptor for the Pooling2dLayer.
void Pooling2d(Decoder< float > &rInputDecoder, Encoder< float > &rOutputEncoder, const TensorInfo &inputInfo, const TensorInfo &outputInfo, const Pooling2dDescriptor &params)
Computes the Pooling2d operation.
unsigned int GetChannelsIndex() const
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.