FoldPadTests.cpp
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "LayersFwd.hpp"
#include <Network.hpp>
#include <TestUtils.hpp>
#include <doctest/doctest.h>

#include <Optimizer.hpp>

TEST_SUITE("Optimizer")
{
using namespace armnn;
using namespace armnn::optimizations;
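
// These tests exercise the pad-folding optimizations (FoldPadIntoConvolution2d,
// FoldPadIntoDepthwiseConvolution2d and FoldPadIntoPooling2d). Each folds an
// explicit Pad layer into the padding fields of the layer that consumes its
// output, removing the Pad layer from the graph.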
TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
{
    Graph graph;
    const unsigned int inputShape[]   = {1, 2, 2, 3};
    const unsigned int paddedShape[]  = {1, 6, 6, 3};
    const unsigned int weightsShape[] = {1, 2, 3, 3};
    const unsigned int outputShape[]  = {1, 2, 1, 1};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {2, 2},
                                 {2, 2},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Convolution2dDescriptor convolution2dDescriptor;
    convolution2dDescriptor.m_BiasEnabled = false;
    convolution2dDescriptor.m_StrideX     = 1;
    convolution2dDescriptor.m_StrideY     = 1;
    convolution2dDescriptor.m_DataLayout  = DataLayout::NHWC;

    std::vector<float> weightsVector(18);
    ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);

    Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
    conv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
    conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> conv2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
    conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimpleConv2d = [](const Layer* const layer)->bool {
        const auto conv2dLayer       = static_cast<const Convolution2dLayer*>(layer);
        const auto conv2dLayerParams = conv2dLayer->GetParameters();
        return IsLayerOfType<Convolution2dLayer>(layer) && (layer->GetNameStr() == "conv2d") &&
               (conv2dLayerParams.m_PadLeft == 0) && (conv2dLayerParams.m_PadRight == 0) &&
               (conv2dLayerParams.m_PadTop == 0) && (conv2dLayerParams.m_PadBottom == 0) &&
               (conv2dLayerParams.m_StrideX == 1) && (conv2dLayerParams.m_StrideY == 1) &&
               (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimpleConv2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoConvolution2d()));

    auto checkPadFoldedIntoConv2d = [](const Layer* const layer)->bool {
        const auto conv2dLayer       = static_cast<const Convolution2dLayer*>(layer);
        const auto conv2dLayerParams = conv2dLayer->GetParameters();
        return IsLayerOfType<Convolution2dLayer>(layer) && (layer->GetNameStr() == "folded-pad-into-conv2d") &&
               (conv2dLayerParams.m_PadLeft == 2) && (conv2dLayerParams.m_PadRight == 2) &&
               (conv2dLayerParams.m_PadTop == 2) && (conv2dLayerParams.m_PadBottom == 2) &&
               (conv2dLayerParams.m_StrideX == 1) && (conv2dLayerParams.m_StrideY == 1) &&
               (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkPadFoldedIntoConv2d,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
{
    Graph graph;
    const unsigned int inputShape[]   = {1, 2, 2, 3};
    const unsigned int paddedShape[]  = {1, 6, 6, 3};
    const unsigned int weightsShape[] = {1, 2, 3, 3};
    const unsigned int outputShape[]  = {1, 2, 1, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {2, 2},
                                 {2, 2},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    DepthwiseConvolution2dDescriptor depthwiseConvolution2dDescriptor;
    depthwiseConvolution2dDescriptor.m_BiasEnabled = false;
    depthwiseConvolution2dDescriptor.m_StrideX     = 1;
    depthwiseConvolution2dDescriptor.m_StrideY     = 1;
    depthwiseConvolution2dDescriptor.m_DataLayout  = DataLayout::NHWC;

    std::vector<float> weightsVector(18);
    ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true), weightsVector);

    auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor,
                                                                             "depthwiseConv2d");
    depthwiseConv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
    depthwiseConv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> depthwiseConv2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(depthwiseConv2dLayer->GetInputSlot(0));
    depthwiseConv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimpleDepthwiseConv2d = [](const Layer* const layer)->bool {
        const auto depthwiseConv2dLayer       = static_cast<const DepthwiseConvolution2dLayer*>(layer);
        const auto depthwiseConv2dLayerParams = depthwiseConv2dLayer->GetParameters();
        return IsLayerOfType<DepthwiseConvolution2dLayer>(layer) && (layer->GetNameStr() == "depthwiseConv2d") &&
               (depthwiseConv2dLayerParams.m_PadLeft == 0) && (depthwiseConv2dLayerParams.m_PadRight == 0) &&
               (depthwiseConv2dLayerParams.m_PadTop == 0) && (depthwiseConv2dLayerParams.m_PadBottom == 0) &&
               (depthwiseConv2dLayerParams.m_StrideX == 1) && (depthwiseConv2dLayerParams.m_StrideY == 1) &&
               (depthwiseConv2dLayerParams.m_BiasEnabled == false) &&
               (depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimpleDepthwiseConv2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoDepthwiseConvolution2d()));

    auto checkPadFoldedIntoDepthwiseConv2d = [](const Layer* const layer)->bool {
        const auto depthwiseConv2dLayer       = static_cast<const DepthwiseConvolution2dLayer*>(layer);
        const auto depthwiseConv2dLayerParams = depthwiseConv2dLayer->GetParameters();
        return IsLayerOfType<DepthwiseConvolution2dLayer>(layer) &&
               (layer->GetNameStr() == "folded-pad-into-depthwiseConv2d") &&
               (depthwiseConv2dLayerParams.m_PadLeft == 2) && (depthwiseConv2dLayerParams.m_PadRight == 2) &&
               (depthwiseConv2dLayerParams.m_PadTop == 2) && (depthwiseConv2dLayerParams.m_PadBottom == 2) &&
               (depthwiseConv2dLayerParams.m_StrideX == 1) && (depthwiseConv2dLayerParams.m_StrideY == 1) &&
               (depthwiseConv2dLayerParams.m_BiasEnabled == false) &&
               (depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkPadFoldedIntoDepthwiseConv2d,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("FoldPadLayerIntoPooling2dLayer")
{
    Graph graph;
    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;

    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimplePool2d = [&](const Layer* const layer) {
        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
               (pool2dLayer->GetParameters() == pooling2dDescriptor);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));

    auto checkPadFoldedIntoPool2d = [&](const Layer* const layer) {
        if (!IsLayerOfType<Pooling2dLayer>(layer) || (layer->GetNameStr() != "folded-pad-into-pool2d"))
        {
            return false;
        }

        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        const Pooling2dDescriptor pool2dLayerParams = pool2dLayer->GetParameters();

        Pooling2dDescriptor pool2dLayerParamsNoPad = pool2dLayerParams;
        pool2dLayerParamsNoPad.m_PadLeft   = 0;
        pool2dLayerParamsNoPad.m_PadRight  = 0;
        pool2dLayerParamsNoPad.m_PadTop    = 0;
        pool2dLayerParamsNoPad.m_PadBottom = 0;
        // If we fold, the PaddingMethod is set to IgnoreValue. The original is Exclude.
        pool2dLayerParamsNoPad.m_PaddingMethod = PaddingMethod::Exclude;

        return (pool2dLayerParamsNoPad == pooling2dDescriptor) && (pool2dLayerParams.m_PadLeft == 1) &&
               (pool2dLayerParams.m_PadRight == 1) && (pool2dLayerParams.m_PadTop == 1) &&
               (pool2dLayerParams.m_PadBottom == 1) &&
               (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkPadFoldedIntoPool2d,
                        &IsLayerOfType<OutputLayer>));
}

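// Note on the folded PaddingMethod above: in the unfolded graph the Pad layer writes
// real zeros which the average pooling then includes in its window counts. To keep the
// results numerically identical after folding, the folded descriptor uses
// PaddingMethod::IgnoreValue (padding positions count towards the average) rather
// than Exclude.
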
TEST_CASE("FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized")
{
    // In this test case we'll set up a pad layer with two outputs. One goes to a pooling layer and the other
    // goes to an output layer. FoldPadIntoPooling2d should not optimize this graph as it uses the
    // OptimizeForExclusiveConnection method.
    Graph graph;
    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;

    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    // Add the alternative branch from the pad layer to a second output layer.
    Layer* secondOutput = graph.AddLayer<OutputLayer>(1, "dummy output");
    padLayer->GetOutputSlot().Connect(secondOutput->GetInputSlot(0));

    auto checkSimplePool2d = [&](const Layer* const layer) {
        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
               (pool2dLayer->GetParameters() == pooling2dDescriptor);
    };

    // Initial sequence.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));

    // The network should not change.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>,
                        &IsLayerOfType<OutputLayer>));
}

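// FoldPadIntoPooling2d is defined as an OptimizeForExclusiveConnection over
// <PadLayer, Pooling2dLayer>, so it only matches when the Pad output feeds exactly
// one layer; the extra output layer in the test above is what blocks the fold.
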
TEST_CASE("FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding")
{
    // This test sets up an input layer, a Pad layer, a Pooling layer that already includes padding, and an
    // output layer. The optimization should not fire because the pooling layer already has an existing pad
    // and specifies PaddingMethod::Exclude.
    Graph graph;
    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
    // Include a pad with the pooling layer. This should prevent the optimization from working.
    pooling2dDescriptor.m_PadLeft       = 1;
    pooling2dDescriptor.m_PadRight      = 1;
    pooling2dDescriptor.m_PadTop        = 1;
    pooling2dDescriptor.m_PadBottom     = 1;
    pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;

    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimplePool2d = [&](const Layer* const layer) {
        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
               (pool2dLayer->GetParameters() == pooling2dDescriptor);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));

    // The optimization should not have modified the graph.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded")
{
    // This test sets up an input layer, a Pad layer with a large pad value, a Max Pooling layer, and an
    // output layer. The optimization should not fire because the pad value would change the result of the
    // max pooling layer.
    Graph graph;
    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});
    // For max pooling of floats, a pad value of 0 is more than enough to stop the fold from happening.
    // Set this to -std::numeric_limits<float>::infinity() to make the fold happen.
    padDescriptor.m_PadValue = 0;

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Max;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;

    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimplePool2d = [&](const Layer* const layer) {
        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
               (pool2dLayer->GetParameters() == pooling2dDescriptor);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));

    // The optimization should not have modified the graph.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));
}

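// The remaining tests run full inferences on the reference backend (Compute::CpuRef),
// so they are only built when the reference backend is enabled.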
#if defined(ARMNNREF_ENABLED)
TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
    // The idea of this test is to run a simple pad+pool2d network twice. Once
    // with FoldPadLayerIntoPooling2dLayer enabled and a second time with it
    // avoided. The output tensors of each should match.
    const unsigned int inputShape[]  = {1, 4, 4, 2};
    const unsigned int paddedShape[] = {1, 6, 6, 2};
    const unsigned int outputShape[] = {1, 4, 4, 2};
    std::vector<float> inputData({2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,

                                  18.0f, 20.0f, 24.0f, 22.0f,
                                  20.0f, 18.0f, 22.0f, 24.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,
                                 });
    try
    {
        // Create a network of input, pad, pooling 2D, output.
        INetworkPtr network = INetwork::Create();

        IConnectableLayer* inputLayer = network->AddInputLayer(0);
        TensorInfo inputInfo(4, inputShape, DataType::Float32);
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);

        PadDescriptor padDescriptor({{0, 0},
                                     {1, 1},
                                     {1, 1},
                                     {0, 0}});
        IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "Pad");
        TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
        padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);

        Pooling2dDescriptor pooling2dDescriptor;
        pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
        pooling2dDescriptor.m_PoolWidth  = 3;
        pooling2dDescriptor.m_PoolHeight = 3;
        pooling2dDescriptor.m_StrideX    = 1;
        pooling2dDescriptor.m_StrideY    = 1;
        pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
        IConnectableLayer* pool2dLayer = network->AddPooling2dLayer(pooling2dDescriptor, "Pool2D");
        TensorInfo outputInfo(4, outputShape, DataType::Float32);
        pool2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

        IConnectableLayer* outputLayer = network->AddOutputLayer(0);

        // Connect layers
        inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
        padLayer->GetOutputSlot(0).Connect(pool2dLayer->GetInputSlot(0));
        pool2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

        // Create ArmNN runtime
        IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
        // Optimise the network
        IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        // Load network into runtime
        NetworkId networkIdentifier;
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);

        TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
        inputTensorInfo.SetConstant(true);
        InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};

        // Initialise the output data to values different from the golden data, in case the inference fails.
        std::vector<float> optimizedData(32, -std::numeric_limits<float>::infinity());
        OutputTensors outputTensors{{0, Tensor(outputInfo, optimizedData.data())}};
        // Execute network
        run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
        // Unload it.
        run->UnloadNetwork(networkIdentifier);

        // In this second case the pad will have two outputs, one connected to the pooling layer and the second
        // connected to a second output layer. This will prevent the FoldPadLayerIntoPooling2dLayer optimization
        // from working. A previous test, FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized,
        // has shown that doing this avoids the optimization.
        IConnectableLayer* dummyOutputLayer = network->AddOutputLayer(1);
        padLayer->GetOutputSlot(0).Connect(dummyOutputLayer->GetInputSlot(0));

        // Optimize, load and execute it a second time.
        optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
        std::vector<float> goldenData(32, 0.0f);
        std::vector<float> padOutputData(72, 0.0f);
        OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
                                    {1, Tensor(paddedInfo, padOutputData.data())}};
        run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);

        // Now we can compare goldenData against optimizedData. They should be the same.
        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << std::endl;
        ARMNN_ASSERT_MSG(false, e.what());
    }
}

TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
    // The idea of this test is to run a simple pad+conv2d network twice. Once
    // with FoldPadLayerIntoConv2dLayer enabled and a second time with it
    // avoided. The output tensors of each should match.
    const unsigned int inputShape[]   = {1, 4, 4, 3}; // NHWCin
    const unsigned int paddedShape[]  = {1, 6, 6, 3};
    const unsigned int weightsShape[] = {4, 2, 2, 3}; // CoutHWCin
    const unsigned int outputShape[]  = {1, 5, 5, 4}; // NHWCout

    std::vector<float> inputData({2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,

                                  18.0f, 20.0f, 24.0f, 22.0f,
                                  20.0f, 18.0f, 22.0f, 24.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,

                                  2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,
                                 });
    try
    {
        // Create a network of input, pad, convolution 2D, output.
        INetworkPtr network = INetwork::Create();

        IConnectableLayer* inputLayer = network->AddInputLayer(0);
        TensorInfo inputInfo(4, inputShape, DataType::Float32);
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);

        PadDescriptor padDescriptor({{0, 0},
                                     {1, 1},
                                     {1, 1},
                                     {0, 0}});
        IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "Pad");
        TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
        padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);

        Convolution2dDescriptor convDescriptor;
        convDescriptor.m_DataLayout  = DataLayout::NHWC;
        convDescriptor.m_StrideX     = 1;
        convDescriptor.m_StrideY     = 1;
        convDescriptor.m_BiasEnabled = true;

        std::vector<float> weightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                          11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                                          21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                                          31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
        TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true);
        ConstTensor weights(weightsInfo, weightsData);
        std::vector<float> biasVector = {5, 6, 7, 8};
        TensorInfo biasInfo({4}, DataType::Float32, 0.0f, 0, true);
        ConstTensor bias(biasInfo, biasVector);
        Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);

        IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor,
                                                                        weights,
                                                                        optionalBias,
                                                                        "Conv2D");

        TensorInfo outputInfo(4, outputShape, DataType::Float32);
        conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

        IConnectableLayer* outputLayer = network->AddOutputLayer(0);

        // Connect layers
        inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
        padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0));
        conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

        // Create ArmNN runtime
        IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
        // Optimise the network
        IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        // Load network into runtime
        NetworkId networkIdentifier;
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);

        TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
        inputTensorInfo.SetConstant(true);
        InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};

        // Initialise the output data to values different from the golden data, in case the inference fails.
        std::vector<float> optimizedData(100, -std::numeric_limits<float>::infinity());
        OutputTensors outputTensors{{0, Tensor(outputInfo, optimizedData.data())}};
        // Execute network
        run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
        // Unload it.
        run->UnloadNetwork(networkIdentifier);

        // In this second case the pad will have two outputs, one connected to the conv layer and the second
        // connected to a second output layer. This will prevent the FoldPadLayerIntoConv2dLayer optimization
        // from working. The pooling test FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized
        // has shown that a second consumer blocks this family of optimizations.
        IConnectableLayer* dummyOutputLayer = network->AddOutputLayer(1);
        padLayer->GetOutputSlot(0).Connect(dummyOutputLayer->GetInputSlot(0));

        // Optimize, load and execute it a second time.
        optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
        std::vector<float> goldenData(100, 0.0f);
        std::vector<float> padOutputData(108, 0.0f);
        OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
                                    {1, Tensor(paddedInfo, padOutputData.data())}};
        run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);

        // Now we can compare goldenData against optimizedData. They should be the same.
        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << std::endl;
        ARMNN_ASSERT_MSG(false, e.what());
    }
}

TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
    // The idea of this test is to run a simple pad+depthwiseconv2d network twice. Once
    // with FoldPadLayerIntoDepthwiseConv2dLayer enabled and a second time with it
    // avoided. The output tensors of each should match.
    const unsigned int inputShape[]   = {1, 4, 4, 3};  // NHWCin
    const unsigned int paddedShape[]  = {1, 6, 6, 3};
    const unsigned int weightsShape[] = {1, 2, 2, 12}; // 1HWCout
    const unsigned int outputShape[]  = {1, 5, 5, 12}; // NHWCout

    std::vector<float> inputData({2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,

                                  18.0f, 20.0f, 24.0f, 22.0f,
                                  20.0f, 18.0f, 22.0f, 24.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,

                                  2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,
                                 });
    try
    {
        // Create a network of input, pad, depthwise convolution 2D, output.
        INetworkPtr network = INetwork::Create();

        IConnectableLayer* inputLayer = network->AddInputLayer(0);
        TensorInfo inputInfo(4, inputShape, DataType::Float32);
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);

        PadDescriptor padDescriptor({{0, 0},
                                     {1, 1},
                                     {1, 1},
                                     {0, 0}});
        IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "Pad");
        TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
        padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);

        DepthwiseConvolution2dDescriptor convDescriptor;
        convDescriptor.m_DataLayout  = DataLayout::NHWC;
        convDescriptor.m_StrideX     = 1;
        convDescriptor.m_StrideY     = 1;
        convDescriptor.m_BiasEnabled = true;

        std::vector<float> weightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                          11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                                          21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                                          31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
        TensorInfo weightsInfo(4, weightsShape, DataType::Float32, 0.0f, 0, true);
        ConstTensor weights(weightsInfo, weightsData);
        std::vector<float> biasVector = {5, 6, 7, 8, 9, 10, 11, 12, 5, 6, 7, 8};
        TensorInfo biasInfo({12}, DataType::Float32, 0.0f, 0, true);
        ConstTensor bias(biasInfo, biasVector);
        Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);

        IConnectableLayer* conv2dLayer = network->AddDepthwiseConvolution2dLayer(convDescriptor,
                                                                                 weights,
                                                                                 optionalBias,
                                                                                 "DepthwiseConv2D");

        TensorInfo outputInfo(4, outputShape, DataType::Float32);
        conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

        IConnectableLayer* outputLayer = network->AddOutputLayer(0);

        // Connect layers
        inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
        padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0));
        conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

        // Create ArmNN runtime
        IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
        // Optimise the network
        IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        // Load network into runtime
        NetworkId networkIdentifier;
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);

        TensorInfo inputTensorInfo = run->GetInputTensorInfo(networkIdentifier, 0);
        inputTensorInfo.SetConstant(true);
        InputTensors inputTensors{{0, ConstTensor(inputTensorInfo, inputData.data())}};

        // Initialise the output data to values different from the golden data, in case the inference fails.
        std::vector<float> optimizedData(300, -std::numeric_limits<float>::infinity());
        OutputTensors outputTensors{{0, Tensor(outputInfo, optimizedData.data())}};
        // Execute network
        run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
        // Unload it.
        run->UnloadNetwork(networkIdentifier);

        // In this second case the pad will have two outputs, one connected to the depthwise conv layer and the
        // second connected to a second output layer. This will prevent the FoldPadLayerIntoDepthwiseConv2dLayer
        // optimization from working. The pooling test
        // FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized has shown that a second consumer
        // blocks this family of optimizations.
        IConnectableLayer* dummyOutputLayer = network->AddOutputLayer(1);
        padLayer->GetOutputSlot(0).Connect(dummyOutputLayer->GetInputSlot(0));

        // Optimize, load and execute it a second time.
        optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
        std::vector<float> goldenData(300, 0.0f);
        std::vector<float> padOutputData(108, 0.0f);
        OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
                                    {1, Tensor(paddedInfo, padOutputData.data())}};
        run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);

        // Now we can compare goldenData against optimizedData. They should be the same.
        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << std::endl;
        ARMNN_ASSERT_MSG(false, e.what());
    }
}
#endif

}