ArmNN 21.08
FoldPadTests.cpp
//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "LayersFwd.hpp"
#include <Network.hpp>
#include <test/TestUtils.hpp>
#include <doctest/doctest.h>
#include <Optimizer.hpp>

TEST_SUITE("Optimizer")
{
using namespace armnn;
using namespace armnn::optimizations;

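// These tests exercise the pad-folding optimizations: FoldPadIntoConvolution2d,
// FoldPadIntoDepthwiseConvolution2d and FoldPadIntoPooling2d. Each is an
// OptimizeForExclusiveConnection substitution that absorbs a preceding PadLayer into
// the padding fields of the following layer's descriptor, removing the PadLayer from
// the graph whenever the connection between the two layers is exclusive.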
TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
{
    Graph graph;
    const unsigned int inputShape[]   = {1, 2, 2, 3};
    const unsigned int paddedShape[]  = {1, 6, 6, 3};
    const unsigned int weightsShape[] = {1, 2, 3, 3};
    const unsigned int outputShape[]  = {1, 2, 1, 1};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {2, 2},
                                 {2, 2},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Convolution2dDescriptor convolution2dDescriptor;
    convolution2dDescriptor.m_BiasEnabled = false;
    convolution2dDescriptor.m_StrideX     = 1;
    convolution2dDescriptor.m_StrideY     = 1;
    convolution2dDescriptor.m_DataLayout  = DataLayout::NHWC;

    std::vector<float> weightsVector(18);
    ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32), weightsVector);

    Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
    conv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
    conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> conv2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
    conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimpleConv2d = [](const Layer* const layer) -> bool {
        const auto conv2dLayer       = static_cast<const Convolution2dLayer*>(layer);
        const auto conv2dLayerParams = conv2dLayer->GetParameters();
        return IsLayerOfType<Convolution2dLayer>(layer) && (layer->GetNameStr() == "conv2d") &&
               (conv2dLayerParams.m_PadLeft == 0) && (conv2dLayerParams.m_PadRight == 0) &&
               (conv2dLayerParams.m_PadTop == 0) && (conv2dLayerParams.m_PadBottom == 0) &&
               (conv2dLayerParams.m_StrideX == 1) && (conv2dLayerParams.m_StrideY == 1) &&
               (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimpleConv2d,
                        &IsLayerOfType<OutputLayer>));
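
    // Fold the Pad layer into the Convolution2d layer's padding fields.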
    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoConvolution2d()));

    auto checkPadFoldedIntoConv2d = [](const Layer* const layer) -> bool {
        const auto conv2dLayer       = static_cast<const Convolution2dLayer*>(layer);
        const auto conv2dLayerParams = conv2dLayer->GetParameters();
        return IsLayerOfType<Convolution2dLayer>(layer) && (layer->GetNameStr() == "folded-pad-into-conv2d") &&
               (conv2dLayerParams.m_PadLeft == 2) && (conv2dLayerParams.m_PadRight == 2) &&
               (conv2dLayerParams.m_PadTop == 2) && (conv2dLayerParams.m_PadBottom == 2) &&
               (conv2dLayerParams.m_StrideX == 1) && (conv2dLayerParams.m_StrideY == 1) &&
               (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkPadFoldedIntoConv2d,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
{
    Graph graph;
    const unsigned int inputShape[]   = {1, 2, 2, 3};
    const unsigned int paddedShape[]  = {1, 6, 6, 3};
    const unsigned int weightsShape[] = {1, 2, 3, 3};
    const unsigned int outputShape[]  = {1, 2, 1, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {2, 2},
                                 {2, 2},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    DepthwiseConvolution2dDescriptor depthwiseConvolution2dDescriptor;
    depthwiseConvolution2dDescriptor.m_BiasEnabled = false;
    depthwiseConvolution2dDescriptor.m_StrideX     = 1;
    depthwiseConvolution2dDescriptor.m_StrideY     = 1;
    depthwiseConvolution2dDescriptor.m_DataLayout  = DataLayout::NHWC;

    std::vector<float> weightsVector(18);
    ConstTensor weights(TensorInfo(4, weightsShape, DataType::Float32), weightsVector);

    auto* depthwiseConv2dLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor,
                                                                             "depthwiseConv2d");
    depthwiseConv2dLayer->m_Weight = std::make_unique<ScopedTensorHandle>(weights);
    depthwiseConv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> depthwiseConv2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(depthwiseConv2dLayer->GetInputSlot(0));
    depthwiseConv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimpleDepthwiseConv2d = [](const Layer* const layer) -> bool {
        const auto depthwiseConv2dLayer       = static_cast<const DepthwiseConvolution2dLayer*>(layer);
        const auto depthwiseConv2dLayerParams = depthwiseConv2dLayer->GetParameters();
        return IsLayerOfType<DepthwiseConvolution2dLayer>(layer) && (layer->GetNameStr() == "depthwiseConv2d") &&
               (depthwiseConv2dLayerParams.m_PadLeft == 0) && (depthwiseConv2dLayerParams.m_PadRight == 0) &&
               (depthwiseConv2dLayerParams.m_PadTop == 0) && (depthwiseConv2dLayerParams.m_PadBottom == 0) &&
               (depthwiseConv2dLayerParams.m_StrideX == 1) && (depthwiseConv2dLayerParams.m_StrideY == 1) &&
               (depthwiseConv2dLayerParams.m_BiasEnabled == false) &&
               (depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimpleDepthwiseConv2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoDepthwiseConvolution2d()));

    auto checkPadFoldedIntoDepthwiseConv2d = [](const Layer* const layer) -> bool {
        const auto depthwiseConv2dLayer       = static_cast<const DepthwiseConvolution2dLayer*>(layer);
        const auto depthwiseConv2dLayerParams = depthwiseConv2dLayer->GetParameters();
        return IsLayerOfType<DepthwiseConvolution2dLayer>(layer) &&
               (layer->GetNameStr() == "folded-pad-into-depthwiseConv2d") &&
               (depthwiseConv2dLayerParams.m_PadLeft == 2) && (depthwiseConv2dLayerParams.m_PadRight == 2) &&
               (depthwiseConv2dLayerParams.m_PadTop == 2) && (depthwiseConv2dLayerParams.m_PadBottom == 2) &&
               (depthwiseConv2dLayerParams.m_StrideX == 1) && (depthwiseConv2dLayerParams.m_StrideY == 1) &&
               (depthwiseConv2dLayerParams.m_BiasEnabled == false) &&
               (depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkPadFoldedIntoDepthwiseConv2d,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("FoldPadLayerIntoPooling2dLayer")
{
    Graph graph;
    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;

    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimplePool2d = [&](const Layer* const layer) {
        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
               (pool2dLayer->GetParameters() == pooling2dDescriptor);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));

    auto checkPadFoldedIntoPool2d = [&](const Layer* const layer) {
        if (!IsLayerOfType<Pooling2dLayer>(layer) || (layer->GetNameStr() != "folded-pad-into-pool2d"))
        {
            return false;
        }

        const auto pool2dLayer                      = static_cast<const Pooling2dLayer*>(layer);
        const Pooling2dDescriptor pool2dLayerParams = pool2dLayer->GetParameters();

        Pooling2dDescriptor pool2dLayerParamsNoPad = pool2dLayerParams;
        pool2dLayerParamsNoPad.m_PadLeft   = 0;
        pool2dLayerParamsNoPad.m_PadRight  = 0;
        pool2dLayerParamsNoPad.m_PadTop    = 0;
        pool2dLayerParamsNoPad.m_PadBottom = 0;
        // If we fold, the PaddingMethod will be set to IgnoreValue. The original is Exclude.
        pool2dLayerParamsNoPad.m_PaddingMethod = PaddingMethod::Exclude;
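        // PaddingMethod::Exclude means the padding fields do not count towards the pooling
        // computation, while IgnoreValue means they count but their values are ignored, so
        // folding a Pad into the pooling layer has to switch the method to IgnoreValue.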

        return (pool2dLayerParamsNoPad == pooling2dDescriptor) && (pool2dLayerParams.m_PadLeft == 1) &&
               (pool2dLayerParams.m_PadRight == 1) && (pool2dLayerParams.m_PadTop == 1) &&
               (pool2dLayerParams.m_PadBottom == 1) &&
               (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        checkPadFoldedIntoPool2d,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized")
{
    // In this test case we'll set up a pad layer with two outputs. One goes to a pooling layer
    // and the other goes to an output layer. FoldPadLayerIntoPooling2d should not optimize this
    // graph as it uses the OptimizeForExclusiveConnection method.
    Graph graph;
    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;

    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    // Add the alternative branch from the pad layer to an output layer.
    Layer* secondOutput = graph.AddLayer<OutputLayer>(1, "dummy output");
    padLayer->GetOutputSlot().Connect(secondOutput->GetInputSlot(0));
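    // The pad layer's output slot now feeds two input slots, so its connection to the
    // pooling layer is no longer exclusive and the fold should be rejected.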

    auto checkSimplePool2d = [&](const Layer* const layer) {
        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
               (pool2dLayer->GetParameters() == pooling2dDescriptor);
    };

    // Initial sequence.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));

    // The network should not change.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding")
{
    // In this test we set up an input layer, a Pad layer, a Pooling layer that includes padding,
    // and an output layer. The optimization should not work as the pooling layer already includes
    // an existing pad and specifies PaddingMethod::Exclude.
    Graph graph;
    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
    // Include a pad with the pooling layer. This should prevent the optimization working.
    pooling2dDescriptor.m_PadLeft       = 1;
    pooling2dDescriptor.m_PadRight      = 1;
    pooling2dDescriptor.m_PadTop        = 1;
    pooling2dDescriptor.m_PadBottom     = 1;
    pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;

    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimplePool2d = [&](const Layer* const layer) {
        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
               (pool2dLayer->GetParameters() == pooling2dDescriptor);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));

    // The optimization should not have modified the graph.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));
}

TEST_CASE("FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded")
{
    // In this test we set up an input layer, a Pad layer with a large pad value, a Max Pooling
    // layer, and an output layer. The optimization should not work as the pad value would
    // modify the result of the max pooling layer.
    Graph graph;
    const unsigned int inputShape[]  = {1, 2, 2, 3};
    const unsigned int paddedShape[] = {1, 4, 4, 3};
    const unsigned int outputShape[] = {1, 2, 2, 3};

    TensorInfo inputInfo(4, inputShape, DataType::Float32);
    TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
    TensorInfo outputInfo(4, outputShape, DataType::Float32);

    Layer* input = graph.AddLayer<InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(inputInfo);

    PadDescriptor padDescriptor({{0, 0},
                                 {1, 1},
                                 {1, 1},
                                 {0, 0}});
    // For max pooling of floats, a pad value of 0 is more than enough to stop the fold happening.
    // Set this to -std::numeric_limits<float>::infinity() to make the fold happen.
    padDescriptor.m_PadValue = 0;

    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);

    Pooling2dDescriptor pooling2dDescriptor;
    pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Max;
    pooling2dDescriptor.m_PoolWidth  = 3;
    pooling2dDescriptor.m_PoolHeight = 3;
    pooling2dDescriptor.m_StrideX    = 1;
    pooling2dDescriptor.m_StrideY    = 1;
    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;

    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);

    Layer* output = graph.AddLayer<OutputLayer>(0, "output");

    // Connect up layers - input -> pad -> pool2d -> output
    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));

    auto checkSimplePool2d = [&](const Layer* const layer) {
        const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
        return IsLayerOfType<Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
               (pool2dLayer->GetParameters() == pooling2dDescriptor);
    };

    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));

    armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));

    // The optimization should not have modified the graph.
    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                        &IsLayerOfType<InputLayer>,
                        &IsLayerOfType<PadLayer>,
                        checkSimplePool2d,
                        &IsLayerOfType<OutputLayer>));
}

#if defined(ARMNNREF_ENABLED)
TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
    // The idea of this test is to run a simple pad+pool2d network twice. Once
    // with FoldPadLayerIntoPooling2dLayer enabled and a second time with it
    // avoided. The output tensors of each should match.
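    // Shape arithmetic for reference: the 4x4 input is padded by 1 on each side to 6x6, and a
    // 3x3 average pool at stride 1 gives (6 - 3) / 1 + 1 = 4, so the output is 4x4 again.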
    const unsigned int inputShape[]  = {1, 4, 4, 2};
    const unsigned int paddedShape[] = {1, 6, 6, 2};
    const unsigned int outputShape[] = {1, 4, 4, 2};

    std::vector<float> inputData({2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,

                                  18.0f, 20.0f, 24.0f, 22.0f,
                                  20.0f, 18.0f, 22.0f, 24.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,
                                 });
    try
    {
        // Create a network of input, pad, pooling 2D, output.
        INetworkPtr network = INetwork::Create();

        IConnectableLayer* inputLayer = network->AddInputLayer(0);
        TensorInfo inputInfo(4, inputShape, DataType::Float32);
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);

        PadDescriptor padDescriptor({{0, 0},
                                     {1, 1},
                                     {1, 1},
                                     {0, 0}});
        IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "Pad");
        TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
        padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);

        Pooling2dDescriptor pooling2dDescriptor;
        pooling2dDescriptor.m_PoolType   = PoolingAlgorithm::Average;
        pooling2dDescriptor.m_PoolWidth  = 3;
        pooling2dDescriptor.m_PoolHeight = 3;
        pooling2dDescriptor.m_StrideX    = 1;
        pooling2dDescriptor.m_StrideY    = 1;
        pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
        IConnectableLayer* pool2dLayer = network->AddPooling2dLayer(pooling2dDescriptor, "Pool2D");
        TensorInfo outputInfo(4, outputShape, DataType::Float32);
        pool2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

        IConnectableLayer* outputLayer = network->AddOutputLayer(0);

        // Connect layers
        inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
        padLayer->GetOutputSlot(0).Connect(pool2dLayer->GetInputSlot(0));
        pool2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

        // Create ArmNN runtime
        IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
        // Optimise the network
        IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        // Load network into runtime
        NetworkId networkIdentifier;
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);

        InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};

        // Set the initial values of the data to different values from the golden data in case the inference fails.
        std::vector<float> optimizedData(32, -std::numeric_limits<float>::infinity());
        OutputTensors outputTensors{{0, Tensor(outputInfo, optimizedData.data())}};
        // Execute network
        run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
        // Unload it.
        run->UnloadNetwork(networkIdentifier);

        // In this second case the pad will have two outputs, one connected to the pooling layer and
        // the other connected to a second output layer. This will prevent the FoldPadLayerIntoPooling2dLayer
        // optimization from working. A previous test, FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized,
        // has proved that doing this will avoid the optimization.
        IConnectableLayer* dummyOutputLayer = network->AddOutputLayer(1);
        padLayer->GetOutputSlot(0).Connect(dummyOutputLayer->GetInputSlot(0));

        // Optimize, load and execute it a second time.
        optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
        std::vector<float> goldenData(32, 0.0f);
        std::vector<float> padOutputData(72, 0.0f);
        OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
                                    {1, Tensor(paddedInfo, padOutputData.data())}};
        run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);

        // Now we can compare goldenData against optimizedData. They should be the same.
        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << std::endl;
        ARMNN_ASSERT_MSG(false, e.what());
    }
}

TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
    // The idea of this test is to run a simple pad+conv2d network twice. Once
    // with FoldPadLayerIntoConv2dLayer enabled and a second time with it
    // avoided. The output tensors of each should match.
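    // Shape arithmetic for reference: the 4x4x3 input is padded by 1 on each side to 6x6x3, and a
    // 2x2 convolution at stride 1 gives (6 - 2) / 1 + 1 = 5, so with 4 output channels the output
    // shape is {1, 5, 5, 4}.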
    const unsigned int inputShape[]   = {1, 4, 4, 3}; // NHWCin
    const unsigned int paddedShape[]  = {1, 6, 6, 3};
    const unsigned int weightsShape[] = {4, 2, 2, 3}; // CoutHWCin
    const unsigned int outputShape[]  = {1, 5, 5, 4}; // NHWCout

    std::vector<float> inputData({2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,

                                  18.0f, 20.0f, 24.0f, 22.0f,
                                  20.0f, 18.0f, 22.0f, 24.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,

                                  2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,
                                 });
    try
    {
        // Create a network of input, pad, convolution 2D, output.
        INetworkPtr network = INetwork::Create();

        IConnectableLayer* inputLayer = network->AddInputLayer(0);
        TensorInfo inputInfo(4, inputShape, DataType::Float32);
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);

        PadDescriptor padDescriptor({{0, 0},
                                     {1, 1},
                                     {1, 1},
                                     {0, 0}});
        IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "Pad");
        TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
        padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);

        Convolution2dDescriptor convDescriptor;
        convDescriptor.m_DataLayout  = DataLayout::NHWC;
        convDescriptor.m_StrideX     = 1;
        convDescriptor.m_StrideY     = 1;
        convDescriptor.m_BiasEnabled = true;

        std::vector<float> weightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                          11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                                          21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                                          31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
        TensorInfo weightsInfo(4, weightsShape, DataType::Float32);
        ConstTensor weights(weightsInfo, weightsData);
        std::vector<float> biasVector = {5, 6, 7, 8};
        TensorInfo biasInfo({4}, DataType::Float32);
        ConstTensor bias(biasInfo, biasVector);
        Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);

        IConnectableLayer* conv2dLayer = network->AddConvolution2dLayer(convDescriptor,
                                                                        weights,
                                                                        optionalBias,
                                                                        "Conv2D");

        TensorInfo outputInfo(4, outputShape, DataType::Float32);
        conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

        IConnectableLayer* outputLayer = network->AddOutputLayer(0);

        // Connect layers
        inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
        padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0));
        conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

        // Create ArmNN runtime
        IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
        // Optimise the network
        IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        // Load network into runtime
        NetworkId networkIdentifier;
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);

        InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};

        // Set the initial values of the data to different values from the golden data in case the inference fails.
        std::vector<float> optimizedData(100, -std::numeric_limits<float>::infinity());
        OutputTensors outputTensors{{0, Tensor(outputInfo, optimizedData.data())}};
        // Execute network
        run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
        // Unload it.
        run->UnloadNetwork(networkIdentifier);

        // In this second case the pad will have two outputs, one connected to the conv layer and the
        // other connected to a second output layer. This will prevent the FoldPadLayerIntoConv2dLayer
        // optimization from working. The earlier test, FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized,
        // has proved that a second connection like this avoids the optimization.
        IConnectableLayer* dummyOutputLayer = network->AddOutputLayer(1);
        padLayer->GetOutputSlot(0).Connect(dummyOutputLayer->GetInputSlot(0));

        // Optimize, load and execute it a second time.
        optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
        std::vector<float> goldenData(100, 0.0f);
        std::vector<float> padOutputData(108, 0.0f);
        OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
                                    {1, Tensor(paddedInfo, padOutputData.data())}};
        run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);

        // Now we can compare goldenData against optimizedData. They should be the same.
        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << std::endl;
        ARMNN_ASSERT_MSG(false, e.what());
    }
}

TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
{
    // The idea of this test is to run a simple pad+depthwiseconv2d network twice. Once
    // with FoldPadLayerIntoDepthwiseConv2dLayer enabled and a second time with it
    // avoided. The output tensors of each should match.
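    // Shape arithmetic for reference: the 4x4x3 input is padded by 1 on each side to 6x6x3, and a
    // 2x2 depthwise convolution at stride 1 gives (6 - 2) / 1 + 1 = 5; with 3 input channels and a
    // channel multiplier of 4 the output has 12 channels, giving {1, 5, 5, 12}.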
    const unsigned int inputShape[]   = {1, 4, 4, 3};  // NHWCin
    const unsigned int paddedShape[]  = {1, 6, 6, 3};
    const unsigned int weightsShape[] = {1, 2, 2, 12}; // 1HWCout
    const unsigned int outputShape[]  = {1, 5, 5, 12}; // NHWCout

    std::vector<float> inputData({2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,

                                  18.0f, 20.0f, 24.0f, 22.0f,
                                  20.0f, 18.0f, 22.0f, 24.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,
                                  26.0f, 28.0f, 0.0f, 0.0f,

                                  2.0f, 2.0f, 6.0f, 6.0f,
                                  4.0f, 4.0f, 8.0f, 8.0f,
                                  10.0f, 12.0f, 14.0f, 16.0f,
                                  10.0f, 12.0f, 16.0f, 14.0f,
                                 });
    try
    {
        // Create a network of input, pad, depthwise convolution 2D, output.
        INetworkPtr network = INetwork::Create();

        IConnectableLayer* inputLayer = network->AddInputLayer(0);
        TensorInfo inputInfo(4, inputShape, DataType::Float32);
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);

        PadDescriptor padDescriptor({{0, 0},
                                     {1, 1},
                                     {1, 1},
                                     {0, 0}});
        IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "Pad");
        TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
        padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);

        DepthwiseConvolution2dDescriptor convDescriptor;
        convDescriptor.m_DataLayout  = DataLayout::NHWC;
        convDescriptor.m_StrideX     = 1;
        convDescriptor.m_StrideY     = 1;
        convDescriptor.m_BiasEnabled = true;

        std::vector<float> weightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                                          11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                                          21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                                          31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42};
        TensorInfo weightsInfo(4, weightsShape, DataType::Float32);
        ConstTensor weights(weightsInfo, weightsData);
        std::vector<float> biasVector = {5, 6, 7, 8, 9, 10, 11, 12, 5, 6, 7, 8};
        TensorInfo biasInfo({12}, DataType::Float32);
        ConstTensor bias(biasInfo, biasVector);
        Optional<ConstTensor> optionalBias = Optional<ConstTensor>(bias);

        IConnectableLayer* conv2dLayer = network->AddDepthwiseConvolution2dLayer(convDescriptor,
                                                                                 weights,
                                                                                 optionalBias,
                                                                                 "DepthwiseConv2D");

        TensorInfo outputInfo(4, outputShape, DataType::Float32);
        conv2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);

        IConnectableLayer* outputLayer = network->AddOutputLayer(0);

        // Connect layers
        inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
        padLayer->GetOutputSlot(0).Connect(conv2dLayer->GetInputSlot(0));
        conv2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

        // Create ArmNN runtime
        IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
        // Optimise the network
        IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        // Load network into runtime
        NetworkId networkIdentifier;
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);

        InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};

        // Set the initial values of the data to different values from the golden data in case the inference fails.
        std::vector<float> optimizedData(300, -std::numeric_limits<float>::infinity());
        OutputTensors outputTensors{{0, Tensor(outputInfo, optimizedData.data())}};
        // Execute network
        run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
        // Unload it.
        run->UnloadNetwork(networkIdentifier);

        // In this second case the pad will have two outputs, one connected to the conv layer and the
        // other connected to a second output layer. This will prevent the FoldPadLayerIntoDepthwiseConv2dLayer
        // optimization from working. The earlier test, FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized,
        // has proved that a second connection like this avoids the optimization.
        IConnectableLayer* dummyOutputLayer = network->AddOutputLayer(1);
        padLayer->GetOutputSlot(0).Connect(dummyOutputLayer->GetInputSlot(0));

        // Optimize, load and execute it a second time.
        optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
        std::vector<float> goldenData(300, 0.0f);
        std::vector<float> padOutputData(108, 0.0f);
        OutputTensors goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
                                    {1, Tensor(paddedInfo, padOutputData.data())}};
        run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);

        // Now we can compare goldenData against optimizedData. They should be the same.
        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
    }
    catch (const std::exception& e)
    {
        std::cerr << e.what() << std::endl;
        ARMNN_ASSERT_MSG(false, e.what());
    }
}
#endif

}