ArmNN
 22.08
FoldPadIntoQuantizedAveragePooling2DTests.cpp
Go to the documentation of this file.
1 //
2 // Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5 
6 #include <GraphUtils.hpp>
7 #include <TestUtils.hpp>
8 
9 #include <armnn/INetwork.hpp>
10 
11 #include <doctest/doctest.h>
12 
13 using namespace armnn;
14 
15 namespace
16 {
17 #if defined(ARMNNREF_ENABLED)||defined(ARMCOMPUTECL_ENABLED)
18 void FoldPadIntoQuantizedAvgPoolTest(Compute backendId)
19 {
20  // Create a network
21  INetworkPtr network = INetwork::Create();
22 
23  const unsigned int inputShape[] = {1, 2, 2, 3};
24  const unsigned int paddedShape[] = {1, 4, 4, 3};
25  const unsigned int outputShape[] = {1, 2, 2, 3};
26 
27  TensorInfo inputInfo(4, inputShape, DataType::QAsymmU8, 1.0f, 0.0f);
28  TensorInfo paddedInfo(4, paddedShape, DataType::QAsymmU8, 1.0f, 0.0f);
29  TensorInfo outputInfo(4, outputShape, DataType::QAsymmU8, 1.0f, 0.0f);
30 
31  IConnectableLayer* input = network->AddInputLayer(0, "input");
32  input->GetOutputSlot(0).SetTensorInfo(inputInfo);
33 
34  PadDescriptor padDescriptor({{0, 0},
35  {1, 1},
36  {1, 1},
37  {0, 0}});
38 
39  IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "pad");
40  padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);
41 
42  Pooling2dDescriptor pooling2dDescriptor;
43  pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
44  pooling2dDescriptor.m_PoolWidth = 3;
45  pooling2dDescriptor.m_PoolHeight = 3;
46  pooling2dDescriptor.m_StrideX = 1;
47  pooling2dDescriptor.m_StrideY = 1;
48  pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
49 
50  IConnectableLayer* pool2dLayer = network->AddPooling2dLayer(pooling2dDescriptor, "pool2d");
51  pool2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
52 
53  IConnectableLayer* output = network->AddOutputLayer(0, "output");
54 
55  // Connect up layers - input -> pad -> pool2d -> output
56  input->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
57  padLayer->GetOutputSlot(0).Connect(pool2dLayer->GetInputSlot(0));
58  pool2dLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
59 
60  // Create ArmNN runtime
62 
63  // Optimise ArmNN network
64  IOptimizedNetworkPtr optNet = Optimize(*network, {backendId}, run->GetDeviceSpec());
65 
66  auto checkPadFoldedIntoPool2d = [&](const Layer* const layer) {
67  if (!IsLayerOfType<Pooling2dLayer>(layer) || (layer->GetNameStr() != "folded-pad-into-pool2d"))
68  {
69  return false;
70  }
71 
72  const auto pool2dLayer = static_cast<const Pooling2dLayer*>(layer);
73  const Pooling2dDescriptor pool2dLayerParams = pool2dLayer->GetParameters();
74 
75  Pooling2dDescriptor pool2dLayerParamsNoPad = pool2dLayerParams;
76  pool2dLayerParamsNoPad.m_PadLeft = 0;
77  pool2dLayerParamsNoPad.m_PadRight = 0;
78  pool2dLayerParamsNoPad.m_PadTop = 0;
79  pool2dLayerParamsNoPad.m_PadBottom = 0;
80  // If we fold then PaddingMethod will be set to Ignore. The original will be Exclude.
81  pool2dLayerParamsNoPad.m_PaddingMethod = PaddingMethod::Exclude;
82 
83  return (pool2dLayerParamsNoPad == pooling2dDescriptor) && (pool2dLayerParams.m_PadLeft == 1) &&
84  (pool2dLayerParams.m_PadRight == 1) && (pool2dLayerParams.m_PadTop == 1) &&
85  (pool2dLayerParams.m_PadBottom == 1) && (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
86  };
87 
88  Graph& graph = GetGraphForTesting(optNet.get());
89  CHECK(CheckSequence(graph.cbegin(), graph.cend(),
90  &IsLayerOfType<InputLayer>,
91  checkPadFoldedIntoPool2d,
92  &IsLayerOfType<OutputLayer>));
93 }
94 #endif
95 }
96 
97 
#if defined(ARMNNREF_ENABLED)
// Exercise the fold-pad-into-quantized-average-pool optimization on the
// reference (CpuRef) backend.
TEST_SUITE("Optimizer_FoldPadIntoQuantizedAvgPoolCpuRef")
{
TEST_CASE("FoldPadIntoQuantizedAvgPoolCpuRefTest")
{
    FoldPadIntoQuantizedAvgPoolTest(Compute::CpuRef);
}
}
#endif
107 
#if defined(ARMCOMPUTECL_ENABLED)
// Exercise the fold-pad-into-quantized-average-pool optimization on the
// OpenCL (GpuAcc) backend.
TEST_SUITE("Optimizer_FoldPadIntoQuantizedAvgPoolGpuAcc")
{
TEST_CASE("FoldPadIntoQuantizedAvgPoolGpuAccTest")
{
    FoldPadIntoQuantizedAvgPoolTest(Compute::GpuAcc);
}
}
#endif
TEST_SUITE("TestConstTensorLayerVisitor")
static IRuntimePtr Create(const CreationOptions &options)
Definition: Runtime.cpp:49
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
Definition: INetwork.hpp:68
uint32_t m_PadBottom
Padding bottom value in the height dimension.
CPU Execution: Reference C++ kernels.
uint32_t m_PadLeft
Padding left value in the width dimension.
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
Definition: TestUtils.hpp:21
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:179
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
Definition: IRuntime.hpp:33
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
Copyright (c) 2021 ARM Limited and Contributors.
virtual const BaseDescriptor & GetParameters() const =0
If the layer has a descriptor return it.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
uint32_t m_PoolHeight
Pooling height value.
A PadDescriptor for the PadLayer.
Compute
The Compute enum is now deprecated and it is now being replaced by BackendId.
Definition: BackendId.hpp:21
uint32_t m_PadRight
Padding right value in the width dimension.
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
Definition: Network.cpp:1864
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
Definition: INetwork.hpp:239
GPU Execution: OpenCL: ArmCompute.
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
Definition: TestUtils.cpp:49
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
The padding fields count, but are ignored.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
Definition: Graph.hpp:181
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
Definition: INetwork.hpp:238
virtual int Connect(IInputSlot &destination)=0
A Pooling2dDescriptor for the Pooling2dLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
Definition: Network.cpp:475
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.