6 #include <GraphUtils.hpp> 7 #include <TestUtils.hpp> 11 #include <doctest/doctest.h> 13 using namespace armnn;
17 #if defined(ARMNNREF_ENABLED)||defined(ARMCOMPUTECL_ENABLED) 18 void FoldPadIntoQuantizedAvgPoolTest(
Compute backendId)
23 const unsigned int inputShape[] = {1, 2, 2, 3};
24 const unsigned int paddedShape[] = {1, 4, 4, 3};
25 const unsigned int outputShape[] = {1, 2, 2, 3};
50 IConnectableLayer* pool2dLayer = network->AddPooling2dLayer(pooling2dDescriptor,
"pool2d");
57 padLayer->GetOutputSlot(0).Connect(pool2dLayer->
GetInputSlot(0));
66 auto checkPadFoldedIntoPool2d = [&](
const Layer*
const layer) {
67 if (!IsLayerOfType<Pooling2dLayer>(layer) || (layer->GetNameStr() !=
"folded-pad-into-pool2d"))
72 const auto pool2dLayer =
static_cast<const Pooling2dLayer*
>(layer);
83 return (pool2dLayerParamsNoPad == pooling2dDescriptor) && (pool2dLayerParams.
m_PadLeft == 1) &&
90 &IsLayerOfType<InputLayer>,
91 checkPadFoldedIntoPool2d,
92 &IsLayerOfType<OutputLayer>));
98 #if defined(ARMNNREF_ENABLED) 99 TEST_SUITE(
"Optimizer_FoldPadIntoQuantizedAvgPoolCpuRef")
101 TEST_CASE(
"FoldPadIntoQuantizedAvgPoolCpuRefTest")
108 #if defined(ARMCOMPUTECL_ENABLED) 111 TEST_CASE(
"FoldPadIntoQuantizedAvgPoolGpuAccTest")
TEST_SUITE("TestConstTensorLayerVisitor")
static IRuntimePtr Create(const CreationOptions &options)
Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.
uint32_t m_PadBottom
Padding bottom value in the height dimension.
CPU Execution: Reference C++ kernels.
uint32_t m_PadLeft
Padding left value in the width dimension.
bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
uint32_t m_PoolWidth
Pooling width value.
ConstIterator cbegin() const
Returns const iterator pointing to the beginning of the list. Lowercase for range-based for loops...
std::unique_ptr< IRuntime, void(*)(IRuntime *runtime)> IRuntimePtr
The padding fields don't count and are ignored.
PaddingMethod m_PaddingMethod
The padding method to be used. (Exclude, IgnoreValue).
uint32_t m_PadTop
Padding top value in the height dimension.
Copyright (c) 2021 ARM Limited and Contributors.
virtual const BaseDescriptor & GetParameters() const =0
If the layer has a descriptor return it.
uint32_t m_StrideX
Stride value when proceeding through input for the width dimension.
virtual void SetTensorInfo(const TensorInfo &tensorInfo)=0
uint32_t m_PoolHeight
Pooling height value.
A PadDescriptor for the PadLayer.
Compute
The Compute enum is deprecated and is being replaced by BackendId.
uint32_t m_PadRight
Padding right value in the width dimension.
IOptimizedNetworkPtr Optimize(const INetwork &network, const std::vector< BackendId > &backendPreferences, const IDeviceSpec &deviceSpec, const OptimizerOptions &options=OptimizerOptions(), Optional< std::vector< std::string > &> messages=EmptyOptional())
Create an optimized version of the network.
std::unique_ptr< IOptimizedNetwork, void(*)(IOptimizedNetwork *network)> IOptimizedNetworkPtr
GPU Execution: OpenCL: ArmCompute.
This layer represents a pooling 2d operation.
DataLayout m_DataLayout
The data layout to be used (NCHW, NHWC).
Graph & GetGraphForTesting(IOptimizedNetwork *optNet)
PoolingAlgorithm m_PoolType
The pooling algorithm to use (Max, Average, L2).
The padding fields count, but are ignored.
virtual const IInputSlot & GetInputSlot(unsigned int index) const =0
Get a const input slot handle by slot index.
virtual const IOutputSlot & GetOutputSlot(unsigned int index) const =0
Get the const output slot handle by slot index.
ConstIterator cend() const
Returns const iterator pointing to the end of the list. Lowercase for range-based for loops...
std::unique_ptr< INetwork, void(*)(INetwork *network)> INetworkPtr
virtual int Connect(IInputSlot &destination)=0
A Pooling2dDescriptor for the Pooling2dLayer.
static INetworkPtr Create(NetworkOptions networkOptions={})
uint32_t m_StrideY
Stride value when proceeding through input for the height dimension.