author    | Teresa Charlin <teresa.charlinreyes@arm.com> | 2021-05-24 16:22:15 +0100
committer | Teresa Charlin <teresa.charlinreyes@arm.com> | 2021-05-24 17:03:19 +0100
commit    | 37c430efaa85f84905cf96ace21f310339374053 (patch)
tree      | 2d8f96ee47c46420dfd7363afc17844b308d60af /src/armnn/test/OptimizerTests.cpp
parent    | e73eda993d6af38a6be17a4d8368f78e29ec95ba (diff)
download  | armnn-37c430efaa85f84905cf96ace21f310339374053.tar.gz
IVGCVSW-6069 Add Unit Test for Pad + DepthwiseConv and Pad + Conv
* All fold pad tests are now in a separate file.
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ic0b0436f6b0194404f9a3f1553e2f69524b63580
Diffstat (limited to 'src/armnn/test/OptimizerTests.cpp')
-rw-r--r-- | src/armnn/test/OptimizerTests.cpp | 527
1 file changed, 0 insertions, 527 deletions
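For orientation, each of the tests removed below follows the same pattern: build a small Graph with a Pad layer feeding the target layer, run the matching fold-pad optimization pass, and assert on the resulting layer sequence. The following is a condensed sketch of that pattern, distilled from the removed FoldPadLayerIntoConvolution2dLayer test; it assumes the same internal Arm NN headers and test helpers that src/armnn/test/OptimizerTests.cpp already pulled in.

```cpp
// Condensed from the removed FoldPadLayerIntoConvolution2dLayer test below; assumes the
// same internal Arm NN headers and helpers (Graph, layer classes, FoldPadIntoConvolution2d).
Graph graph;
const unsigned int inputShape[]   = { 1, 2, 2, 3 };
const unsigned int paddedShape[]  = { 1, 6, 6, 3 };
const unsigned int weightsShape[] = { 1, 2, 3, 3 };
const unsigned int outputShape[]  = { 1, 2, 1, 1 };

Layer* input = graph.AddLayer<InputLayer>(0, "input");
input->GetOutputSlot().SetTensorInfo(armnn::TensorInfo(4, inputShape, DataType::Float32));

// Pad height and width by 2 on each side (NHWC ordering of the pad list).
PadDescriptor padDescriptor({ { 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 } });
PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
padLayer->GetOutputSlot().SetTensorInfo(armnn::TensorInfo(4, paddedShape, DataType::Float32));

// Convolution that carries no padding of its own yet.
Convolution2dDescriptor convDesc;
convDesc.m_BiasEnabled = false;
convDesc.m_StrideX     = 1;
convDesc.m_StrideY     = 1;
convDesc.m_DataLayout  = DataLayout::NHWC;

std::vector<float> weightsVector(18);
armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
Convolution2dLayer* conv2d = graph.AddLayer<Convolution2dLayer>(convDesc, "conv2d");
conv2d->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
conv2d->GetOutputSlot().SetTensorInfo(armnn::TensorInfo(4, outputShape, DataType::Float32));

Layer* output = graph.AddLayer<OutputLayer>(0, "output");

// input -> pad -> conv2d -> output
input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
padLayer->GetOutputSlot().Connect(conv2d->GetInputSlot(0));
conv2d->GetOutputSlot().Connect(output->GetInputSlot(0));

// The pass deletes the Pad layer and moves its padding onto the convolution descriptor,
// leaving input -> conv2d ("folded-pad-into-conv2d") -> output.
armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
```

After the pass, the removed checkPadFoldedIntoConv2d lambda expects the Pad layer to be gone from the layer sequence and the convolution to report PadLeft/PadRight/PadTop/PadBottom of 2.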
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 110b2834d0..fcfff1a807 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -541,533 +541,6 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
     BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
 }

-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
-{
-    Graph graph;
-    const unsigned int inputShape[] = { 1, 2, 2, 3 };
-    const unsigned int paddedShape[] = { 1, 6, 6, 3 };
-    const unsigned int weightsShape[] = { 1, 2, 3, 3 };
-    const unsigned int outputShape[] = { 1, 2, 1, 1 };
-
-    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-    armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
-    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
-
-    Layer* input = graph.AddLayer<InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    PadDescriptor padDescriptor({ { 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 } });
-
-    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
-    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
-
-    Convolution2dDescriptor convolution2dDescriptor;
-    convolution2dDescriptor.m_BiasEnabled = false;
-    convolution2dDescriptor.m_StrideX = 1;
-    convolution2dDescriptor.m_StrideY = 1;
-    convolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
-
-    std::vector<float> weightsVector(18);
-    armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
-
-    Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor, "conv2d");
-    conv2dLayer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
-    conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
-    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up layers - input -> pad -> conv2d -> output
-    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
-    padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0));
-    conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkSimpleConv2d = [](const armnn::Layer* const layer) -> bool {
-        const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
-        const auto conv2dLayerParams = conv2dLayer->GetParameters();
-        return IsLayerOfType<armnn::Convolution2dLayer>(layer) && (layer->GetNameStr() == "conv2d") &&
-               (conv2dLayerParams.m_PadLeft == 0) && (conv2dLayerParams.m_PadRight == 0) &&
-               (conv2dLayerParams.m_PadTop == 0) && (conv2dLayerParams.m_PadBottom == 0) &&
-               (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_StrideX == 1) &&
-               (conv2dLayerParams.m_StrideY == 1) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimpleConv2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d()));
-
-    auto checkPadFoldedIntoConv2d = [](const armnn::Layer* const layer) -> bool {
-        const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer);
-        const auto conv2dLayerParams = conv2dLayer->GetParameters();
-        return IsLayerOfType<armnn::Convolution2dLayer>(layer) && (layer->GetNameStr() == "folded-pad-into-conv2d") &&
-               (conv2dLayerParams.m_PadLeft == 2) && (conv2dLayerParams.m_PadRight == 2) &&
-               (conv2dLayerParams.m_PadTop == 2) && (conv2dLayerParams.m_PadBottom == 2) &&
-               (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_StrideX == 1) &&
-               (conv2dLayerParams.m_StrideY == 1) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             checkPadFoldedIntoConv2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConvolution2dLayer)
-{
-    Graph graph;
-    const unsigned int inputShape[] = {1, 2, 2, 3};
-    const unsigned int paddedShape[] = {1, 6, 6, 3};
-    const unsigned int weightsShape[] = {1, 2, 3, 3};
-    const unsigned int outputShape[] = {1, 2, 1, 3};
-
-    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-    armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
-    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
-
-    Layer* input = graph.AddLayer<InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    PadDescriptor padDescriptor({{0, 0},
-                                 {2, 2},
-                                 {2, 2},
-                                 {0, 0}});
-
-    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
-    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
-
-    DepthwiseConvolution2dDescriptor depthwiseConvolution2dDescriptor;
-    depthwiseConvolution2dDescriptor.m_BiasEnabled = false;
-    depthwiseConvolution2dDescriptor.m_StrideX = 1;
-    depthwiseConvolution2dDescriptor.m_StrideY = 1;
-    depthwiseConvolution2dDescriptor.m_DataLayout = DataLayout::NHWC;
-
-    std::vector<float> weightsVector(18);
-    armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector);
-
-    auto* depthwiseConv2dLayer = graph
-        .AddLayer<DepthwiseConvolution2dLayer>(depthwiseConvolution2dDescriptor, "depthwiseConv2d");
-    depthwiseConv2dLayer->m_Weight = std::make_unique<armnn::ScopedTensorHandle>(weights);
-    depthwiseConv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
-    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up layers - input -> pad -> depthwiseConv2d -> output
-    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
-    padLayer->GetOutputSlot().Connect(depthwiseConv2dLayer->GetInputSlot(0));
-    depthwiseConv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkSimpleDepthwiseConv2d = [](const armnn::Layer* const layer)->bool {
-        const auto depthwiseConv2dLayer = static_cast<const armnn::DepthwiseConvolution2dLayer*>(layer);
-        const auto depthwiseConv2dLayerParams = depthwiseConv2dLayer->GetParameters();
-        return IsLayerOfType<armnn::DepthwiseConvolution2dLayer>(layer) && (layer->GetNameStr() == "depthwiseConv2d")&&
-               (depthwiseConv2dLayerParams.m_PadLeft == 0) && (depthwiseConv2dLayerParams.m_PadRight == 0) &&
-               (depthwiseConv2dLayerParams.m_PadTop == 0) && (depthwiseConv2dLayerParams.m_PadBottom == 0) &&
-               (depthwiseConv2dLayerParams.m_BiasEnabled == false) && (depthwiseConv2dLayerParams.m_StrideX == 1) &&
-               (depthwiseConv2dLayerParams.m_StrideY == 1)
-               && (depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimpleDepthwiseConv2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoDepthwiseConvolution2d()));
-
-    auto checkPadFoldedIntoDepthwiseConv2d = [](const armnn::Layer* const layer)->bool {
-        const auto depthwiseConv2dLayer = static_cast<const armnn::DepthwiseConvolution2dLayer*>(layer);
-        const auto depthwiseConv2dLayerParams = depthwiseConv2dLayer->GetParameters();
-        return IsLayerOfType<armnn::DepthwiseConvolution2dLayer>(layer)
-               && (layer->GetNameStr() == "folded-pad-into-depthwiseConv2d") &&
-               (depthwiseConv2dLayerParams.m_PadLeft == 2) && (depthwiseConv2dLayerParams.m_PadRight == 2) &&
-               (depthwiseConv2dLayerParams.m_PadTop == 2) && (depthwiseConv2dLayerParams.m_PadBottom == 2) &&
-               (depthwiseConv2dLayerParams.m_BiasEnabled == false) && (depthwiseConv2dLayerParams.m_StrideX == 1) &&
-               (depthwiseConv2dLayerParams.m_StrideY == 1)
-               && (depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             checkPadFoldedIntoDepthwiseConv2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
-{
-    Graph graph;
-    const unsigned int inputShape[] = { 1, 2, 2, 3 };
-    const unsigned int paddedShape[] = { 1, 4, 4, 3 };
-    const unsigned int outputShape[] = { 1, 2, 2, 3 };
-
-    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-    armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
-    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
-
-    Layer* input = graph.AddLayer<InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
-
-    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
-    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
-
-    Pooling2dDescriptor pooling2dDescriptor;
-    pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
-    pooling2dDescriptor.m_PoolWidth = 3;
-    pooling2dDescriptor.m_PoolHeight = 3;
-    pooling2dDescriptor.m_StrideX = 1;
-    pooling2dDescriptor.m_StrideY = 1;
-    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
-
-    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
-    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
-    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up layers - input -> pad -> pool2d -> output
-    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
-    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
-    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkSimplePool2d = [&](const armnn::Layer* const layer) {
-        const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
-        return IsLayerOfType<armnn::Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
-               (pool2dLayer->GetParameters() == pooling2dDescriptor);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimplePool2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoPooling2d()));
-
-    auto checkPadFoldedIntoPool2d = [&](const armnn::Layer* const layer) {
-        if (!IsLayerOfType<armnn::Pooling2dLayer>(layer) || (layer->GetNameStr() != "folded-pad-into-pool2d"))
-        {
-            return false;
-        }
-
-        const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
-        const Pooling2dDescriptor pool2dLayerParams = pool2dLayer->GetParameters();
-
-        Pooling2dDescriptor pool2dLayerParamsNoPad = pool2dLayerParams;
-        pool2dLayerParamsNoPad.m_PadLeft = 0;
-        pool2dLayerParamsNoPad.m_PadRight = 0;
-        pool2dLayerParamsNoPad.m_PadTop = 0;
-        pool2dLayerParamsNoPad.m_PadBottom = 0;
-        // If we fold then PaddingMethod will be set to Ignore. The original will be Exclude.
-        pool2dLayerParamsNoPad.m_PaddingMethod = PaddingMethod::Exclude;
-
-        return (pool2dLayerParamsNoPad == pooling2dDescriptor) && (pool2dLayerParams.m_PadLeft == 1) &&
-               (pool2dLayerParams.m_PadRight == 1) && (pool2dLayerParams.m_PadTop == 1) &&
-               (pool2dLayerParams.m_PadBottom == 1) &&
-               (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             checkPadFoldedIntoPool2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized)
-{
-    // In this test case we'll setup a pad layer with two outputs. One goes to a polling layers and the other
-    // goes to an output layer. FoldPadLayerIntoPooling2d should not optimize this graph as it uses the
-    // OptimizeForExclusiveConnection method.
-    Graph graph;
-    const unsigned int inputShape[] = { 1, 2, 2, 3 };
-    const unsigned int paddedShape[] = { 1, 4, 4, 3 };
-    const unsigned int outputShape[] = { 1, 2, 2, 3 };
-
-    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-    armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
-    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
-
-    Layer* input = graph.AddLayer<InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
-
-    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
-    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
-
-    Pooling2dDescriptor pooling2dDescriptor;
-    pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
-    pooling2dDescriptor.m_PoolWidth = 3;
-    pooling2dDescriptor.m_PoolHeight = 3;
-    pooling2dDescriptor.m_StrideX = 1;
-    pooling2dDescriptor.m_StrideY = 1;
-    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
-
-    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
-    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
-    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up layers - input -> pad -> pool2d -> output
-    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
-    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
-    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    // Add the alternative branch from the pas layer to an output layer.
-    Layer* secondOutput = graph.AddLayer<OutputLayer>(1, "dummy output");
-    padLayer->GetOutputSlot().Connect(secondOutput->GetInputSlot(0));
-
-    auto checkSimplePool2d = [&](const armnn::Layer* const layer) {
-        const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
-        return IsLayerOfType<armnn::Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
-               (pool2dLayer->GetParameters() == pooling2dDescriptor);
-    };
-
-    // Initial sequence.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimplePool2d,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoPooling2d()));
-
-    // The network should not change.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimplePool2d,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding)
-{
-    // In this test setup input, Pad layer, Pooling layer that includes padding, output layer. The optimization
-    // should not work as the pooling layer already includes and existing pad and specifies PaddingMethod::Exclude.
-    Graph graph;
-    const unsigned int inputShape[] = { 1, 2, 2, 3 };
-    const unsigned int paddedShape[] = { 1, 4, 4, 3 };
-    const unsigned int outputShape[] = { 1, 2, 2, 3 };
-
-    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-    armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
-    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
-
-    Layer* input = graph.AddLayer<InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
-
-    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
-    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
-
-    Pooling2dDescriptor pooling2dDescriptor;
-    pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
-    pooling2dDescriptor.m_PoolWidth = 3;
-    pooling2dDescriptor.m_PoolHeight = 3;
-    pooling2dDescriptor.m_StrideX = 1;
-    pooling2dDescriptor.m_StrideY = 1;
-    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
-    // Include a pad with the pooling layer. This should prevent the optimization working.
-    pooling2dDescriptor.m_PadLeft = 1;
-    pooling2dDescriptor.m_PadRight = 1;
-    pooling2dDescriptor.m_PadTop = 1;
-    pooling2dDescriptor.m_PadBottom = 1;
-    pooling2dDescriptor.m_PaddingMethod = PaddingMethod::Exclude;
-
-    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
-    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
-    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up layers - input -> pad -> pool2d -> output
-    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
-    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
-    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkSimplePool2d = [&](const armnn::Layer* const layer) {
-        const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
-        return IsLayerOfType<armnn::Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
-               (pool2dLayer->GetParameters() == pooling2dDescriptor);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimplePool2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoPooling2d()));
-
-    // The optimization should not have modified the graph.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimplePool2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded)
-{
-    // In this test setup input, Pad layer with a large pad value, Max Pooling layer, output layer. The optimization
-    // should not work as the pad value will modify the result of the max pooling layer.
-    Graph graph;
-    const unsigned int inputShape[] = { 1, 2, 2, 3 };
-    const unsigned int paddedShape[] = { 1, 4, 4, 3 };
-    const unsigned int outputShape[] = { 1, 2, 2, 3 };
-
-    armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-    armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
-    armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
-
-    Layer* input = graph.AddLayer<InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(inputInfo);
-
-    PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
-    // For Max pooling of a float a pad value of 0 is more than enough to stop the fold happening.
-    // Set this to -std::numeric_limits<float>::infinity() to make the fold happen.
-    padDescriptor.m_PadValue = 0;
-
-    PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad");
-    padLayer->GetOutputSlot().SetTensorInfo(paddedInfo);
-
-    Pooling2dDescriptor pooling2dDescriptor;
-    pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Max;
-    pooling2dDescriptor.m_PoolWidth = 3;
-    pooling2dDescriptor.m_PoolHeight = 3;
-    pooling2dDescriptor.m_StrideX = 1;
-    pooling2dDescriptor.m_StrideY = 1;
-    pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
-
-    Pooling2dLayer* pool2dLayer = graph.AddLayer<Pooling2dLayer>(pooling2dDescriptor, "pool2d");
-    pool2dLayer->GetOutputSlot().SetTensorInfo(outputInfo);
-
-    Layer* output = graph.AddLayer<OutputLayer>(0, "output");
-
-    // Connect up layers - input -> pad -> pool2d -> output
-    input->GetOutputSlot().Connect(padLayer->GetInputSlot(0));
-    padLayer->GetOutputSlot().Connect(pool2dLayer->GetInputSlot(0));
-    pool2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    auto checkSimplePool2d = [&](const armnn::Layer* const layer) {
-        const auto pool2dLayer = static_cast<const armnn::Pooling2dLayer*>(layer);
-        return IsLayerOfType<armnn::Pooling2dLayer>(layer) && (layer->GetNameStr() == "pool2d") &&
-               (pool2dLayer->GetParameters() == pooling2dDescriptor);
-    };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimplePool2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoPooling2d()));
-
-    // The optimization should not have modified the graph.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PadLayer>,
-                             checkSimplePool2d,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-#if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization)
-{
-    // The idea of this test to run a simple pad+pool2d network twice. Once
-    // with FoldPadLayerIntoPooling2dLayer enabled and a second time with it
-    // avoided. The output tensors of each should match.
-    const unsigned int inputShape[] = { 1, 4, 4, 2 };
-    const unsigned int paddedShape[] = { 1, 6, 6, 2 };
-    const unsigned int outputShape[] = { 1, 4, 4, 2 };
-    std::vector<float> inputData({
-        2.0f, 2.0f, 6.0f, 6.0f, 4.0f, 4.0f, 8.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 10.0f, 12.0f, 16.0f, 14.0f,
-
-        18.0f, 20.0f, 24.0f, 22.0f, 20.0f, 18.0f, 22.0f, 24.0f, 26.0f, 28.0f, 0.0f, 0.0f, 26.0f, 28.0f, 0.0f, 0.0f,
-    });
-    try
-    {
-        // Create a network of input, pad, pooling 2D, output.
-        INetworkPtr network = INetwork::Create();
-
-        IConnectableLayer* inputLayer = network->AddInputLayer(0);
-        armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32);
-        inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
-
-        PadDescriptor padDescriptor({ { 0, 0 }, { 1, 1 }, { 1, 1 }, { 0, 0 } });
-        IConnectableLayer* padLayer = network->AddPadLayer(padDescriptor, "Pad");
-        armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32);
-        padLayer->GetOutputSlot(0).SetTensorInfo(paddedInfo);
-
-        Pooling2dDescriptor pooling2dDescriptor;
-        pooling2dDescriptor.m_PoolType = PoolingAlgorithm::Average;
-        pooling2dDescriptor.m_PoolWidth = 3;
-        pooling2dDescriptor.m_PoolHeight = 3;
-        pooling2dDescriptor.m_StrideX = 1;
-        pooling2dDescriptor.m_StrideY = 1;
-        pooling2dDescriptor.m_DataLayout = DataLayout::NHWC;
-        IConnectableLayer* pool2dLayer = network->AddPooling2dLayer(pooling2dDescriptor, "Pool2D");
-        armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32);
-        pool2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
-
-        IConnectableLayer* outputLayer = network->AddOutputLayer(0);
-
-        // Connect layers
-        inputLayer->GetOutputSlot(0).Connect(padLayer->GetInputSlot(0));
-        padLayer->GetOutputSlot(0).Connect(pool2dLayer->GetInputSlot(0));
-        pool2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-        // Create ArmNN runtime
-        IRuntimePtr run = IRuntime::Create(IRuntime::CreationOptions()); // default options
-        // Optimise the network
-        IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, { Compute::CpuRef }, run->GetDeviceSpec());
-        // Load network into runtime
-        NetworkId networkIdentifier;
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
-
-        InputTensors inputTensors{ { 0,
-                                     ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data()) } };
-
-        // Set the initial values of the data to different values to the golden data just in case the inference fails.
-        std::vector<float> optimizedData(32, -std::numeric_limits<float>::infinity());
-        armnn::OutputTensors outputTensors{ { 0, armnn::Tensor(outputInfo, optimizedData.data()) } };
-        // Execute network
-        run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
-        // Unload it.
-        run->UnloadNetwork(networkIdentifier);
-
-        // In this second case the pad will have two outputs, one connected to the pooling layer the second connected to
-        // a second output layer. This will prevent the FoldPadLayerIntoPooling2dLayer optimization from working.
-        // A previous test, FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized, has proved that doing
-        // this will avoid the optimization.
-        IConnectableLayer* dummyOutputLayer = network->AddOutputLayer(1);
-        padLayer->GetOutputSlot(0).Connect(dummyOutputLayer->GetInputSlot(0));
-
-        // Optimize and load and execute it a second time.
-        optimizedNetwork = Optimize(*network, { Compute::CpuRef }, run->GetDeviceSpec());
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
-        std::vector<float> goldenData(32, 0.0f);
-        std::vector<float> padOutputData(72, 0.0f);
-        armnn::OutputTensors goldenTensors{ { 0, armnn::Tensor(outputInfo, goldenData.data()) },
-                                            { 1, armnn::Tensor(paddedInfo, padOutputData.data()) } };
-        run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
-
-        // Now we can compare goldenData against optimizedData. They should be the same.
-        BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
-    }
-    catch (const std::exception& e)
-    {
-        std::cerr << e.what() << std::endl;
-        ARMNN_ASSERT_MSG(false, e.what());
-    }
-}
-#endif
-
 class MockLayerSupport : public LayerSupportBase
 {
 public: