diff options
author | Nina Drozd <nina.drozd@arm.com> | 2019-04-18 14:48:51 +0100 |
---|---|---|
committer | Nina Drozd <nina.drozd@arm.com> | 2019-04-19 16:49:43 +0100 |
commit | 861985ff2964720a0165e109c3fc568cb245bbe9 (patch) | |
tree | f5829f273076dec3fb1d67b071fe1baf0bb73fbe /src/armnn/test/OptimizerTests.cpp | |
parent | 17660e68c91d48bfb3fc3c9540a1834f33e9e561 (diff) | |
download | armnn-861985ff2964720a0165e109c3fc568cb245bbe9.tar.gz |
IVGCVSW-2925: Combine Pad with Convolution2d in the Optimizer
* Added new optimization for folding pad layer into convolution2d layer following it
* Added new test in OptimizerTests.cpp
* Added new optimization into All optimizations
* Added call to new optimization in Optimize in Network.cpp
* Updated CMakeLists.txt
Signed-off-by: Nina Drozd <nina.drozd@arm.com>
Change-Id: I682e07c71bbd42c49c02dda30a848a9ab2b16e7e
Diffstat (limited to 'src/armnn/test/OptimizerTests.cpp')
-rw-r--r-- | src/armnn/test/OptimizerTests.cpp | 89 |
1 file changed, 89 insertions, 0 deletions
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index f40a78a0d9..b0d8629c7f 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -1085,4 +1085,93 @@ BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes) BOOST_CHECK_NO_THROW(graph.InferTensorInfos()); } +BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer) +{ + Graph graph; + const unsigned int inputShape[] = { 1, 2, 2, 3 }; + const unsigned int paddedShape[] = { 1, 6, 6, 3 }; + const unsigned int weightsShape[] = { 1, 2, 3, 3 }; + const unsigned int outputShape[] = { 1, 2, 1, 1 }; + + + armnn::TensorInfo inputInfo(4, inputShape, DataType::Float32); + armnn::TensorInfo paddedInfo(4, paddedShape, DataType::Float32); + armnn::TensorInfo outputInfo(4, outputShape, DataType::Float32); + + Layer* input = graph.AddLayer<InputLayer>(0, "input"); + input->GetOutputSlot().SetTensorInfo(inputInfo); + + PadDescriptor padDescriptor({{ 0, 0 }, { 2, 2 }, { 2, 2 }, { 0, 0 }}); + + PadLayer* padLayer = graph.AddLayer<PadLayer>(padDescriptor, "pad"); + padLayer->GetOutputSlot().SetTensorInfo(paddedInfo); + + Convolution2dDescriptor convolution2dDescriptor; + convolution2dDescriptor.m_BiasEnabled = false; + convolution2dDescriptor.m_StrideX = 1; + convolution2dDescriptor.m_StrideY = 1; + convolution2dDescriptor.m_DataLayout = DataLayout::NHWC; + + std::vector<float> weightsVector(18); + armnn::ConstTensor weights(armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32), weightsVector); + + Convolution2dLayer* conv2dLayer = graph.AddLayer<Convolution2dLayer>(convolution2dDescriptor,"conv2d"); + conv2dLayer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights); + conv2dLayer->GetOutputSlot().SetTensorInfo(outputInfo); + + Layer* output = graph.AddLayer<OutputLayer>(0, "output"); + + // Connect up layers - input -> pad -> conv2d -> output + input->GetOutputSlot().Connect(padLayer->GetInputSlot(0)); + 
padLayer->GetOutputSlot().Connect(conv2dLayer->GetInputSlot(0)); + conv2dLayer->GetOutputSlot().Connect(output->GetInputSlot(0)); + + auto checkSimpleConv2d = [ ](const armnn::Layer* const layer) -> bool + { + const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer); + const auto conv2dLayerParams = conv2dLayer->GetParameters(); + return IsLayerOfType<armnn::Convolution2dLayer>(layer) && + (layer->GetNameStr() == "conv2d") && + (conv2dLayerParams.m_PadLeft == 0) && + (conv2dLayerParams.m_PadRight == 0) && + (conv2dLayerParams.m_PadTop == 0) && + (conv2dLayerParams.m_PadBottom == 0) && + (conv2dLayerParams.m_BiasEnabled == false) && + (conv2dLayerParams.m_StrideX == 1) && + (conv2dLayerParams.m_StrideY == 1) && + (conv2dLayerParams.m_DataLayout == DataLayout::NHWC); + }; + + BOOST_TEST(CheckSequence(graph.cbegin(), + graph.cend(), + &IsLayerOfType<armnn::InputLayer>, + &IsLayerOfType<armnn::PadLayer>, + checkSimpleConv2d, + &IsLayerOfType<armnn::OutputLayer>)); + + armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FoldPadIntoConvolution2d())); + + auto checkPadFoldedIntoConv2d = [ ](const armnn::Layer* const layer) -> bool + { + const auto conv2dLayer = static_cast<const armnn::Convolution2dLayer*>(layer); + const auto conv2dLayerParams = conv2dLayer->GetParameters(); + return IsLayerOfType<armnn::Convolution2dLayer>(layer) && + (layer->GetNameStr() == "folded-pad-into-conv2d") && + (conv2dLayerParams.m_PadLeft == 2) && + (conv2dLayerParams.m_PadRight == 2) && + (conv2dLayerParams.m_PadTop == 2) && + (conv2dLayerParams.m_PadBottom == 2) && + (conv2dLayerParams.m_BiasEnabled == false) && + (conv2dLayerParams.m_StrideX == 1) && + (conv2dLayerParams.m_StrideY == 1) && + (conv2dLayerParams.m_DataLayout == DataLayout::NHWC); + }; + + BOOST_TEST(CheckSequence(graph.cbegin(), + graph.cend(), + &IsLayerOfType<armnn::InputLayer>, + checkPadFoldedIntoConv2d, + &IsLayerOfType<armnn::OutputLayer>)); +} + BOOST_AUTO_TEST_SUITE_END() |