diff options
author | Mike Kelly <mike.kelly@arm.com> | 2020-03-03 12:39:09 +0000 |
---|---|---|
committer | mike.kelly <mike.kelly@arm.com> | 2020-03-03 15:05:30 +0000 |
commit | 490b7becb8029ead26423b0d62e631a929e55d6c (patch) | |
tree | 31148ace54164f62927062b662b2526f22a02e95 /src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp | |
parent | 4a9e24bfc51eec7e593470091fb7e6e435ae3991 (diff) | |
download | armnn-490b7becb8029ead26423b0d62e631a929e55d6c.tar.gz |
IVGCVSW-4375 Add support for Transpose to optimizations
* Changed some existing Permutation specific optimizations to also support Transpose
* Added MoveTransposeUp optimization
* Added TransposeAsReshape optimization
* Added tests for Transpose optimizations
* Added missing layer tests for Transpose
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I20d099b284861402ae94aaa5dbf34907327a485f
Diffstat (limited to 'src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp')
-rw-r--r-- | src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp | 108 |
1 file changed, 108 insertions, 0 deletions
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp index 74ee18b482..c2180a63ca 100644 --- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp +++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp @@ -49,6 +49,37 @@ INetworkPtr CreateTestNetwork() return network; } +/// Shared function for the below tests, so that we test the same network in both cases. +INetworkPtr CreateTransposeTestNetwork() +{ + // Create a network + INetworkPtr network = INetwork::Create(); + + auto input = network->AddInputLayer(0, "input"); + const TensorInfo inputInfo({ 1, 2, 3, 4 }, DataType::Float32); + input->GetOutputSlot(0).SetTensorInfo(inputInfo); + + // Insert Permute which swaps batches and channels dimensions + auto permute = network->AddTransposeLayer(TransposeDescriptor(PermutationVector{ 3, 1, 2, 0 }), "permute"); + const TensorInfo permuteInfo({ 4, 2, 3, 1 }, DataType::Float32); + permute->GetOutputSlot(0).SetTensorInfo(permuteInfo); + input->GetOutputSlot(0).Connect(permute->GetInputSlot(0)); + + // Insert BatchToSpace + BatchToSpaceNdDescriptor batchToSpaceDesc; + batchToSpaceDesc.m_BlockShape = { 2, 2 }; + batchToSpaceDesc.m_DataLayout = DataLayout::NHWC; + auto batchToSpace = network->AddBatchToSpaceNdLayer(batchToSpaceDesc, "batchToSpace"); + const TensorInfo batchToSpaceInfo({ 1, 4, 6, 1 }, DataType::Float32); + batchToSpace->GetOutputSlot(0).SetTensorInfo(batchToSpaceInfo); + permute->GetOutputSlot(0).Connect(batchToSpace->GetInputSlot(0)); + + auto output = network->AddOutputLayer(0, "output"); + batchToSpace->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + return network; +} + } // namespace /// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected. 
@@ -81,6 +112,36 @@ BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest) BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers)); } +/// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected. +/// Note this does not ensure the correctness of the optimization - that is done in the below test. +BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest) +{ + INetworkPtr network = CreateTransposeTestNetwork(); + Graph graph = static_cast<Network*>(network.get())->GetGraph(); + + // Confirm initial graph is as we expect + BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>, + &IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>)); + + // Perform the optimization which should merge the two layers into a DepthToSpace + armnn::Optimizer::Pass(graph, MakeOptimizations(TransposeAndBatchToSpaceAsDepthToSpace())); + + // Check that the replacement has been made as expected + auto checkDepthToSpace = [](const Layer* const layer) -> bool { + return IsLayerOfType<DepthToSpaceLayer>(layer) && + static_cast<const DepthToSpaceLayer*>(layer)->GetParameters().m_BlockSize == 2 && + static_cast<const DepthToSpaceLayer*>(layer)->GetParameters().m_DataLayout == DataLayout::NHWC && + layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32); + }; + + BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace, + &IsLayerOfType<OutputLayer>)); + + // Check the new layer has the two merged layers listed as related layers + std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" }; + BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers)); +} + // This unit test needs the reference backend, it's not available if the reference backend is not built #if defined(ARMNNREF_ENABLED) @@ -130,6 +191,53 @@ 
BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest) }; BOOST_TEST(outputData == expectedOutput); } + +/// Tests that a optimization performed by PermuteAndBatchToSpaceAsDepthToSpace does not change the behaviour +/// of the network (i.e. it still produces the correct output). +BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest) +{ + INetworkPtr network = CreateTransposeTestNetwork(); + + IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions()); + IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec()); + + // Confirm that the optimization has actually taken place + const Graph& optGraph = static_cast<OptimizedNetwork*>(optimizedNetwork.get())->GetGraph(); + BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>, + &IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>)); + + // Load the graph into a runtime so we can check it produces the correct output + NetworkId netId; + runtime->LoadNetwork(netId, std::move(optimizedNetwork)); + + std::vector<float> inputData{ + // Each row here is a row of pixels where each pixel has 4 channels + // clang-format off + 1.0f, 2.0f, 3.0f, 4.0f, 10.0f, 20.0f, 30.0f, 40.0f, 100.0f, 200.0f, 300.0f, 400.0f, + -1.0f, -2.0f, -3.0f, -4.0f, -10.0f, -20.0f, -30.0f, -40.0f, -100.0f, -200.0f, -300.0f, -400.0f, + // clang-format on + }; + ConstTensor input(TensorInfo({ 1, 2, 3, 4 }, DataType::Float32), inputData); + InputTensors inputs = { { 0, input } }; + std::vector<float> outputData(4 * 6); + Tensor output(TensorInfo({ 1, 4, 6, 1 }, DataType::Float32), outputData.data()); + OutputTensors outputs = { { 0, output } }; + runtime->EnqueueWorkload(netId, inputs, outputs); + + // Check the output is as expected. + // Note this output has been generated by running the network *without* the optimization. 
+ std::vector<float> expectedOutput = { + // Rows and columns here match exactly with the tensor, as there is only 1 channel. + // clang-format off + 1.0f, 2.0f, 10.0f, 20.0f, 100.0f, 200.0f, + 3.0f, 4.0f, 30.0f, 40.0f, 300.0f, 400.0f, + + -1.0f, -2.0f, -10.0f, -20.0f, -100.0f, -200.0f, + -3.0f, -4.0f, -30.0f, -40.0f, -300.0f, -400.0f, + // clang-format on + }; + BOOST_TEST(outputData == expectedOutput); +} #endif BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file |