diff options
author | Rob Hughes <robert.hughes@arm.com> | 2019-09-24 09:34:53 +0100 |
---|---|---|
committer | Jim Flynn Arm <jim.flynn@arm.com> | 2019-09-25 02:44:48 +0000 |
commit | 95e73d77b9a79f7d350a39d85f07d09cd58422cc (patch) | |
tree | 8b713e8d29d433a28b29b8a9ce34052e8cccd08c /src/armnn/test/optimizations/MovePermuteUpTests.cpp | |
parent | 4833cea9036df428634cf64d8f1c4b54fc5da41f (diff) | |
download | armnn-95e73d77b9a79f7d350a39d85f07d09cd58422cc.tar.gz |
NNXSW-1826 Move tests for Optimization classes to separate files
This splits up the >1000 line OptimizerTests.cpp file.
Each Optimization class now has its own test file, all of which are in a
subfolder of tests called "optimizations".
The original OptimizerTests.cpp now contains mostly (possibly entirely) tests
that validate output shapes; these should perhaps be moved to test files
specific to the layer types they exercise.
Change-Id: Icd1196cad8b720abcb156921aab1adbd4026756b
Signed-off-by: Rob Hughes <robert.hughes@arm.com>
Diffstat (limited to 'src/armnn/test/optimizations/MovePermuteUpTests.cpp')
-rw-r--r-- | src/armnn/test/optimizations/MovePermuteUpTests.cpp | 92 |
1 files changed, 92 insertions, 0 deletions
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp new file mode 100644 index 0000000000..2c297d65cd --- /dev/null +++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp @@ -0,0 +1,92 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. +// SPDX-License-Identifier: MIT +// + +#include "../TestUtils.hpp" + +#include <Optimizer.hpp> + +#include <boost/test/unit_test.hpp> + +BOOST_AUTO_TEST_SUITE(Optimizer) +using namespace armnn::optimizations; + +BOOST_AUTO_TEST_CASE(MovePermuteUpTest) +{ + const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32); + const armnn::TensorInfo permuted({ 1, 3, 5, 2 }, armnn::DataType::Float32); + + armnn::Graph graph; + + armnn::LayerBindingId inputId = 0; + + armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output"); + + std::string permuteLayerName = "original_permute"; + + // Insert permute + head = graph.InsertNewLayer<armnn::PermuteLayer>(head->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 2, 3, 1 }), + permuteLayerName.c_str()); + + head->GetOutputHandler().SetTensorInfo(permuted); + + // Inserts layers that don't care about data format. + head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, ""); + head->GetOutputHandler().SetTensorInfo(info); + + head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), ""); + head->GetOutputHandler().SetTensorInfo(info); + + // Inserts input for 2nd input of Addition. 
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "") + ->GetOutputHandler() + .SetTensorInfo(info); + + head = graph.InsertNewLayer<armnn::FakeQuantizationLayer>(head->GetInputSlot(0), + armnn::FakeQuantizationDescriptor{}, ""); + head->GetOutputHandler().SetTensorInfo(info); + + head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), ""); + head->GetOutputHandler().SetTensorInfo(info); + + head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), ""); + head->GetOutputHandler().SetTensorInfo(info); + + head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), ""); + head->GetOutputHandler().SetTensorInfo(info); + + // Inserts input for 2nd input of Multiplication. + graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "") + ->GetOutputHandler() + .SetTensorInfo(info); + + // Inserts input. + graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "") + ->GetOutputHandler() + .SetTensorInfo(info); + + BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, + &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, + &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>, + &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>, + &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>, + &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>)); + + armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp())); + + // The permute is moved to the top. New permutes for layers with multiple inputs. 
+ BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, + &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>, + &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>, + &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>, + &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>, + &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>, + &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>)); + + std::list<std::string> testRelatedLayers = { permuteLayerName }; + + BOOST_TEST(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers)); +} + +BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file |