path: root/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
author     Mike Kelly <mike.kelly@arm.com>          2020-03-03 12:39:09 +0000
committer  mike.kelly <mike.kelly@arm.com>          2020-03-03 15:05:30 +0000
commit     490b7becb8029ead26423b0d62e631a929e55d6c (patch)
tree       31148ace54164f62927062b662b2526f22a02e95 /src/armnn/test/optimizations/MoveTransposeUpTests.cpp
parent     4a9e24bfc51eec7e593470091fb7e6e435ae3991 (diff)
download   armnn-490b7becb8029ead26423b0d62e631a929e55d6c.tar.gz
IVGCVSW-4375 Add support for Transpose to optimizations
* Changed some existing Permutation specific optimizations to also support Transpose
* Added MoveTransposeUp optimization
* Added TransposeAsReshape optimization
* Added tests for Transpose optimizations
* Added missing layer tests for Transpose

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I20d099b284861402ae94aaa5dbf34907327a485f
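For readers wanting to apply the new optimization outside of the test below: the pass is run through armnn::Optimizer::Pass with MakeOptimizations, exactly as the test does at its end. A minimal sketch, assuming the internal src/armnn include paths used by these tests; the wrapper function itself is illustrative and not part of this commit:

#include <Graph.hpp>
#include <Optimizer.hpp>

// Illustrative wrapper (not part of this commit): runs the new MoveTransposeUp
// optimization over an existing graph, mirroring the call in MoveTransposeUpTest.
void ApplyMoveTransposeUp(armnn::Graph& graph)
{
    using namespace armnn::optimizations;
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
}

The TransposeAsReshape optimization added by the same change can presumably be chained into the same MakeOptimizations call, but only MoveTransposeUp is exercised in this file.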
Diffstat (limited to 'src/armnn/test/optimizations/MoveTransposeUpTests.cpp')
-rw-r--r--  src/armnn/test/optimizations/MoveTransposeUpTests.cpp | 93
1 file changed, 93 insertions(+), 0 deletions(-)
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
new file mode 100644
index 0000000000..e2fb3abffb
--- /dev/null
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -0,0 +1,93 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
+{
+ const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo transposed({ 1, 3, 5, 2 }, armnn::DataType::Float32);
+
+ armnn::Graph graph;
+
+ armnn::LayerBindingId inputId = 0;
+
+ armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+ std::string transposeLayerName = "original_transpose";
+
+ // Insert transpose
+ head = graph.InsertNewLayer<armnn::TransposeLayer>(head->GetInputSlot(0),
+ armnn::TransposeDescriptor({ 0, 3, 1, 2 }),
+ transposeLayerName.c_str());
+
+ head->GetOutputHandler().SetTensorInfo(transposed);
+
+ // Inserts layers that don't care about data format.
+ head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ // Inserts input for 2nd input of Addition.
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
+ ->GetOutputHandler()
+ .SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::FakeQuantizationLayer>(head->GetInputSlot(0),
+ armnn::FakeQuantizationDescriptor{}, "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ // Inserts input for 2nd input of Multiplication.
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
+ ->GetOutputHandler()
+ .SetTensorInfo(info);
+
+ // Inserts input.
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
+ ->GetOutputHandler()
+ .SetTensorInfo(info);
+
+ BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
+
+ // The transpose is moved to the top; new transposes are inserted on the extra inputs of layers with multiple inputs.
+ BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
+ &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+ std::list<std::string> testRelatedLayers = { transposeLayerName };
+
+ BOOST_TEST(CheckRelatedLayers<armnn::TransposeLayer>(graph, testRelatedLayers));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
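A side note on the tensor shapes used in the test: the TransposeDescriptor mapping { 0, 3, 1, 2 } takes the { 1, 5, 2, 3 } input shape to the { 1, 3, 5, 2 } shape assigned to the `transposed` TensorInfo, i.e. output dimension i takes the size of input dimension mapping[i]. A standalone sketch of that dimension shuffle (plain C++, no Arm NN dependency; the helper name is made up and the mapping rule is inferred only from the shapes in this test):

#include <array>
#include <cstddef>
#include <iostream>

// Hypothetical helper reproducing the shape relationship seen in the test above:
// output dimension i takes the size of input dimension mapping[i].
std::array<unsigned int, 4> TransposeShape(const std::array<unsigned int, 4>& in,
                                           const std::array<unsigned int, 4>& mapping)
{
    std::array<unsigned int, 4> out{};
    for (std::size_t i = 0; i < 4; ++i)
    {
        out[i] = in[mapping[i]];
    }
    return out;
}

int main()
{
    const auto out = TransposeShape({ 1, 5, 2, 3 }, { 0, 3, 1, 2 });
    // Prints "1 3 5 2", matching the 'transposed' TensorInfo in MoveTransposeUpTest.
    std::cout << out[0] << ' ' << out[1] << ' ' << out[2] << ' ' << out[3] << '\n';
    return 0;
}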