aboutsummaryrefslogtreecommitdiff
path: root/src/armnn/test
diff options
context:
space:
mode:
authorsurmeh01 <surabhi.mehta@arm.com>2018-03-29 16:29:27 +0100
committersurmeh01 <surabhi.mehta@arm.com>2018-03-29 16:29:27 +0100
commitbceff2fb3fc68bb0aa88b886900c34b77340c826 (patch)
treed867d3e090d58d3012dfbbac456e9ea8c7f789bc /src/armnn/test
parent4fcda0101ec3d110c1d6d7bee5c83416b645528a (diff)
downloadarmnn-bceff2fb3fc68bb0aa88b886900c34b77340c826.tar.gz
Release 18.03
Diffstat (limited to 'src/armnn/test')
-rw-r--r--src/armnn/test/Network_test.cpp58
-rw-r--r--src/armnn/test/OptimizerTests.cpp334
-rw-r--r--src/armnn/test/RuntimeTests.cpp15
3 files changed, 405 insertions, 2 deletions
diff --git a/src/armnn/test/Network_test.cpp b/src/armnn/test/Network_test.cpp
index 523d47b169..057caa0505 100644
--- a/src/armnn/test/Network_test.cpp
+++ b/src/armnn/test/Network_test.cpp
@@ -29,6 +29,64 @@ bool AreAllLayerInputSlotsConnected(const armnn::IConnectableLayer& layer)
BOOST_AUTO_TEST_SUITE(Network)
+BOOST_AUTO_TEST_CASE(LayerGuids)
+{
+ armnn::Network net;
+ armnn::LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
+ armnn::LayerGuid addId = net.AddAdditionLayer()->GetGuid();
+ armnn::LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
+
+ BOOST_TEST(inputId != addId);
+ BOOST_TEST(addId != outputId);
+ BOOST_TEST(inputId != outputId);
+}
+
+BOOST_AUTO_TEST_CASE(SerializeToDot)
+{
+ armnn::Network net;
+
+ //define layers
+ auto input = net.AddInputLayer(0);
+ auto add = net.AddAdditionLayer();
+ auto output = net.AddOutputLayer(0);
+
+ // connect layers
+ input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+ input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+ add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ armnn::TensorShape shape({4});
+ armnn::TensorInfo info(shape, armnn::DataType::Float32);
+ input->GetOutputSlot(0).SetTensorInfo(info);
+ add->GetOutputSlot(0).SetTensorInfo(info);
+
+ armnn::DeviceSpec spec;
+ spec.DefaultComputeDevice = armnn::Compute::CpuAcc;
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, spec);
+
+ std::ostringstream ss;
+ optimizedNet->SerializeToDot(ss);
+
+ auto inputId = input->GetGuid();
+ auto addId = add->GetGuid();
+ auto outputId = output->GetGuid();
+
+ std::stringstream expected;
+ expected <<
+ "digraph Optimized {\n"
+ " node [shape=\"record\"];\n"
+ " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
+ " " << inputId << " [label=\"{Input}\"];\n"
+ " " << addId << " [label=\"{Addition}\"];\n"
+ " " << outputId << " [label=\"{Output}\"];\n"
+ " " << inputId << " -> " << addId << " [label=< [4] >];\n"
+ " " << inputId << " -> " << addId << " [label=< [4] >];\n"
+ " " << addId << " -> " << outputId << " [label=< [4] >];\n"
+ "}\n";
+
+ BOOST_TEST(ss.str() == expected.str());
+}
+
BOOST_AUTO_TEST_CASE(NetworkBasic)
{
armnn::Network net;
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
new file mode 100644
index 0000000000..da26fba76e
--- /dev/null
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -0,0 +1,334 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// See LICENSE file in the project root for full license information.
+//
+#include <boost/test/unit_test.hpp>
+
+#include "armnn/ArmNN.hpp"
+#include "Graph.hpp"
+#include "Optimizer.hpp"
+
+namespace
+{
+template <typename LayerT>
+bool IsLayerOfType(const armnn::Layer* const layer)
+{
+ return (layer->GetType() == armnn::LayerEnumOf<LayerT>());
+}
+
+bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
+{
+ return (first == last);
+}
+
+/// Checks that each unary function in Us evaluates true for the corresponding layer in the sequence [first, last)
+template <typename U, typename... Us>
+bool CheckSequence(const armnn::Graph::ConstIterator first,
+ const armnn::Graph::ConstIterator last,
+ U&& u,
+ Us&&... us)
+{
+ return u(*first) && CheckSequence(std::next(first), last, us...);
+}
+}
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+
+BOOST_AUTO_TEST_CASE(OptimizeInversePermutes)
+{
+ armnn::Graph graph;
+
+ auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+ graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
+
+ // Insert two permutes, one the inverse of the other
+ graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
+ armnn::PermuteDescriptor({0, 2, 3, 1}),
+ "perm0231");
+ graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
+ armnn::PermuteDescriptor({0, 3, 1, 2}),
+ "perm0312");
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The permutes are removed
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_CASE(MovePermuteUp)
+{
+ const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+ const armnn::TensorInfo permuted({ 1, 3, 5, 2 }, armnn::DataType::Float32);
+
+ armnn::Graph graph;
+
+ armnn::LayerBindingId inputId = 0;
+
+ armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+ // Insert permute
+ head = graph.InsertNewLayer<armnn::PermuteLayer>(head->GetInputSlot(0),
+ armnn::PermuteDescriptor({ 0, 2, 3, 1 }), "");
+ head->GetOutputHandler().SetTensorInfo(permuted);
+
+ // Insert layers that don't care about data format
+ head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0),
+ armnn::ActivationDescriptor{}, "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ // Insert input for 2nd input of Addition
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
+ ->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::FakeQuantizationLayer>(head->GetInputSlot(0),
+ armnn::FakeQuantizationDescriptor{}, "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+ head->GetOutputHandler().SetTensorInfo(info);
+
+ // Insert input for 2nd input of Multiplication
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
+ ->GetOutputHandler().SetTensorInfo(info);
+
+ // Insert input
+ graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
+ ->GetOutputHandler().SetTensorInfo(info);
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The permute is moved to the top. New permutes for layers with multiple inputs
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::MultiplicationLayer>,
+ &IsLayerOfType<armnn::MemCopyLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::FakeQuantizationLayer>,
+ &IsLayerOfType<armnn::AdditionLayer>,
+ &IsLayerOfType<armnn::ActivationLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_CASE(PermuteAsReshape)
+{
+ armnn::Graph graph;
+
+ const armnn::TensorInfo infoIn({ 1, 2, 3, 1 }, armnn::DataType::Float32);
+ const armnn::TensorInfo infoOut({ 1, 1, 2, 3 }, armnn::DataType::Float32);
+
+ auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+ graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input")
+ ->GetOutputHandler().SetTensorInfo(infoIn);
+
+ // Insert permute
+ graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
+ armnn::PermuteDescriptor({ 0, 2, 3, 1 }), "")
+ ->GetOutputHandler().SetTensorInfo(infoOut);
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The permute is replaced by an equivalent reshape.
+
+ auto checkReshape = [&infoOut](const armnn::Layer* const layer) -> bool
+ {
+ const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
+ return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
+ (reshapeLayer->GetParameters().m_TargetShape == infoOut.GetShape()) &&
+ (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
+ };
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ checkReshape,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapes)
+{
+ armnn::Graph graph;
+
+ const armnn::TensorInfo info0({ 1, 2, 3, 5 }, armnn::DataType::Float32);
+
+ auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+ auto input = graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
+
+ input->GetOutputHandler().SetTensorInfo(info0);
+
+ {
+ // Insert two reshapes
+ const armnn::TensorInfo info1({1, 30, 1, 1}, armnn::DataType::Float32);
+ const armnn::TensorInfo info2({1, 2, 1, 15}, armnn::DataType::Float32);
+
+ auto reshape1 = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
+ armnn::ReshapeDescriptor{ info1.GetShape() },
+ "reshape1");
+ auto reshape2 = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
+ armnn::ReshapeDescriptor{ info2.GetShape() },
+ "reshape2");
+
+ reshape1->GetOutputHandler().SetTensorInfo(info1);
+ reshape2->GetOutputHandler().SetTensorInfo(info2);
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ auto checkReshape = [&info2](const armnn::Layer* const layer) -> bool
+ {
+ const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
+ return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
+ (reshapeLayer->GetParameters().m_TargetShape == info2.GetShape()) &&
+ (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == info2.GetShape());
+ };
+
+ // The two reshapes are replaced by a single equivalent reshape
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ checkReshape,
+ &IsLayerOfType<armnn::OutputLayer>));
+ }
+
+ {
+ // Insert a reshape to the input shape
+ auto reshapeToIn = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
+ armnn::ReshapeDescriptor{ info0.GetShape() },
+ "reshapeToIn");
+
+ reshapeToIn->GetOutputHandler().SetTensorInfo(info0);
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The two reshapes are removed
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+ }
+}
+
+BOOST_AUTO_TEST_CASE(SquashEqualSiblings)
+{
+ armnn::Graph graph;
+
+ armnn::LayerBindingId outputId = 0;
+
+ const armnn::TensorInfo info({ 1, 2, 3, 5 }, armnn::DataType::Float32);
+ const armnn::TensorInfo permuted({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+
+ auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+ input->GetOutputSlot().SetTensorInfo(info);
+
+ // Insert equal permutes, equal reshapes and something else
+ const armnn::PermuteDescriptor permDesc({ 0, 2, 3, 1 });
+ const armnn::ReshapeDescriptor reshapeDesc{ { 1, 3, 1, 5 } };
+
+ armnn::Layer* layer;
+
+ layer = graph.AddLayer<armnn::PermuteLayer>(permDesc, "");
+ layer->GetOutputSlot().SetTensorInfo(permuted);
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ layer = graph.AddLayer<armnn::ReshapeLayer>(reshapeDesc, "");
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ layer = graph.AddLayer<armnn::FloorLayer>("");
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ layer = graph.AddLayer<armnn::ReshapeLayer>(reshapeDesc, "");
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ layer = graph.AddLayer<armnn::PermuteLayer>(permDesc, "");
+ layer->GetOutputSlot().SetTensorInfo(permuted);
+ layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+ input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ armnn::Optimizer::Optimize(graph);
+
+ // The permutes and reshapes are squashed.
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::PermuteLayer>,
+ &IsLayerOfType<armnn::ReshapeLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 117df5e55a..e42d71c37d 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
BOOST_TEST(leakedBefore == leakedAfter);
// Add reasonable threshold after and before running valgrind with the ACL clear cache function.
- BOOST_TEST(reachableAfter - reachableBefore < 30000);
+ BOOST_TEST(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 1024);
// these are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning
@@ -178,7 +178,18 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
// if we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass
BOOST_TEST(leakedBefore == leakedAfter);
- BOOST_TEST(reachableBefore == reachableAfter);
+
+ #if defined(ARMCOMPUTECL_ENABLED)
+ // reachableBefore == reachableAfter should hold, but on OpenCL with Android we are still
+ // not entirely able to control the memory in the OpenCL driver. Testing is showing that
+ // after this test (which clears all OpenCL memory) we are clearing a little bit more than
+ // we expect, probably depending on the order in which other tests are run.
+ BOOST_TEST(reachableBefore - reachableAfter <= 24);
+ #else
+ BOOST_TEST(reachableBefore == reachableAfter);
+ #endif
+
+ BOOST_TEST(reachableBefore >= reachableAfter);
// these are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning