Diffstat (limited to 'src/backends/reference')
-rw-r--r--  src/backends/reference/backend.mk                          |   3
-rw-r--r--  src/backends/reference/test/CMakeLists.txt                 |   3
-rw-r--r--  src/backends/reference/test/RefEndToEndTests.cpp           | 251
-rw-r--r--  src/backends/reference/test/RefJsonPrinterTests.cpp        |  22
-rw-r--r--  src/backends/reference/test/RefOptimizedNetworkTests.cpp   | 212
5 files changed, 491 insertions, 0 deletions
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 455ab4618e..007efceb9b 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -65,6 +65,9 @@ BACKEND_SOURCES := \
BACKEND_TEST_SOURCES := \
test/RefCreateWorkloadTests.cpp \
+ test/RefEndToEndTests.cpp \
+ test/RefJsonPrinterTests.cpp \
test/RefLayerSupportTests.cpp \
test/RefLayerTests.cpp \
+ test/RefOptimizedNetworkTests.cpp \
test/RefRuntimeTests.cpp
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index dea0ef6498..1eec594aa9 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -5,8 +5,11 @@
list(APPEND armnnRefBackendUnitTests_sources
RefCreateWorkloadTests.cpp
+ RefEndToEndTests.cpp
+ RefJsonPrinterTests.cpp
RefLayerSupportTests.cpp
RefLayerTests.cpp
+ RefOptimizedNetworkTests.cpp
RefRuntimeTests.cpp
)
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
new file mode 100644
index 0000000000..8938d6f222
--- /dev/null
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -0,0 +1,251 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <backends/test/EndToEndTestImpl.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(RefEndToEnd)
+
+BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ BOOST_TEST(ConstantUsageFloat32Test(backends));
+}
+
+BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ BOOST_TEST(ConstantUsageUint8Test(backends));
+}
+
+BOOST_AUTO_TEST_CASE(Unsigned8)
+{
+ using namespace armnn;
+
+ // Creates the runtime in which the test will run.
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // Builds up the structure of the network.
+ armnn::INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input = net->AddInputLayer(0, "input");
+ IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
+ IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+ input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
+ softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ // Sets the tensors in the network.
+ TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ inputTensorInfo.SetQuantizationOffset(100);
+ inputTensorInfo.SetQuantizationScale(10000.0f);
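+ // With these parameters, real = scale * (quantized - offset), so the quantized input 200 below represents 10000 * (200 - 100) = 1,000,000.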
+ input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+ TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+ outputTensorInfo.SetQuantizationOffset(0);
+ outputTensorInfo.SetQuantizationScale(1.0f/255.0f);
+ softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
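+ // Softmax outputs lie in [0, 1], so a scale of 1/255 with zero offset covers the full uint8 range.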
+
+ // Optimizes the network.
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ auto error = runtime->LoadNetwork(netId, std::move(optNet));
+ BOOST_TEST(error == Status::Success);
+
+ // Creates structures for input & output.
+ std::vector<uint8_t> inputData
+ {
+ 1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
+ };
+ std::vector<uint8_t> outputData(5);
+
+ armnn::InputTensors inputTensors
+ {
+ {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ };
+ armnn::OutputTensors outputTensors
+ {
+ {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+ };
+
+ // Does the inference.
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Checks the results.
+ BOOST_TEST(outputData[0] == 0);
+ BOOST_TEST(outputData[1] == 0);
+ BOOST_TEST(outputData[2] == 0);
+ BOOST_TEST(outputData[3] == 255); // softmax has been saturated.
+ BOOST_TEST(outputData[4] == 0);
+}
+
+BOOST_AUTO_TEST_CASE(TrivialAdd)
+{
+ // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp.
+
+ using namespace armnn;
+
+ // Creates the runtime in which the test will run.
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // Builds up the structure of the network.
+ armnn::INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input1 = net->AddInputLayer(0);
+ IConnectableLayer* input2 = net->AddInputLayer(1);
+ IConnectableLayer* add = net->AddAdditionLayer();
+ IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input1->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+ input2->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+ add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ // Sets the tensors in the network.
+ TensorInfo tensorInfo(TensorShape({3, 4}), DataType::Float32);
+ input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
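+ // Addition is element-wise, so both inputs and the output share the {3, 4} shape.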
+
+ // Optimizes the network.
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ // Creates structures for input & output - matching android nn test.
+ std::vector<float> input1Data
+ {
+ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+ };
+ std::vector<float> input2Data
+ {
+ 100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f, 900.f, 1000.f, 1100.f, 1200.f
+ };
+ std::vector<float> outputData(12);
+
+ InputTensors inputTensors
+ {
+ {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
+ {1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
+ };
+ OutputTensors outputTensors
+ {
+ {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+ };
+
+ // Does the inference.
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Checks the results
+ BOOST_TEST(outputData[0] == 101);
+ BOOST_TEST(outputData[1] == 202);
+ BOOST_TEST(outputData[2] == 303);
+ BOOST_TEST(outputData[3] == 404);
+ BOOST_TEST(outputData[4] == 505);
+ BOOST_TEST(outputData[5] == 606);
+ BOOST_TEST(outputData[6] == 707);
+ BOOST_TEST(outputData[7] == 808);
+ BOOST_TEST(outputData[8] == 909);
+ BOOST_TEST(outputData[9] == 1010);
+ BOOST_TEST(outputData[10] == 1111);
+ BOOST_TEST(outputData[11] == 1212);
+}
+
+BOOST_AUTO_TEST_CASE(MultipleOutputs)
+{
+ using namespace armnn;
+
+ // Creates the runtime in which the test will run.
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ // Builds up the structure of the network.
+ INetworkPtr net(INetwork::Create());
+
+ IConnectableLayer* input = net->AddInputLayer(0);
+
+ // ReLu1
+ ActivationDescriptor activation1Descriptor;
+ activation1Descriptor.m_Function = ActivationFunction::BoundedReLu;
+ activation1Descriptor.m_A = 1.f;
+ activation1Descriptor.m_B = -1.f;
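+ // BoundedReLu clamps its input to [m_B, m_A], here [-1, 1].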
+ IConnectableLayer* activation1 = net->AddActivationLayer(activation1Descriptor);
+
+ // ReLu6
+ ActivationDescriptor activation2Descriptor;
+ activation2Descriptor.m_Function = ActivationFunction::BoundedReLu;
+ activation2Descriptor.m_A = 6.0f;
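+ // m_B defaults to 0, so this clamps to [0, 6].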
+ IConnectableLayer* activation2 = net->AddActivationLayer(activation2Descriptor);
+
+ // BoundedReLu(min=2, max=5)
+ ActivationDescriptor activation3Descriptor;
+ activation3Descriptor.m_Function = ActivationFunction::BoundedReLu;
+ activation3Descriptor.m_A = 5.0f;
+ activation3Descriptor.m_B = 2.0f;
+ IConnectableLayer* activation3 = net->AddActivationLayer(activation3Descriptor);
+
+ IConnectableLayer* output1 = net->AddOutputLayer(0);
+ IConnectableLayer* output2 = net->AddOutputLayer(1);
+ IConnectableLayer* output3 = net->AddOutputLayer(2);
+
+ input->GetOutputSlot(0).Connect(activation1->GetInputSlot(0));
+ input->GetOutputSlot(0).Connect(activation2->GetInputSlot(0));
+ input->GetOutputSlot(0).Connect(activation3->GetInputSlot(0));
+
+ activation1->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
+ activation2->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
+ activation3->GetOutputSlot(0).Connect(output3->GetInputSlot(0));
+
+ // Sets the tensors in the network.
+ TensorInfo tensorInfo(TensorShape({ 10 }), DataType::Float32);
+ input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ activation1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ activation2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+ activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+ // Optimizes the network.
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ // Loads it into the runtime.
+ NetworkId netId;
+ runtime->LoadNetwork(netId, std::move(optNet));
+
+ // Creates structures for input & output.
+ const std::vector<float> inputData{ 3.f, 5.f, 2.f, 3.f, 7.f, 0.f, -2.f, -1.f, 3.f, 3.f };
+
+ std::vector<float> output1Data(inputData.size());
+ std::vector<float> output2Data(inputData.size());
+ std::vector<float> output3Data(inputData.size());
+
+ InputTensors inputTensors
+ {
+ {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+ };
+ OutputTensors outputTensors
+ {
+ {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), output1Data.data())},
+ {1, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 1), output2Data.data())},
+ {2, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 2), output3Data.data())}
+ };
+
+ // Does the inference.
+ runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+ // Checks the results.
+ BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
+ BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
+ BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/reference/test/RefJsonPrinterTests.cpp b/src/backends/reference/test/RefJsonPrinterTests.cpp
new file mode 100644
index 0000000000..ee668a2513
--- /dev/null
+++ b/src/backends/reference/test/RefJsonPrinterTests.cpp
@@ -0,0 +1,22 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/BackendId.hpp>
+
+#include <backends/test/JsonPrinterTestImpl.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <vector>
+
+BOOST_AUTO_TEST_SUITE(RefJsonPrinter)
+
+BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuRefTest)
+{
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+ SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJsonPrinterResult(backends);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
new file mode 100644
index 0000000000..63615e6859
--- /dev/null
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -0,0 +1,212 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Graph.hpp>
+#include <armnn/Network.hpp>
+
+#include <backends/reference/RefWorkloadFactory.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(RefOptimizedNetwork)
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
+{
+ const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
+
+ armnn::Network net;
+
+ armnn::NormalizationDescriptor nmDesc;
+ armnn::ActivationDescriptor acDesc;
+
+ // in
+ // |
+ // nm
+ // / |
+ // ac |
+ // \ |
+ // ml
+ // |
+ // sm
+ // |
+ // ot
+ armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+ layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+ armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+
+ layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
+ normLayer->GetOutputSlot(0).SetTensorInfo(desc);
+
+ layer = net.AddActivationLayer(acDesc, "ac");
+
+ normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+ layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+ armnn::IConnectableLayer* prevLayer = layer;
+ layer = net.AddMultiplicationLayer("ml");
+
+ prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+ normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+ layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+ prevLayer = layer;
+ armnn::SoftmaxDescriptor softmaxDescriptor;
+ layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+
+ prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+ layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+ prevLayer = layer;
+ layer = net.AddOutputLayer(0, "ot");
+
+ prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
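+ // Allocates memory for the graph's intermediate (dynamic) tensors so that workloads can be created below.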
+ static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph().AllocateDynamicBuffers();
+ BOOST_CHECK(optNet);
+
+ // Validates workloads.
+ armnn::RefWorkloadFactory fact;
+ for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ {
+ BOOST_CHECK_NO_THROW(
+ layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
+ }
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
+{
+ // Creates the runtime in which the test will run.
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+ // Builds up the structure of the network.
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = net->AddInputLayer(0);
+
+ armnn::PermuteDescriptor descriptor({0, 2, 3, 1});
+ armnn::IConnectableLayer* permute = net->AddPermuteLayer(descriptor);
+
+ armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));
+ permute->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+ permute->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 4, 1, 4 }, armnn::DataType::Float32));
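+ // The mapping {0, 2, 3, 1} sends input dimension i to output dimension mapping[i], turning {1, 1, 4, 4} into {1, 4, 1, 4}.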
+
+ // Optimizes the network.
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ {
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+ }
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
+{
+ // Creates the runtime in which the test will run.
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+ // Builds up the structure of the network.
+ armnn::INetworkPtr net(armnn::INetwork::Create());
+
+ armnn::IConnectableLayer* input = net->AddInputLayer(0);
+
+ armnn::MeanDescriptor descriptor({ 0, 1 }, false);
+ armnn::IConnectableLayer* meanLayer = net->AddMeanLayer(descriptor);
+
+ armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+ input->GetOutputSlot(0).Connect(meanLayer->GetInputSlot(0));
+ meanLayer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 4, 3, 2 }, armnn::DataType::Float32));
+ meanLayer->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 2 }, armnn::DataType::Float32));
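+ // Reducing over axes 0 and 1 of {4, 3, 2} without keepDims leaves only the innermost dimension: {2}.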
+
+ // Optimizes the network.
+ armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+
+ for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+ {
+ BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+ }
+}
+
+BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnCpuRef)
+{
+ // Checks that, when FP16 turbo mode is set, the optimizer converts the FP32
+ // network to FP16: it adds an FP32->FP16 conversion layer after the input
+ // layer and an FP16->FP32 conversion layer before the output layer, keeps
+ // layers that support FP16 in FP16, and falls back to FP32 for layers that
+ // do not, wrapping them in conversion layers. Back-to-back inverse
+ // conversion layers are then removed by the optimizer. FloorLayer is
+ // currently not supported in FP16, so it rolls back to FP32 and its
+ // conversion layers are optimized away.
+ armnn::Network net;
+
+ // Defines layers.
+ auto input = net.AddInputLayer(0);
+ auto floor = net.AddFloorLayer();
+ auto output = net.AddOutputLayer(0);
+
+ // Connects layers.
+ input->GetOutputSlot(0).Connect(floor->GetInputSlot(0));
+ floor->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+ armnn::TensorShape shape({4});
+ armnn::TensorInfo info(shape, armnn::DataType::Float32);
+ input->GetOutputSlot(0).SetTensorInfo(info);
+ floor->GetOutputSlot(0).SetTensorInfo(info);
+
+ armnn::IRuntime::CreationOptions options;
+ armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+ std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+
+ armnn::OptimizerOptions optimizerOptions;
+ optimizerOptions.m_ReduceFp32ToFp16 = true;
+
+ armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(),
+ optimizerOptions);
+
+ std::ostringstream ss;
+ optimizedNet->SerializeToDot(ss);
+
+ auto inputId = input->GetGuid();
+ auto floorId = floor->GetGuid();
+ auto outputId = output->GetGuid();
+
+ std::stringstream expected;
+ expected <<
+ "digraph Optimized {\n"
+ " node [shape=\"record\"];\n"
+ " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
+ " " << inputId << " [label=\"{Input}\"];\n"
+ " " << floorId << " [label=\"{Floor}\"];\n"
+ " " << outputId << " [label=\"{Output}\"];\n"
+ " " << inputId << " -> " << floorId << " [label=< [4] >];\n"
+ " " << floorId << " -> " << outputId << " [label=< [4] >];\n"
+ "}\n";
+
+ BOOST_TEST(ss.str() == expected.str());
+}
+
+BOOST_AUTO_TEST_SUITE_END()