From 738c2e6a647b886750e1bc3daa6dd615a0244baa Mon Sep 17 00:00:00 2001 From: keidav01 Date: Tue, 11 Dec 2018 16:14:20 +0000 Subject: IVGCVSW-1434 Add debug mode to Optimizer * Modified optimizer to support debug mode via DebugLayer Change-Id: Ic8f313778e55540c182cf99876c44a0823be04c6 --- src/armnn/Network.cpp | 6 +++ src/armnn/NetworkUtils.cpp | 29 +++++++++++++ src/armnn/NetworkUtils.hpp | 2 + src/armnn/optimizations/AddDebug.hpp | 37 +++++++++++++++++ src/armnn/optimizations/All.hpp | 1 + src/armnn/test/OptimizerTests.cpp | 37 +++++++++++++++++ .../reference/test/RefOptimizedNetworkTests.cpp | 47 ++++++++++++++++++++++ 7 files changed, 159 insertions(+) create mode 100644 src/armnn/optimizations/AddDebug.hpp (limited to 'src') diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 0cf0ed36fd..ecab5041db 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -129,6 +129,12 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter())); } + // if debug optimization is set, then print out data after each layer + if (options.m_Debug) + { + Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(InsertDebugLayer())); + } + // We know that DeviceSpec should be the only implementation of IDeviceSpec. 
const DeviceSpec& spec = *boost::polymorphic_downcast(&deviceSpec); auto const& supportedBackends = spec.GetSupportedBackends(); diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp index 1e3add6301..9a4ce87b59 100644 --- a/src/armnn/NetworkUtils.cpp +++ b/src/armnn/NetworkUtils.cpp @@ -74,4 +74,33 @@ std::vector InsertConvertFp32ToFp16LayersAfter(Graph& g return convertLayers; } + +std::vector InsertDebugLayerAfter(Graph& graph, Layer& layer) +{ + std::vector debugLayers; + debugLayers.reserve(layer.GetNumOutputSlots()); + + // Insert a debug layer after each output slot of this layer + for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot) + { + // Insert debug layer after the layer + const std::string name = + std::string("DebugLayerAfter") + layer.GetName(); + + const DebugDescriptor descriptor; + + DebugLayer* debugLayer = + graph.InsertNewLayer(*outputSlot, descriptor, name.c_str()); + + // Sets output tensor info for the debug layer. + TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(); + + debugLayer->GetOutputSlot().SetTensorInfo(debugInfo); + + debugLayers.emplace_back(debugLayer); + } + + return debugLayers; +} + } // namespace armnn diff --git a/src/armnn/NetworkUtils.hpp b/src/armnn/NetworkUtils.hpp index dbb85380ff..b81d5cb5e7 100644 --- a/src/armnn/NetworkUtils.hpp +++ b/src/armnn/NetworkUtils.hpp @@ -14,4 +14,6 @@ std::vector InsertConvertFp16ToFp32LayersBefore(Graph& std::vector InsertConvertFp32ToFp16LayersAfter(Graph& graph, Layer& layer); +std::vector InsertDebugLayerAfter(Graph& graph, Layer& layer); + } // namespace armnn diff --git a/src/armnn/optimizations/AddDebug.hpp b/src/armnn/optimizations/AddDebug.hpp new file mode 100644 index 0000000000..60271b0d77 --- /dev/null +++ b/src/armnn/optimizations/AddDebug.hpp @@ -0,0 +1,37 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include "Optimization.hpp" +#include "NetworkUtils.hpp" + +namespace armnn +{ +namespace optimizations +{ + +class AddDebugImpl +{ +public: + + void Run(Graph& graph, Layer& layer) const + { + if (layer.GetType() != LayerType::Debug && layer.GetType() != LayerType::Output) + { + // insert a debug layer after this layer, skipping layers that + // are themselves Debug or Output layers + InsertDebugLayerAfter(graph, layer); + } + } + +protected: + AddDebugImpl() = default; + ~AddDebugImpl() = default; +}; + +using InsertDebugLayer = OptimizeForType; + +} // namespace optimizations +} // namespace armnn diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp index a1bff3c5e1..0a6684ee3b 100644 --- a/src/armnn/optimizations/All.hpp +++ b/src/armnn/optimizations/All.hpp @@ -12,3 +12,4 @@ #include "MovePermuteUp.hpp" #include "OptimizeInverseConversions.hpp" #include "ConvertFp32NetworkToFp16.hpp" +#include "AddDebug.hpp" diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index 30ca52092a..29d1702c64 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -768,6 +768,43 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest) &IsLayerOfType)); } +BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest) +{ + armnn::Graph graph; + + const armnn::TensorInfo info({ 2,2,1,3 }, armnn::DataType::Float32); + + // Create the simple test network + auto input = graph.AddLayer(0, "input"); + input->GetOutputSlot().SetTensorInfo(info); + + auto floor = graph.AddLayer("floor"); + floor->GetOutputSlot().SetTensorInfo(info); + + auto output = graph.AddLayer(1, "output"); + + // Connect up the layers + input->GetOutputSlot().Connect(floor->GetInputSlot(0)); + floor->GetOutputSlot().Connect(output->GetInputSlot(0)); + + BOOST_TEST(CheckSequence(graph.cbegin(), + graph.cend(), + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType)); + + // Run the optimizer 
+ armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(InsertDebugLayer())); + + BOOST_TEST(CheckSequence(graph.cbegin(), + graph.cend(), + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType, + &IsLayerOfType)); +} + void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape, const unsigned int* weightsShape, const unsigned int* outputShape, DataLayout dataLayout = DataLayout::NCHW) diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp index 907e7950f5..68617b9d4d 100644 --- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp +++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp @@ -10,6 +10,7 @@ #include #include +#include BOOST_AUTO_TEST_SUITE(RefOptimizedNetwork) @@ -209,4 +210,50 @@ BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnCpuRef) BOOST_TEST(ss.str() == expected.str()); } +BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef) +{ + armnn::Network net; + + armnn::ActivationDescriptor activation1Descriptor; + activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu; + activation1Descriptor.m_A = 1.f; + activation1Descriptor.m_B = -1.f; + + // Defines layers. + auto input = net.AddInputLayer(0, "InputLayer"); + auto activation = net.AddActivationLayer(activation1Descriptor, "ActivationLayer"); + auto output = net.AddOutputLayer(0, "OutputLayer"); + + // Connects layers. 
+ input->GetOutputSlot(0).Connect(activation->GetInputSlot(0)); + activation->GetOutputSlot(0).Connect(output->GetInputSlot(0)); + + armnn::TensorShape shape({4}); + armnn::TensorInfo info(shape, armnn::DataType::Float32); + input->GetOutputSlot(0).SetTensorInfo(info); + activation->GetOutputSlot(0).SetTensorInfo(info); + + armnn::IRuntime::CreationOptions options; + armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options)); + + std::vector backends = {armnn::Compute::CpuRef}; + + armnn::OptimizerOptions optimizerOptions; + optimizerOptions.m_Debug = true; + + armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(), + optimizerOptions); + + const armnn::Graph& graph = static_cast(optimizedNet.get())->GetGraph(); + // Tests that all layers are present in the graph. + BOOST_TEST(graph.GetNumLayers() == 5); + + // Tests that the vertices exist and have correct names. + BOOST_TEST(GraphHasNamedLayer(graph, "InputLayer")); + BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterInputLayer")); + BOOST_TEST(GraphHasNamedLayer(graph, "ActivationLayer")); + BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterActivationLayer")); + BOOST_TEST(GraphHasNamedLayer(graph, "OutputLayer")); +} + BOOST_AUTO_TEST_SUITE_END() -- cgit v1.2.1