From 738c2e6a647b886750e1bc3daa6dd615a0244baa Mon Sep 17 00:00:00 2001 From: keidav01 Date: Tue, 11 Dec 2018 16:14:20 +0000 Subject: IVGCVSW-1434 Add debug mode to Optimizer * Modified optimizer to support debug mode via DebugLayer Change-Id: Ic8f313778e55540c182cf99876c44a0823be04c6 --- src/armnn/Network.cpp | 6 ++++++ src/armnn/NetworkUtils.cpp | 29 ++++++++++++++++++++++++++++ src/armnn/NetworkUtils.hpp | 2 ++ src/armnn/optimizations/AddDebug.hpp | 37 ++++++++++++++++++++++++++++++++++++ src/armnn/optimizations/All.hpp | 1 + src/armnn/test/OptimizerTests.cpp | 37 ++++++++++++++++++++++++++++++++++++ 6 files changed, 112 insertions(+) create mode 100644 src/armnn/optimizations/AddDebug.hpp (limited to 'src/armnn') diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index 0cf0ed36fd..ecab5041db 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -129,6 +129,12 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork, Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter())); } + // if debug optimization is set, then print out data after each layer + if (options.m_Debug) + { + Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(InsertDebugLayer())); + } + // We know that DeviceSpec should be the only implementation of IDeviceSpec. 
const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec); auto const& supportedBackends = spec.GetSupportedBackends(); diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp index 1e3add6301..9a4ce87b59 100644 --- a/src/armnn/NetworkUtils.cpp +++ b/src/armnn/NetworkUtils.cpp @@ -74,4 +74,33 @@ std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& g return convertLayers; } + +std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer) +{ + std::vector<DebugLayer*> debugLayers; + debugLayers.reserve(layer.GetNumOutputSlots()); + + // Change outputs to DataType::Float16 + for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot) + { + // Insert debug layer after the layer + const std::string name = + std::string("DebugLayerAfter") + layer.GetName(); + + const DebugDescriptor descriptor; + + DebugLayer* debugLayer = + graph.InsertNewLayer<DebugLayer>(*outputSlot, descriptor, name.c_str()); + + // Sets output tensor info for the debug layer. + TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo(); + + debugLayer->GetOutputSlot().SetTensorInfo(debugInfo); + + debugLayers.emplace_back(debugLayer); + } + + return debugLayers; +} + } // namespace armnn diff --git a/src/armnn/NetworkUtils.hpp b/src/armnn/NetworkUtils.hpp index dbb85380ff..b81d5cb5e7 100644 --- a/src/armnn/NetworkUtils.hpp +++ b/src/armnn/NetworkUtils.hpp @@ -14,4 +14,6 @@ std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph& std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& graph, Layer& layer); +std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer); + } // namespace armnn diff --git a/src/armnn/optimizations/AddDebug.hpp b/src/armnn/optimizations/AddDebug.hpp new file mode 100644 index 0000000000..60271b0d77 --- /dev/null +++ b/src/armnn/optimizations/AddDebug.hpp @@ -0,0 +1,37 @@ +// +// Copyright © 2017 Arm Ltd. All rights reserved. 
+// SPDX-License-Identifier: MIT +// +#pragma once + +#include "Optimization.hpp" +#include "NetworkUtils.hpp" + +namespace armnn +{ +namespace optimizations +{ + +class AddDebugImpl +{ +public: + + void Run(Graph& graph, Layer& layer) const + { + if (layer.GetType() != LayerType::Debug && layer.GetType() != LayerType::Output) + { + // if the inputs/outputs of this layer do not have a debug layer + // insert the debug layer after them + InsertDebugLayerAfter(graph, layer); + } + } + +protected: + AddDebugImpl() = default; + ~AddDebugImpl() = default; +}; + +using InsertDebugLayer = OptimizeForType<Layer, AddDebugImpl>; + +} // namespace optimizations +} // namespace armnn diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp index a1bff3c5e1..0a6684ee3b 100644 --- a/src/armnn/optimizations/All.hpp +++ b/src/armnn/optimizations/All.hpp @@ -12,3 +12,4 @@ #include "MovePermuteUp.hpp" #include "OptimizeInverseConversions.hpp" #include "ConvertFp32NetworkToFp16.hpp" +#include "AddDebug.hpp" diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp index 30ca52092a..29d1702c64 100644 --- a/src/armnn/test/OptimizerTests.cpp +++ b/src/armnn/test/OptimizerTests.cpp @@ -768,6 +768,43 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest) &IsLayerOfType<armnn::OutputLayer>)); } +BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest) +{ + armnn::Graph graph; + + const armnn::TensorInfo info({ 2,2,1,3 }, armnn::DataType::Float32); + + // Create the simple test network + auto input = graph.AddLayer<armnn::InputLayer>(0, "input"); + input->GetOutputSlot().SetTensorInfo(info); + + auto floor = graph.AddLayer<armnn::FloorLayer>("floor"); + floor->GetOutputSlot().SetTensorInfo(info); + + auto output = graph.AddLayer<armnn::OutputLayer>(1, "output"); + + // Connect up the layers + input->GetOutputSlot().Connect(floor->GetInputSlot(0)); + floor->GetOutputSlot().Connect(output->GetInputSlot(0)); + + BOOST_TEST(CheckSequence(graph.cbegin(), + graph.cend(), + &IsLayerOfType<armnn::InputLayer>, + &IsLayerOfType<armnn::FloorLayer>, + &IsLayerOfType<armnn::OutputLayer>)); + + // Run the optimizer 
+ armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(InsertDebugLayer())); + + BOOST_TEST(CheckSequence(graph.cbegin(), + graph.cend(), + &IsLayerOfType<armnn::InputLayer>, + &IsLayerOfType<armnn::DebugLayer>, + &IsLayerOfType<armnn::FloorLayer>, + &IsLayerOfType<armnn::DebugLayer>, + &IsLayerOfType<armnn::OutputLayer>)); +} + void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape, const unsigned int* weightsShape, const unsigned int* outputShape, DataLayout dataLayout = DataLayout::NCHW) -- cgit v1.2.1