aboutsummaryrefslogtreecommitdiff
path: root/src/armnn
diff options
context:
space:
mode:
authorkeidav01 <keith.davis@arm.com>2018-12-11 16:14:20 +0000
committerKeith Davis Arm <keith.davis@arm.com>2018-12-11 17:11:20 +0000
commit738c2e6a647b886750e1bc3daa6dd615a0244baa (patch)
treeef88c6a5ee1076471bd36b85c30afb36a68bbfc9 /src/armnn
parent44a7167e0f13dc1d703cd573f57636fde711c618 (diff)
downloadarmnn-738c2e6a647b886750e1bc3daa6dd615a0244baa.tar.gz
IVGCVSW-1434 Add debug mode to Optimizer
* Modified optimizer to support debug mode via DebugLayer Change-Id: Ic8f313778e55540c182cf99876c44a0823be04c6
Diffstat (limited to 'src/armnn')
-rw-r--r--src/armnn/Network.cpp6
-rw-r--r--src/armnn/NetworkUtils.cpp29
-rw-r--r--src/armnn/NetworkUtils.hpp2
-rw-r--r--src/armnn/optimizations/AddDebug.hpp37
-rw-r--r--src/armnn/optimizations/All.hpp1
-rw-r--r--src/armnn/test/OptimizerTests.cpp37
6 files changed, 112 insertions, 0 deletions
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 0cf0ed36fd..ecab5041db 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -129,6 +129,12 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(Fp32NetworkToFp16Converter()));
}
+ // if the debug option is set, insert a DebugLayer after every layer so
+ // intermediate tensor data can be inspected at runtime
+ if (options.m_Debug)
+ {
+ Optimizer::Pass(optNetObjPtr->GetGraph(), MakeOptimizations(InsertDebugLayer()));
+ }
+
// We know that DeviceSpec should be the only implementation of IDeviceSpec.
const DeviceSpec& spec = *boost::polymorphic_downcast<const DeviceSpec*>(&deviceSpec);
auto const& supportedBackends = spec.GetSupportedBackends();
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index 1e3add6301..9a4ce87b59 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -74,4 +74,33 @@ std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& g
return convertLayers;
}
+
+std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer)
+{
+ std::vector<DebugLayer*> debugLayers;
+ debugLayers.reserve(layer.GetNumOutputSlots());
+
+ // Insert a debug layer after each of this layer's output slots
+ for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
+ {
+ // Insert debug layer after the layer
+ const std::string name =
+ std::string("DebugLayerAfter") + layer.GetName();
+
+ const DebugDescriptor descriptor;
+
+ DebugLayer* debugLayer =
+ graph.InsertNewLayer<DebugLayer>(*outputSlot, descriptor, name.c_str());
+
+ // Sets output tensor info for the debug layer.
+ TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
+
+ debugLayer->GetOutputSlot().SetTensorInfo(debugInfo);
+
+ debugLayers.emplace_back(debugLayer);
+ }
+
+ return debugLayers;
+}
+
} // namespace armnn
diff --git a/src/armnn/NetworkUtils.hpp b/src/armnn/NetworkUtils.hpp
index dbb85380ff..b81d5cb5e7 100644
--- a/src/armnn/NetworkUtils.hpp
+++ b/src/armnn/NetworkUtils.hpp
@@ -14,4 +14,6 @@ std::vector<ConvertFp16ToFp32Layer*> InsertConvertFp16ToFp32LayersBefore(Graph&
std::vector<ConvertFp32ToFp16Layer*> InsertConvertFp32ToFp16LayersAfter(Graph& graph, Layer& layer);
+std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer);
+
} // namespace armnn
diff --git a/src/armnn/optimizations/AddDebug.hpp b/src/armnn/optimizations/AddDebug.hpp
new file mode 100644
index 0000000000..60271b0d77
--- /dev/null
+++ b/src/armnn/optimizations/AddDebug.hpp
@@ -0,0 +1,37 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Optimization.hpp"
+#include "NetworkUtils.hpp"
+
+namespace armnn
+{
+namespace optimizations
+{
+
+class AddDebugImpl
+{
+public:
+
+ void Run(Graph& graph, Layer& layer) const
+ {
+ if (layer.GetType() != LayerType::Debug && layer.GetType() != LayerType::Output)
+ {
+ // insert a debug layer after every layer, except after layers that
+ // are already Debug layers or are Output layers
+ InsertDebugLayerAfter(graph, layer);
+ }
+ }
+
+protected:
+ AddDebugImpl() = default;
+ ~AddDebugImpl() = default;
+};
+
+using InsertDebugLayer = OptimizeForType<Layer, AddDebugImpl>;
+
+} // namespace optimizations
+} // namespace armnn
diff --git a/src/armnn/optimizations/All.hpp b/src/armnn/optimizations/All.hpp
index a1bff3c5e1..0a6684ee3b 100644
--- a/src/armnn/optimizations/All.hpp
+++ b/src/armnn/optimizations/All.hpp
@@ -12,3 +12,4 @@
#include "MovePermuteUp.hpp"
#include "OptimizeInverseConversions.hpp"
#include "ConvertFp32NetworkToFp16.hpp"
+#include "AddDebug.hpp"
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 30ca52092a..29d1702c64 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -768,6 +768,43 @@ BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest)
&IsLayerOfType<armnn::OutputLayer>));
}
+BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest)
+{
+ armnn::Graph graph;
+
+ const armnn::TensorInfo info({ 2,2,1,3 }, armnn::DataType::Float32);
+
+ // Create the simple test network
+ auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+ input->GetOutputSlot().SetTensorInfo(info);
+
+ auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
+ floor->GetOutputSlot().SetTensorInfo(info);
+
+ auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
+
+ // Connect up the layers
+ input->GetOutputSlot().Connect(floor->GetInputSlot(0));
+ floor->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+
+ // Run the optimizer
+ armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(InsertDebugLayer()));
+
+ BOOST_TEST(CheckSequence(graph.cbegin(),
+ graph.cend(),
+ &IsLayerOfType<armnn::InputLayer>,
+ &IsLayerOfType<armnn::DebugLayer>,
+ &IsLayerOfType<armnn::FloorLayer>,
+ &IsLayerOfType<armnn::DebugLayer>,
+ &IsLayerOfType<armnn::OutputLayer>));
+}
+
void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
const unsigned int* weightsShape, const unsigned int* outputShape,
DataLayout dataLayout = DataLayout::NCHW)