author     Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>  2019-03-26 11:03:26 +0000
committer  nattapat.chaimanowong <nattapat.chaimanowong@arm.com>  2019-03-26 17:15:21 +0000
commit     964e955d8962590d3ccbaba9784962e895ca656f (patch)
tree       802213a507152d23fec932dfc6405540b97b7ccf
parent     44db7c36c8cf0fc608d3edc2ee0eb04428085218 (diff)
IVGCVSW-2881 Remove DebugDescriptor
* Also update Debug layer to use layer guid information

Change-Id: I9ec1f639299c3f855b670ff031a0e88d685cfc6b
Signed-off-by: Nattapat Chaimanowong <nattapat.chaimanowong@arm.com>
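For orientation, below is a minimal standalone sketch (not part of the patch) of the record format the reference Debug workload prints after this change: each record is keyed by the producing layer's guid and name rather than by a DebugDescriptor. It is simplified relative to Debug.cpp further down (flat data, no "min"/"max" fields, no per-dimension bracket nesting), and PrintDebugRecord plus the LayerGuid alias are illustrative stand-ins, not Arm NN APIs.

// Standalone illustration only; LayerGuid here is a plain alias, not armnn::LayerGuid.
#include <iostream>
#include <string>
#include <vector>

using LayerGuid = unsigned int;

void PrintDebugRecord(LayerGuid guid,
                      const std::string& layerName,
                      unsigned int slotIndex,
                      const std::vector<unsigned int>& shape,
                      const std::vector<float>& data)
{
    // Emit one JSON-style line per debugged output slot, keyed by guid and layer name.
    std::cout << "{ \"layerGuid\": " << guid << ", "
              << "\"layerName\": \"" << layerName << "\", "
              << "\"outputSlot\": " << slotIndex << ", "
              << "\"shape\": [";
    for (size_t i = 0; i < shape.size(); ++i)
    {
        std::cout << shape[i] << (i + 1 < shape.size() ? ", " : "");
    }
    std::cout << "], \"data\": [";
    for (size_t i = 0; i < data.size(); ++i)
    {
        std::cout << data[i] << (i + 1 < data.size() ? ", " : "");
    }
    std::cout << "] }\n";
}

int main()
{
    // Mirrors the expected strings in DebugTestImpl.hpp: guid 1, layer "TestOutput", slot 0.
    PrintDebugRecord(1, "TestOutput", 0, {2, 2}, {1, 2, 3, 4});
    return 0;
}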
-rw-r--r--  include/armnn/Descriptors.hpp  18
-rw-r--r--  include/armnn/DescriptorsFwd.hpp  1
-rw-r--r--  include/armnn/ILayerSupport.hpp  1
-rw-r--r--  include/armnn/LayerSupport.hpp  1
-rw-r--r--  src/armnn/LayerSupport.cpp  3
-rw-r--r--  src/armnn/NetworkUtils.cpp  7
-rw-r--r--  src/armnn/layers/DebugLayer.cpp  13
-rw-r--r--  src/armnn/layers/DebugLayer.hpp  7
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp  1
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.hpp  1
-rw-r--r--  src/backends/backendsCommon/WorkloadData.hpp  6
-rw-r--r--  src/backends/backendsCommon/WorkloadFactory.cpp  3
-rw-r--r--  src/backends/backendsCommon/test/DebugTestImpl.hpp  31
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp  2
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp  2
-rw-r--r--  src/backends/reference/RefLayerSupport.hpp  1
-rw-r--r--  src/backends/reference/workloads/Debug.cpp  24
-rw-r--r--  src/backends/reference/workloads/Debug.hpp  7
-rw-r--r--  src/backends/reference/workloads/RefDebugWorkload.cpp  2
19 files changed, 62 insertions(+), 69 deletions(-)
diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp
index 0bbe134e46..42b017f4cc 100644
--- a/include/armnn/Descriptors.hpp
+++ b/include/armnn/Descriptors.hpp
@@ -631,24 +631,6 @@ struct StridedSliceDescriptor
DataLayout m_DataLayout;
};
-/// A DebugDescriptor for the DebugLayer.
-struct DebugDescriptor
-{
- DebugDescriptor()
- : m_SlotIndex(0)
- {}
-
- DebugDescriptor(const std::string name, unsigned int index)
- : m_LayerName(name)
- , m_SlotIndex(index)
- {}
-
- /// The name of the debug layer.
- std::string m_LayerName;
- /// The slot index of the debug layer.
- unsigned int m_SlotIndex;
-};
-
/// A PreCompiledDescriptor for the PreCompiledLayer.
struct PreCompiledDescriptor
{
diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp
index f0a4cffab7..affd6e26e6 100644
--- a/include/armnn/DescriptorsFwd.hpp
+++ b/include/armnn/DescriptorsFwd.hpp
@@ -11,7 +11,6 @@ struct ActivationDescriptor;
struct BatchNormalizationDescriptor;
struct BatchToSpaceNdDescriptor;
struct Convolution2dDescriptor;
-struct DebugDescriptor;
struct DepthwiseConvolution2dDescriptor;
struct DetectionPostProcessDescriptor;
struct FakeQuantizationDescriptor;
diff --git a/include/armnn/ILayerSupport.hpp b/include/armnn/ILayerSupport.hpp
index 8a08146a7d..7896a80a8f 100644
--- a/include/armnn/ILayerSupport.hpp
+++ b/include/armnn/ILayerSupport.hpp
@@ -67,7 +67,6 @@ public:
virtual bool IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
- const DebugDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
virtual bool IsDepthwiseConvolutionSupported(
diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp
index 442230687d..7b9343b5d6 100644
--- a/include/armnn/LayerSupport.hpp
+++ b/include/armnn/LayerSupport.hpp
@@ -82,7 +82,6 @@ bool IsConvolution2dSupported(const BackendId& backend,
bool IsDebugSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
- const DebugDescriptor& descriptor,
char* reasonIfUnsupported = nullptr,
size_t reasonIfUnsupportedMaxLength = 1024);
diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp
index 12b3c403a7..5916488fb5 100644
--- a/src/armnn/LayerSupport.cpp
+++ b/src/armnn/LayerSupport.cpp
@@ -171,11 +171,10 @@ bool IsConvolution2dSupported(const BackendId& backend,
bool IsDebugSupported(const BackendId& backend,
const TensorInfo& input,
const TensorInfo& output,
- const DebugDescriptor& descriptor,
char* reasonIfUnsupported,
size_t reasonIfUnsupportedMaxLength)
{
- FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output, descriptor);
+ FORWARD_LAYER_SUPPORT_FUNC(backend, IsDebugSupported, input, output);
}
bool IsDepthwiseConvolutionSupported(const BackendId& backend,
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index a7f89ff222..126b56b084 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -86,15 +86,12 @@ std::vector<DebugLayer*> InsertDebugLayerAfter(Graph& graph, Layer& layer)
debugLayers.reserve(layer.GetNumOutputSlots());
// Connect a DebugLayer to each output slot of the layer
- unsigned int outputSlotIndex = 0u;
for (auto&& outputSlot = layer.BeginOutputSlots(); outputSlot != layer.EndOutputSlots(); ++outputSlot)
{
- const std::string layerName(layer.GetName());
- const std::string debugName = std::string("DebugLayerAfter") + layerName;
+ const std::string debugName = std::string("DebugLayerAfter") + layer.GetNameStr();
- const DebugDescriptor descriptor(layerName, outputSlotIndex++);
DebugLayer* debugLayer =
- graph.InsertNewLayer<DebugLayer>(*outputSlot, descriptor, debugName.c_str());
+ graph.InsertNewLayer<DebugLayer>(*outputSlot, debugName.c_str());
// Sets output tensor info for the debug layer.
TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 6fccca677f..34912731cb 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -12,23 +12,26 @@
namespace armnn
{
-DebugLayer::DebugLayer(const DebugDescriptor& param, const char* name)
- : LayerWithParameters(1, 1, LayerType::Debug, param, name)
+DebugLayer::DebugLayer(const char* name)
+ : Layer(1, 1, LayerType::Debug, name)
{}
std::unique_ptr<IWorkload> DebugLayer::CreateWorkload(const Graph& graph,
const IWorkloadFactory& factory) const
{
+ const Layer& prevLayer = GetInputSlot(0).GetConnectedOutputSlot()->GetOwningLayer();
+
DebugQueueDescriptor descriptor;
- descriptor.m_Parameters.m_LayerName = m_Param.m_LayerName;
- descriptor.m_Parameters.m_SlotIndex = m_Param.m_SlotIndex;
+ descriptor.m_Guid = prevLayer.GetGuid();
+ descriptor.m_LayerName = prevLayer.GetNameStr();
+ descriptor.m_SlotIndex = GetInputSlot(0).GetConnectedOutputSlot()->CalculateIndexOnOwner();
return factory.CreateDebug(descriptor, PrepInfoAndDesc(descriptor, graph));
}
DebugLayer* DebugLayer::Clone(Graph& graph) const
{
- return CloneBase<DebugLayer>(graph, m_Param, GetName());
+ return CloneBase<DebugLayer>(graph, GetName());
}
void DebugLayer::ValidateTensorShapesFromInputs()
diff --git a/src/armnn/layers/DebugLayer.hpp b/src/armnn/layers/DebugLayer.hpp
index bc64541cbe..3bd5a3dae2 100644
--- a/src/armnn/layers/DebugLayer.hpp
+++ b/src/armnn/layers/DebugLayer.hpp
@@ -4,13 +4,13 @@
//
#pragma once
-#include "LayerWithParameters.hpp"
+#include "Layer.hpp"
namespace armnn
{
/// This layer visualizes the data flowing through the network.
-class DebugLayer : public LayerWithParameters<DebugDescriptor>
+class DebugLayer : public Layer
{
public:
/// Makes a workload for the Debug type.
@@ -32,9 +32,8 @@ public:
protected:
/// Constructor to create a DebugLayer.
- /// @param [in] param DebugDescriptor to configure the debug layer.
/// @param [in] name Optional name for the layer.
- DebugLayer(const DebugDescriptor& param, const char* name);
+ DebugLayer(const char* name);
/// Default destructor
~DebugLayer() = default;
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 6358f6f0a8..c170ac05e2 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -100,7 +100,6 @@ bool LayerSupportBase::IsConvolution2dSupported(const TensorInfo& input,
bool LayerSupportBase::IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
- const DebugDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
diff --git a/src/backends/backendsCommon/LayerSupportBase.hpp b/src/backends/backendsCommon/LayerSupportBase.hpp
index bf81459582..75c366cf3b 100644
--- a/src/backends/backendsCommon/LayerSupportBase.hpp
+++ b/src/backends/backendsCommon/LayerSupportBase.hpp
@@ -58,7 +58,6 @@ public:
bool IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
- const DebugDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
diff --git a/src/backends/backendsCommon/WorkloadData.hpp b/src/backends/backendsCommon/WorkloadData.hpp
index 09f56479cd..18bd921d5e 100644
--- a/src/backends/backendsCommon/WorkloadData.hpp
+++ b/src/backends/backendsCommon/WorkloadData.hpp
@@ -380,9 +380,13 @@ struct GreaterQueueDescriptor : QueueDescriptor
void Validate(const WorkloadInfo& workloadInfo) const;
};
-struct DebugQueueDescriptor : QueueDescriptorWithParameters<DebugDescriptor>
+struct DebugQueueDescriptor : QueueDescriptor
{
void Validate(const WorkloadInfo& workloadInfo) const;
+
+ LayerGuid m_Guid;
+ std::string m_LayerName;
+ unsigned int m_SlotIndex;
};
struct RsqrtQueueDescriptor : QueueDescriptor
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 72d0b19d04..0996a8aaee 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -192,14 +192,11 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
}
case LayerType::Debug:
{
- auto cLayer = boost::polymorphic_downcast<const DebugLayer*>(&layer);
-
const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
result = layerSupportObject->IsDebugSupported(OverrideDataType(input, dataType),
OverrideDataType(output, dataType),
- cLayer->GetParameters(),
reason);
break;
}
diff --git a/src/backends/backendsCommon/test/DebugTestImpl.hpp b/src/backends/backendsCommon/test/DebugTestImpl.hpp
index 14808f4856..4af479d7ce 100644
--- a/src/backends/backendsCommon/test/DebugTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DebugTestImpl.hpp
@@ -92,8 +92,9 @@ LayerTestResult<T, 4> Debug4DTest(
unsigned int outputShape[] = {1, 2, 2, 3};
armnn::DebugQueueDescriptor desc;
- desc.m_Parameters.m_LayerName = "TestOutput";
- desc.m_Parameters.m_SlotIndex = 1;
+ desc.m_Guid = 1;
+ desc.m_LayerName = "TestOutput";
+ desc.m_SlotIndex = 0;
inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
@@ -115,8 +116,9 @@ LayerTestResult<T, 4> Debug4DTest(
});
const std::string expectedStringOutput =
- "{ \"layer\": \"TestOutput\","
- " \"outputSlot\": 1,"
+ "{ \"layerGuid\": 1,"
+ " \"layerName\": \"TestOutput\","
+ " \"outputSlot\": 0,"
" \"shape\": [1, 2, 2, 3],"
" \"min\": 1, \"max\": 12,"
" \"data\": [[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]] }\n";
@@ -143,7 +145,9 @@ LayerTestResult<T, 3> Debug3DTest(
unsigned int outputShape[] = {3, 3, 1};
armnn::DebugQueueDescriptor desc;
- desc.m_Parameters.m_LayerName = "TestOutput";
+ desc.m_Guid = 1;
+ desc.m_LayerName = "TestOutput";
+ desc.m_SlotIndex = 0;
inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
@@ -163,7 +167,8 @@ LayerTestResult<T, 3> Debug3DTest(
});
const std::string expectedStringOutput =
- "{ \"layer\": \"TestOutput\","
+ "{ \"layerGuid\": 1,"
+ " \"layerName\": \"TestOutput\","
" \"outputSlot\": 0,"
" \"shape\": [3, 3, 1],"
" \"min\": 1, \"max\": 9,"
@@ -191,7 +196,9 @@ LayerTestResult<T, 2> Debug2DTest(
unsigned int outputShape[] = {2, 2};
armnn::DebugQueueDescriptor desc;
- desc.m_Parameters.m_LayerName = "TestOutput";
+ desc.m_Guid = 1;
+ desc.m_LayerName = "TestOutput";
+ desc.m_SlotIndex = 0;
inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
@@ -209,7 +216,8 @@ LayerTestResult<T, 2> Debug2DTest(
});
const std::string expectedStringOutput =
- "{ \"layer\": \"TestOutput\","
+ "{ \"layerGuid\": 1,"
+ " \"layerName\": \"TestOutput\","
" \"outputSlot\": 0,"
" \"shape\": [2, 2],"
" \"min\": 1, \"max\": 4,"
@@ -237,7 +245,9 @@ LayerTestResult<T, 1> Debug1DTest(
unsigned int outputShape[] = {4};
armnn::DebugQueueDescriptor desc;
- desc.m_Parameters.m_LayerName = "TestOutput";
+ desc.m_Guid = 1;
+ desc.m_LayerName = "TestOutput";
+ desc.m_SlotIndex = 0;
inputTensorInfo = armnn::TensorInfo(1, inputShape, ArmnnType);
outputTensorInfo = armnn::TensorInfo(1, outputShape, ArmnnType);
@@ -253,7 +263,8 @@ LayerTestResult<T, 1> Debug1DTest(
});
const std::string expectedStringOutput =
- "{ \"layer\": \"TestOutput\","
+ "{ \"layerGuid\": 1,"
+ " \"layerName\": \"TestOutput\","
" \"outputSlot\": 0,"
" \"shape\": [4],"
" \"min\": 1, \"max\": 4,"
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 3df71830ca..79213c18e0 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -332,7 +332,7 @@ DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)
DECLARE_LAYER_POLICY_1_PARAM(MemCopy)
-DECLARE_LAYER_POLICY_2_PARAM(Debug)
+DECLARE_LAYER_POLICY_1_PARAM(Debug)
DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index d89e548323..820f36b7ac 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -354,11 +354,9 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
- const DebugDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
ignore_unused(output);
- ignore_unused(descriptor);
return IsSupportedForDataTypeRef(reasonIfUnsupported,
input.GetDataType(),
&TrueFunc<>,
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 3b73f22927..c0d7fcf4aa 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -56,7 +56,6 @@ public:
bool IsDebugSupported(const TensorInfo& input,
const TensorInfo& output,
- const DebugDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
bool IsDepthwiseConvolutionSupported(const TensorInfo& input,
diff --git a/src/backends/reference/workloads/Debug.cpp b/src/backends/reference/workloads/Debug.cpp
index cc83c7b4ee..b263db67cf 100644
--- a/src/backends/reference/workloads/Debug.cpp
+++ b/src/backends/reference/workloads/Debug.cpp
@@ -16,9 +16,11 @@ namespace armnn
template <typename T>
void Debug(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- const DebugDescriptor& descriptor,
const T* inputData,
- T* outputData)
+ T* outputData,
+ LayerGuid guid,
+ const std::string& layerName,
+ unsigned int slotIndex)
{
const unsigned int numDims = inputInfo.GetNumDimensions();
const unsigned int numElements = inputInfo.GetNumElements();
@@ -33,8 +35,9 @@ void Debug(const TensorInfo& inputInfo,
}
std::cout << "{ ";
- std::cout << "\"layer\": \"" << descriptor.m_LayerName << "\", ";
- std::cout << "\"outputSlot\": " << descriptor.m_SlotIndex << ", ";
+ std::cout << "\"layerGuid\": " << guid << ", ";
+ std::cout << "\"layerName\": \"" << layerName << "\", ";
+ std::cout << "\"outputSlot\": " << slotIndex << ", ";
std::cout << "\"shape\": ";
std::cout << "[";
@@ -89,13 +92,18 @@ void Debug(const TensorInfo& inputInfo,
template void Debug<float>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- const DebugDescriptor& descriptor,
const float* inputData,
- float* outputData);
+ float* outputData,
+ LayerGuid guid,
+ const std::string& layerName,
+ unsigned int slotIndex);
template void Debug<uint8_t>(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- const DebugDescriptor& descriptor,
const uint8_t* inputData,
- uint8_t* outputData);
+ uint8_t* outputData,
+ LayerGuid guid,
+ const std::string& layerName,
+ unsigned int slotIndex);
+
} // namespace armnn
diff --git a/src/backends/reference/workloads/Debug.hpp b/src/backends/reference/workloads/Debug.hpp
index 682f0bd31b..29a7d40662 100644
--- a/src/backends/reference/workloads/Debug.hpp
+++ b/src/backends/reference/workloads/Debug.hpp
@@ -4,7 +4,6 @@
//
#pragma once
-#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
namespace armnn
@@ -13,8 +12,10 @@ namespace armnn
template <typename T>
void Debug(const TensorInfo& inputInfo,
const TensorInfo& outputInfo,
- const DebugDescriptor& descriptor,
const T* inputData,
- T* outputData);
+ T* outputData,
+ LayerGuid guid,
+ const std::string& layerName,
+ unsigned int slotIndex);
} //namespace armnn
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index d9a47c0596..412d399adc 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -25,7 +25,7 @@ void RefDebugWorkload<DataType>::Execute() const
const T* inputData = GetInputTensorData<T>(0, m_Data);
T* outputData = GetOutputTensorData<T>(0, m_Data);
- Debug(inputInfo, outputInfo, m_Data.m_Parameters, inputData, outputData);
+ Debug(inputInfo, outputInfo, inputData, outputData, m_Data.m_Guid, m_Data.m_LayerName, m_Data.m_SlotIndex);
}
template class RefDebugWorkload<DataType::Float32>;
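As a usage note, the public armnn::IsDebugSupported overload in include/armnn/LayerSupport.hpp now takes only the input and output tensor infos. The following is a hedged caller sketch, assuming the Arm NN public headers at this revision; the CpuRef backend id and the tensor shape are illustrative choices, not part of the patch.

// Caller-side sketch of the descriptor-free IsDebugSupported query.
#include <armnn/BackendId.hpp>
#include <armnn/LayerSupport.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>
#include <iostream>

int main()
{
    // The Debug layer is a pass-through, so input and output share the same tensor info.
    const armnn::TensorInfo info(armnn::TensorShape({1, 2, 2, 3}), armnn::DataType::Float32);

    char reason[1024] = {0};
    const bool supported = armnn::IsDebugSupported(armnn::BackendId("CpuRef"),
                                                   info,   // input
                                                   info,   // output
                                                   reason,
                                                   sizeof(reason));
    std::cout << "Debug supported on CpuRef: " << (supported ? "yes" : "no") << "\n";
    return 0;
}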