Diffstat (limited to 'src/armnn/layers')
 src/armnn/layers/ConcatLayer.cpp            | 2
 src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 2
 src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 2
 src/armnn/layers/DebugLayer.cpp             | 5
 src/armnn/layers/FakeQuantizationLayer.cpp  | 2
 src/armnn/layers/InputLayer.cpp             | 2
 src/armnn/layers/MemCopyLayer.cpp           | 4
 src/armnn/layers/MemImportLayer.cpp         | 4
 src/armnn/layers/MergeLayer.cpp             | 2
 src/armnn/layers/OutputLayer.cpp            | 5
 src/armnn/layers/OutputLayer.hpp            | 2
 src/armnn/layers/PreCompiledLayer.cpp       | 2
 src/armnn/layers/ReshapeLayer.cpp           | 3
 src/armnn/layers/SliceLayer.cpp             | 2
 src/armnn/layers/SpaceToBatchNdLayer.cpp    | 2
 src/armnn/layers/SpaceToDepthLayer.cpp      | 6
 src/armnn/layers/SplitterLayer.cpp          | 4
 src/armnn/layers/StackLayer.cpp             | 2
 src/armnn/layers/StandInLayer.cpp           | 4
 19 files changed, 27 insertions(+), 30 deletions(-)
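The change is the same across all nineteen files: drop the dependency on <boost/core/ignore_unused.hpp> and call Arm NN's own IgnoreUnused() helper from <armnn/utility/IgnoreUnused.hpp> instead. The diff does not show the contents of that header; the sketch below assumes the usual header-only variadic no-op shape such a utility takes.

// Minimal sketch of a header-only IgnoreUnused utility (assumed shape, not
// the verbatim contents of armnn/utility/IgnoreUnused.hpp, which this diff
// does not include).
#pragma once

namespace armnn
{

// Accepts any number of arguments of any type and does nothing with them.
// Passing a variable to it counts as a use, so -Wunused-parameter and
// -Wunused-variable stop firing at the call site, while the empty inline
// body optimises away entirely.
template <typename... Ts>
inline void IgnoreUnused(Ts&&...) {}

} // namespace armnn
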
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 317d61f1fa..f4024af65a 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -130,7 +130,7 @@ void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registr
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 026e8de8b2..7873c94563 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -48,7 +48,7 @@ void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
}
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 90bd8948d0..bbf4dbffd8 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,7 +47,7 @@ void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
{
// These conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
}
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index d0e0f037e2..76d33f27e9 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -8,8 +8,7 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -53,7 +52,7 @@ void DebugLayer::ValidateTensorShapesFromInputs()
void DebugLayer::Accept(ILayerVisitor& visitor) const
{
// by design debug layers are never in input graphs
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("DebugLayer should never appear in an input graph");
}
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 90f8445472..8611b9b73c 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -45,7 +45,7 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index e0c2544236..84cc43c667 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -19,7 +19,7 @@ InputLayer::InputLayer(LayerBindingId id, const char* name)
std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 231b28548f..cf69c17cf5 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -26,7 +26,7 @@ MemCopyLayer* MemCopyLayer::Clone(Graph& graph) const
std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
MemCopyQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("MemCopyLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 3b0e6d295b..80f9fda803 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -26,7 +26,7 @@ MemImportLayer* MemImportLayer::Clone(Graph& graph) const
std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
MemImportQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
void MemImportLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("MemImportLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index ce75950be2..f2fd29fe9e 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -18,7 +18,7 @@ MergeLayer::MergeLayer(const char* name)
std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index 4239323635..f00e0a5259 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -6,11 +6,10 @@
#include "LayerCloneBase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
@@ -21,7 +20,7 @@ OutputLayer::OutputLayer(LayerBindingId id, const char* name)
std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 8994556528..89bcfd6bb6 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -28,7 +28,7 @@ public:
const IWorkloadFactory& factory,
const bool IsMemoryManaged = true) override
{
- boost::ignore_unused(registry, factory, IsMemoryManaged);
+ IgnoreUnused(registry, factory, IsMemoryManaged);
}
/// Creates a dynamically-allocated copy of this layer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 00a316c5c0..3444afc454 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -48,7 +48,7 @@ void PreCompiledLayer::SetPreCompiledObject(PreCompiledObjectPtr preCompiledObje
void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 3a952583e6..fbf3eaa80a 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -6,6 +6,7 @@
#include "LayerCloneBase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -31,7 +32,7 @@ ReshapeLayer* ReshapeLayer::Clone(Graph& graph) const
std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
return std::vector<TensorShape>({ m_Param.m_TargetShape });
}
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index e39caa5db1..ec82082c4a 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -50,7 +50,7 @@ void SliceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
BOOST_ASSERT(inputShapes.size() == 1);
TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index d38187c532..ec724bafd0 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -35,7 +35,7 @@ std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const IWorkloadFa
SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
}
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index f8a6eb3ed8..8aa0c9f8cd 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -7,7 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
@@ -15,8 +15,6 @@
#include <numeric>
-#include <boost/core/ignore_unused.hpp>
-
using namespace armnnUtils;
namespace armnn
@@ -37,7 +35,7 @@ std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const IWorkloadFact
SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
}
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 84a598c847..f655e712c8 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -104,7 +104,7 @@ void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& regis
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
@@ -127,7 +127,7 @@ SplitterLayer* SplitterLayer::Clone(Graph& graph) const
std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
std::vector<TensorShape> outShapes;
//Output shapes must match View shapes.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 1a060f93c8..6f793caecc 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -32,7 +32,7 @@ StackLayer* StackLayer::Clone(Graph& graph) const
std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
const TensorShape& inputShape = m_Param.m_InputShape;
const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index d0fc325caa..d23d1d0bad 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -16,7 +16,7 @@ StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
// This throws in the event that it's called. We would expect that any backend that
// "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
// during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
@@ -30,7 +30,7 @@ StandInLayer* StandInLayer::Clone(Graph& graph) const
std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
throw Exception("Stand in layer does not support infering output shapes");
}
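
Every call site in this diff follows the same mechanical rewrite: remove the Boost include, pull in the Arm NN header, and switch the call. A minimal, self-contained sketch of the before/after shape, using a hypothetical function that is not taken from Arm NN:

#include <armnn/utility/IgnoreUnused.hpp>   // assumes the Arm NN include directory is on the include path

namespace armnn
{

// Hypothetical example: the parameter has to stay in the signature (e.g. to
// satisfy an interface) even though this implementation never reads it.
void ReportProgress(int verbosity)
{
    // Before: boost::ignore_unused(verbosity);   // required <boost/core/ignore_unused.hpp>
    IgnoreUnused(verbosity);                      // unqualified call resolves inside namespace armnn
}

} // namespace armnn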