path: root/src/armnn
author     Jan Eilers <jan.eilers@arm.com>  2020-03-09 12:13:48 +0000
committer  Jan Eilers <jan.eilers@arm.com>  2020-03-10 10:07:43 +0000
commit     8eb256065f0e75ecf8e427d56955e2bac117c2d7 (patch)
tree       1387fb4ea4a741475449d78be63d601f9d84b6e5 /src/armnn
parent     8832522f47b701f5f042069e7bf8deae9b75d449 (diff)
download   armnn-8eb256065f0e75ecf8e427d56955e2bac117c2d7.tar.gz
IVGCVSW-4482 Remove boost::ignore_unused
!referencetests:229377

Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ia9b360b4a057fe7bbce5b268092627c09a0dba82
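The patch replaces every boost::ignore_unused call with the in-tree armnn::IgnoreUnused helper. As a point of reference, a minimal sketch of such a variadic no-op is shown below; it assumes (but this diff does not show) that armnn/utility/IgnoreUnused.hpp follows the usual pattern:

```cpp
// Minimal sketch of a variadic no-op used to silence unused-parameter and
// unused-variable warnings. This is an assumption about the shape of
// armnn/utility/IgnoreUnused.hpp, not a copy of it.
namespace armnn
{
template <typename... Ts>
inline void IgnoreUnused(Ts&&...) {}
} // namespace armnn
```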
Diffstat (limited to 'src/armnn')
-rw-r--r--  src/armnn/DynamicQuantizationVisitor.cpp                |  92
-rw-r--r--  src/armnn/ExecutionFrame.cpp                            |   2
-rw-r--r--  src/armnn/Graph.cpp                                     |   2
-rw-r--r--  src/armnn/Graph.hpp                                     |   6
-rw-r--r--  src/armnn/Layer.cpp                                     |   2
-rw-r--r--  src/armnn/Layer.hpp                                     |   2
-rw-r--r--  src/armnn/LayerSupportCommon.hpp                        |  27
-rw-r--r--  src/armnn/LoadedNetwork.cpp                             |   2
-rw-r--r--  src/armnn/Logging.cpp                                   |   9
-rw-r--r--  src/armnn/Network.cpp                                   |   3
-rw-r--r--  src/armnn/OverrideInputRangeVisitor.cpp                 |   5
-rw-r--r--  src/armnn/Profiling.cpp                                 |   5
-rw-r--r--  src/armnn/Profiling.hpp                                 |   5
-rw-r--r--  src/armnn/StaticRangeVisitor.cpp                        |  88
-rw-r--r--  src/armnn/SubgraphView.cpp                              |   5
-rw-r--r--  src/armnn/SubgraphViewSelector.cpp                      |   7
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp                        |   2
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.cpp             |   2
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.cpp             |   2
-rw-r--r--  src/armnn/layers/DebugLayer.cpp                         |   5
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp              |   2
-rw-r--r--  src/armnn/layers/InputLayer.cpp                         |   2
-rw-r--r--  src/armnn/layers/MemCopyLayer.cpp                       |   4
-rw-r--r--  src/armnn/layers/MemImportLayer.cpp                     |   4
-rw-r--r--  src/armnn/layers/MergeLayer.cpp                         |   2
-rw-r--r--  src/armnn/layers/OutputLayer.cpp                        |   5
-rw-r--r--  src/armnn/layers/OutputLayer.hpp                        |   2
-rw-r--r--  src/armnn/layers/PreCompiledLayer.cpp                   |   2
-rw-r--r--  src/armnn/layers/ReshapeLayer.cpp                       |   3
-rw-r--r--  src/armnn/layers/SliceLayer.cpp                         |   2
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.cpp                |   2
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.cpp                  |   6
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp                      |   4
-rw-r--r--  src/armnn/layers/StackLayer.cpp                         |   2
-rw-r--r--  src/armnn/layers/StandInLayer.cpp                       |   4
-rw-r--r--  src/armnn/optimizations/ConvertConstants.hpp            |   4
-rw-r--r--  src/armnn/optimizations/OptimizeInverseConversions.hpp  |   4
-rw-r--r--  src/armnn/optimizations/OptimizeInversePermutes.hpp     |   4
-rw-r--r--  src/armnn/optimizations/SquashEqualSiblings.hpp         |   4
-rw-r--r--  src/armnn/test/CreateWorkload.hpp                       |   4
-rw-r--r--  src/armnn/test/DebugCallbackTest.cpp                    |   2
-rw-r--r--  src/armnn/test/EndToEndTest.cpp                         |   2
-rw-r--r--  src/armnn/test/OptimizerTests.cpp                       |   6
-rw-r--r--  src/armnn/test/OptionalTest.cpp                         |   6
-rw-r--r--  src/armnn/test/ProfilerTests.cpp                        |   3
-rw-r--r--  src/armnn/test/QuantizerTest.cpp                        | 112
-rw-r--r--  src/armnn/test/RuntimeTests.cpp                         |   4
-rw-r--r--  src/armnn/test/TensorHandleStrategyTest.cpp             |  23
48 files changed, 249 insertions(+), 248 deletions(-)
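At each call site the change is mechanical: drop the Boost include, include the armnn utility header, and call IgnoreUnused instead. A hedged usage sketch follows; the function and parameter names are illustrative, not taken from the diff:

```cpp
#include <armnn/utility/IgnoreUnused.hpp> // replaces <boost/core/ignore_unused.hpp>

namespace armnn
{
// Hypothetical visitor: the parameters exist only to satisfy an interface,
// so IgnoreUnused marks them as used and avoids unused-parameter warnings.
void VisitExampleLayer(const char* name, int layerBindingId)
{
    IgnoreUnused(name, layerBindingId); // previously boost::ignore_unused(name, layerBindingId)
}
} // namespace armnn
```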
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index 4b1dce0b6f..862a926abc 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -6,7 +6,7 @@
#include "DynamicQuantizationVisitor.hpp"
#include "NetworkUtils.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
@@ -85,7 +85,7 @@ void DynamicQuantizationVisitor::VisitNonCalibratedLayers() {
void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
SetRange(layer, 0, -20.f, 20.f);
AddToCalibratedLayers(layer);
}
@@ -98,12 +98,12 @@ void DynamicQuantizationVisitor::VisitBatchNormalizationLayer(const IConnectable
const ConstTensor& gamma,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(mean);
- boost::ignore_unused(variance);
- boost::ignore_unused(beta);
- boost::ignore_unused(gamma);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(mean);
+ IgnoreUnused(variance);
+ IgnoreUnused(beta);
+ IgnoreUnused(gamma);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
@@ -114,10 +114,10 @@ void DynamicQuantizationVisitor::VisitConvolution2dLayer(const IConnectableLayer
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(convolution2dDescriptor);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(convolution2dDescriptor);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
@@ -128,10 +128,10 @@ void DynamicQuantizationVisitor::VisitDepthwiseConvolution2dLayer(const IConnect
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
@@ -140,7 +140,7 @@ void DynamicQuantizationVisitor::VisitActivationLayer(const IConnectableLayer* l
const ActivationDescriptor& activationDescriptor,
const char* name)
{
- boost::ignore_unused(name, activationDescriptor);
+ IgnoreUnused(name, activationDescriptor);
switch (activationDescriptor.m_Function)
{
// Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -172,10 +172,10 @@ void DynamicQuantizationVisitor::VisitFullyConnectedLayer(const IConnectableLaye
const Optional<ConstTensor>& biases,
const char *name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
@@ -184,8 +184,8 @@ void DynamicQuantizationVisitor::VisitPermuteLayer(const IConnectableLayer* laye
const PermuteDescriptor& permuteDescriptor,
const char* name)
{
- boost::ignore_unused(permuteDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(permuteDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -193,8 +193,8 @@ void DynamicQuantizationVisitor::VisitSpaceToBatchNdLayer(const IConnectableLaye
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
- boost::ignore_unused(spaceToBatchNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(spaceToBatchNdDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -202,8 +202,8 @@ void DynamicQuantizationVisitor::VisitPooling2dLayer(const IConnectableLayer* la
const Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
- boost::ignore_unused(pooling2dDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(pooling2dDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -211,8 +211,8 @@ void DynamicQuantizationVisitor::VisitSoftmaxLayer(const IConnectableLayer* laye
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
- boost::ignore_unused(softmaxDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(softmaxDescriptor);
+ IgnoreUnused(name);
SetRange(layer, 0, 0.f, 1.f);
AddToCalibratedLayers(layer);
}
@@ -221,7 +221,7 @@ void DynamicQuantizationVisitor::VisitConstantLayer(const IConnectableLayer* lay
const ConstTensor& input,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (input.GetDataType() != DataType::Float32)
{
@@ -249,8 +249,8 @@ void DynamicQuantizationVisitor::VisitConcatLayer(const IConnectableLayer* layer
const ConcatDescriptor& originsDescriptor,
const char* name)
{
- boost::ignore_unused(name);
- boost::ignore_unused(originsDescriptor);
+ IgnoreUnused(name);
+ IgnoreUnused(originsDescriptor);
float min = std::numeric_limits<float>::max();
float max = std::numeric_limits<float>::lowest();
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -270,8 +270,8 @@ void DynamicQuantizationVisitor::VisitReshapeLayer(const IConnectableLayer* laye
const ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
- boost::ignore_unused(reshapeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(reshapeDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -279,8 +279,8 @@ void DynamicQuantizationVisitor::VisitSplitterLayer(const IConnectableLayer* lay
const SplitterDescriptor& splitterDescriptor,
const char* name)
{
- boost::ignore_unused(splitterDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(splitterDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -288,8 +288,8 @@ void DynamicQuantizationVisitor::VisitResizeBilinearLayer(const IConnectableLaye
const ResizeBilinearDescriptor& resizeDesc,
const char* name)
{
- boost::ignore_unused(resizeDesc);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDesc);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -297,8 +297,8 @@ void DynamicQuantizationVisitor::VisitStridedSliceLayer(const IConnectableLayer*
const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
- boost::ignore_unused(stridedSliceDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(stridedSliceDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -306,23 +306,23 @@ void DynamicQuantizationVisitor::VisitBatchToSpaceNdLayer(const IConnectableLaye
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name)
{
- boost::ignore_unused(batchToSpaceNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(batchToSpaceNdDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
void DynamicQuantizationVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(id);
- boost::ignore_unused(name);
+ IgnoreUnused(id);
+ IgnoreUnused(name);
SetRange(layer, 0, -0.0f, 0.0f);
AddToCalibratedLayers(layer);
}
void DynamicQuantizationVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(id);
- boost::ignore_unused(name);
+ IgnoreUnused(id);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
m_OutputLayers.push_back(id);
}
diff --git a/src/armnn/ExecutionFrame.cpp b/src/armnn/ExecutionFrame.cpp
index 58005e951c..92a7990881 100644
--- a/src/armnn/ExecutionFrame.cpp
+++ b/src/armnn/ExecutionFrame.cpp
@@ -13,7 +13,7 @@ ExecutionFrame::ExecutionFrame() {}
IExecutionFrame* ExecutionFrame::ExecuteWorkloads(IExecutionFrame* previousFrame)
{
- boost::ignore_unused(previousFrame);
+ IgnoreUnused(previousFrame);
for (auto& workload: m_WorkloadQueue)
{
workload->Execute();
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 8e7f75b2b8..0d326adae7 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -435,7 +435,7 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
{
- boost::ignore_unused(layer);
+ IgnoreUnused(layer);
BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
"Substitute layer is not a member of graph");
});
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index c65f12bbc3..63bc8d062c 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -297,7 +297,7 @@ private:
graph.m_Layers.erase(layerIt);
const size_t numErased = graph.m_PosInGraphMap.erase(this);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
@@ -355,7 +355,7 @@ public:
~LayerInGraph() override
{
const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
};
@@ -381,7 +381,7 @@ public:
~LayerInGraph() override
{
const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
};
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 1f63d6ed3c..9de812c6e5 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -196,7 +196,7 @@ Layer::Layer(unsigned int numInputSlots,
, m_BackendHint(EmptyOptional())
, m_Guid(profiling::ProfilingService::Instance().NextGuid())
{
- boost::ignore_unused(layout);
+ IgnoreUnused(layout);
m_InputSlots.reserve(numInputSlots);
for (unsigned int i = 0; i < numInputSlots; ++i)
{
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 5ad38f0b9e..ec35d71082 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -17,6 +17,7 @@
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <algorithm>
#include <memory>
@@ -27,7 +28,6 @@
#include <list>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/cast.hpp>
namespace armnn
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index e0c6b8040c..9252b3b9a5 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -4,13 +4,12 @@
//
#pragma once
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Optional.hpp>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
@@ -54,23 +53,23 @@ bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
template<typename ... Params>
bool TrueFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(reasonIfUnsupported);
- boost::ignore_unused(params...);
+ IgnoreUnused(reasonIfUnsupported);
+ IgnoreUnused(params...);
return true;
}
template<typename ... Params>
bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(reasonIfUnsupported);
- boost::ignore_unused(params...);
+ IgnoreUnused(reasonIfUnsupported);
+ IgnoreUnused(params...);
return false;
}
template<typename ... Params>
bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type");
return false;
}
@@ -78,7 +77,7 @@ bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type");
return false;
}
@@ -86,7 +85,7 @@ bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type");
return false;
}
@@ -94,7 +93,7 @@ bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
template<typename ... Params>
bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type");
return false;
}
@@ -102,7 +101,7 @@ bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input");
return false;
}
@@ -110,7 +109,7 @@ bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... p
template<typename ... Params>
bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input");
return false;
}
@@ -118,7 +117,7 @@ bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... p
template<typename ... Params>
bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output");
return false;
}
@@ -126,7 +125,7 @@ bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&...
template<typename ... Params>
bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output");
return false;
}
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 2e95dd8d6c..69e42ba38f 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -311,7 +311,7 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
std::string reasonIfUnsupported;
BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
"Factory does not support layer");
- boost::ignore_unused(reasonIfUnsupported);
+ IgnoreUnused(reasonIfUnsupported);
return *workloadFactory;
}
diff --git a/src/armnn/Logging.cpp b/src/armnn/Logging.cpp
index 2c07751aae..ba401233ae 100644
--- a/src/armnn/Logging.cpp
+++ b/src/armnn/Logging.cpp
@@ -2,9 +2,9 @@
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <armnn/Logging.hpp>
-
+#include <armnn/Logging.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Utils.hpp>
#if defined(_MSC_VER)
@@ -20,7 +20,6 @@
#endif
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <iostream>
namespace armnn
@@ -107,14 +106,14 @@ class DebugOutputSink : public LogSink
public:
void Consume(const std::string& s) override
{
- boost::ignore_unused(s);
+ IgnoreUnused(s);
#if defined(_MSC_VER)
OutputDebugString(s.c_str());
OutputDebugString("\n");
#elif defined(__ANDROID__)
__android_log_write(ANDROID_LOG_DEBUG, "armnn", s.c_str());
#else
- boost::ignore_unused(s);
+ IgnoreUnused(s);
#endif
}
};
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 50a7df6662..3663727e48 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -22,6 +22,7 @@
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <ProfilingService.hpp>
@@ -628,7 +629,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backen
OutputSlot& slot,
TensorHandleFactoryRegistry& registry)
{
- boost::ignore_unused(backends, slot, registry);
+ IgnoreUnused(backends, slot, registry);
return ITensorHandleFactory::DeferredFactoryId;
}
diff --git a/src/armnn/OverrideInputRangeVisitor.cpp b/src/armnn/OverrideInputRangeVisitor.cpp
index d047c5bbe8..d0453fe326 100644
--- a/src/armnn/OverrideInputRangeVisitor.cpp
+++ b/src/armnn/OverrideInputRangeVisitor.cpp
@@ -7,8 +7,9 @@
#include "NetworkQuantizerUtils.hpp"
#include "Layer.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
namespace armnn
{
@@ -23,7 +24,7 @@ OverrideInputRangeVisitor::OverrideInputRangeVisitor(RangeTracker& ranges,
void OverrideInputRangeVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (m_LayerId == id)
{
m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index 1cd21ab63c..b1aedaab5a 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -5,6 +5,7 @@
#include "Profiling.hpp"
#include <armnn/BackendId.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include "JsonPrinter.hpp"
@@ -20,7 +21,7 @@
#include <stack>
#include <boost/algorithm/string.hpp>
-#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
@@ -223,7 +224,7 @@ void Profiler::EndEvent(Event* event)
m_Parents.pop();
Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
- boost::ignore_unused(parent);
+ IgnoreUnused(parent);
BOOST_ASSERT(event->GetParentEvent() == parent);
#if ARMNN_STREAMLINE_ENABLED
diff --git a/src/armnn/Profiling.hpp b/src/armnn/Profiling.hpp
index 4afd6911c2..e6ea090d7b 100644
--- a/src/armnn/Profiling.hpp
+++ b/src/armnn/Profiling.hpp
@@ -6,6 +6,7 @@
#include "ProfilingEvent.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include "armnn/IProfiler.hpp"
#include "WallClockTimer.hpp"
@@ -17,8 +18,6 @@
#include <stack>
#include <map>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
@@ -141,7 +140,7 @@ private:
void ConstructNextInVector(std::vector<InstrumentPtr>& instruments)
{
- boost::ignore_unused(instruments);
+ IgnoreUnused(instruments);
}
template<typename Arg, typename... Args>
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 81428c1f90..0e820c3202 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -5,7 +5,7 @@
#include "StaticRangeVisitor.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
@@ -31,7 +31,7 @@ void StaticRangeVisitor::ForwardParentParameters(const IConnectableLayer* layer)
void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
SetRange(layer, 0, -20.f, 20.f);
}
@@ -43,12 +43,12 @@ void StaticRangeVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* l
const ConstTensor& gamma,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(mean);
- boost::ignore_unused(variance);
- boost::ignore_unused(beta);
- boost::ignore_unused(gamma);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(mean);
+ IgnoreUnused(variance);
+ IgnoreUnused(beta);
+ IgnoreUnused(gamma);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
@@ -58,10 +58,10 @@ void StaticRangeVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(convolution2dDescriptor);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(convolution2dDescriptor);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
@@ -71,10 +71,10 @@ void StaticRangeVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLaye
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
@@ -82,7 +82,7 @@ void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer* layer,
const ActivationDescriptor& activationDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
switch (activationDescriptor.m_Function)
{
// Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -113,10 +113,10 @@ void StaticRangeVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer
const Optional<ConstTensor>& biases,
const char *name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
@@ -124,8 +124,8 @@ void StaticRangeVisitor::VisitPermuteLayer(const IConnectableLayer* layer,
const PermuteDescriptor& permuteDescriptor,
const char* name)
{
- boost::ignore_unused(permuteDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(permuteDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -133,8 +133,8 @@ void StaticRangeVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
- boost::ignore_unused(spaceToBatchNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(spaceToBatchNdDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -142,8 +142,8 @@ void StaticRangeVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
const Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
- boost::ignore_unused(pooling2dDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(pooling2dDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -151,8 +151,8 @@ void StaticRangeVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
- boost::ignore_unused(softmaxDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(softmaxDescriptor);
+ IgnoreUnused(name);
SetRange(layer, 0, 0.f, 1.f);
}
@@ -160,8 +160,8 @@ void StaticRangeVisitor::VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name)
{
- boost::ignore_unused(originsDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(originsDescriptor);
+ IgnoreUnused(name);
float min = std::numeric_limits<float>::max();
float max = std::numeric_limits<float>::lowest();
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -180,7 +180,7 @@ void StaticRangeVisitor::VisitConstantLayer(const IConnectableLayer* layer,
const ConstTensor& input,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (input.GetDataType() != DataType::Float32)
{
@@ -208,8 +208,8 @@ void StaticRangeVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
const ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
- boost::ignore_unused(reshapeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(reshapeDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -217,8 +217,8 @@ void StaticRangeVisitor::VisitSplitterLayer(const IConnectableLayer* layer,
const SplitterDescriptor& splitterDescriptor,
const char* name)
{
- boost::ignore_unused(splitterDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(splitterDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -226,8 +226,8 @@ void StaticRangeVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer
const ResizeBilinearDescriptor& resizeDesc,
const char* name)
{
- boost::ignore_unused(resizeDesc);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDesc);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -235,8 +235,8 @@ void StaticRangeVisitor::VisitResizeLayer(const IConnectableLayer* layer,
const ResizeDescriptor& resizeDescriptor,
const char* name)
{
- boost::ignore_unused(resizeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -244,8 +244,8 @@ void StaticRangeVisitor::VisitStridedSliceLayer(const IConnectableLayer* layer,
const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
- boost::ignore_unused(stridedSliceDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(stridedSliceDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -253,8 +253,8 @@ void StaticRangeVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name)
{
- boost::ignore_unused(batchToSpaceNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(batchToSpaceNdDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index a87cc9b268..7705e687a9 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -6,8 +6,9 @@
#include "SubgraphView.hpp"
#include "Graph.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <boost/numeric/conversion/cast.hpp>
#include <utility>
namespace armnn
@@ -24,7 +25,7 @@ void AssertIfNullsOrDuplicates(const C& container, const std::string& errorMessa
std::for_each(container.begin(), container.end(), [&duplicateSet, &errorMessage](const T& i)
{
// Ignore unused for release builds
- boost::ignore_unused(errorMessage);
+ IgnoreUnused(errorMessage);
// Check if the item is valid
BOOST_ASSERT_MSG(i, errorMessage.c_str());
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 8798b7285d..02b7bdafa5 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -5,6 +5,9 @@
#include "SubgraphViewSelector.hpp"
#include "Graph.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
#include <algorithm>
#include <map>
@@ -78,14 +81,14 @@ public:
{
size_t numErased = a->m_Dependants.erase(this);
BOOST_ASSERT(numErased == 1);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
a->m_Dependants.insert(m_Parent);
}
for (PartialSubgraph* a : m_Dependants)
{
size_t numErased = a->m_Antecedents.erase(this);
BOOST_ASSERT(numErased == 1);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
a->m_Antecedents.insert(m_Parent);
}
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 317d61f1fa..f4024af65a 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -130,7 +130,7 @@ void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registr
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 026e8de8b2..7873c94563 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -48,7 +48,7 @@ void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
}
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 90bd8948d0..bbf4dbffd8 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,7 +47,7 @@ void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
{
// These conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
}
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index d0e0f037e2..76d33f27e9 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -8,8 +8,7 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -53,7 +52,7 @@ void DebugLayer::ValidateTensorShapesFromInputs()
void DebugLayer::Accept(ILayerVisitor& visitor) const
{
// by design debug layers are never in input graphs
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("DebugLayer should never appear in an input graph");
}
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 90f8445472..8611b9b73c 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -45,7 +45,7 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index e0c2544236..84cc43c667 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -19,7 +19,7 @@ InputLayer::InputLayer(LayerBindingId id, const char* name)
std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 231b28548f..cf69c17cf5 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -26,7 +26,7 @@ MemCopyLayer* MemCopyLayer::Clone(Graph& graph) const
std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
MemCopyQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("MemCopyLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 3b0e6d295b..80f9fda803 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -26,7 +26,7 @@ MemImportLayer* MemImportLayer::Clone(Graph& graph) const
std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
MemImportQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
void MemImportLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("MemImportLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index ce75950be2..f2fd29fe9e 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -18,7 +18,7 @@ MergeLayer::MergeLayer(const char* name)
std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index 4239323635..f00e0a5259 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -6,11 +6,10 @@
#include "LayerCloneBase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
@@ -21,7 +20,7 @@ OutputLayer::OutputLayer(LayerBindingId id, const char* name)
std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 8994556528..89bcfd6bb6 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -28,7 +28,7 @@ public:
const IWorkloadFactory& factory,
const bool IsMemoryManaged = true) override
{
- boost::ignore_unused(registry, factory, IsMemoryManaged);
+ IgnoreUnused(registry, factory, IsMemoryManaged);
}
/// Creates a dynamically-allocated copy of this layer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 00a316c5c0..3444afc454 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -48,7 +48,7 @@ void PreCompiledLayer::SetPreCompiledObject(PreCompiledObjectPtr preCompiledObje
void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 3a952583e6..fbf3eaa80a 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -6,6 +6,7 @@
#include "LayerCloneBase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -31,7 +32,7 @@ ReshapeLayer* ReshapeLayer::Clone(Graph& graph) const
std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
return std::vector<TensorShape>({ m_Param.m_TargetShape });
}
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index e39caa5db1..ec82082c4a 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -50,7 +50,7 @@ void SliceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
BOOST_ASSERT(inputShapes.size() == 1);
TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index d38187c532..ec724bafd0 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -35,7 +35,7 @@ std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const IWorkloadFa
SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
}
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index f8a6eb3ed8..8aa0c9f8cd 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -7,7 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
@@ -15,8 +15,6 @@
#include <numeric>
-#include <boost/core/ignore_unused.hpp>
-
using namespace armnnUtils;
namespace armnn
@@ -37,7 +35,7 @@ std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const IWorkloadFact
SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
}
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 84a598c847..f655e712c8 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -104,7 +104,7 @@ void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& regis
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
@@ -127,7 +127,7 @@ SplitterLayer* SplitterLayer::Clone(Graph& graph) const
std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
std::vector<TensorShape> outShapes;
//Output shapes must match View shapes.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 1a060f93c8..6f793caecc 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -32,7 +32,7 @@ StackLayer* StackLayer::Clone(Graph& graph) const
std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
const TensorShape& inputShape = m_Param.m_InputShape;
const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index d0fc325caa..d23d1d0bad 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -16,7 +16,7 @@ StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
// This throws in the event that it's called. We would expect that any backend that
// "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
// during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
@@ -30,7 +30,7 @@ StandInLayer* StandInLayer::Clone(Graph& graph) const
std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
throw Exception("Stand in layer does not support infering output shapes");
}
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index b3842e3c0f..5e19c7bd05 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -11,7 +11,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <Half.hpp>
@@ -72,7 +72,7 @@ public:
void Run(Graph& graph, Layer& layer) const override
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
if (Predicate::Test(layer))
{
layer.OperateOnConstantTensors(Converter::Func);
diff --git a/src/armnn/optimizations/OptimizeInverseConversions.hpp b/src/armnn/optimizations/OptimizeInverseConversions.hpp
index f0d11ce159..3ea4a5b279 100644
--- a/src/armnn/optimizations/OptimizeInverseConversions.hpp
+++ b/src/armnn/optimizations/OptimizeInverseConversions.hpp
@@ -6,7 +6,7 @@
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -20,7 +20,7 @@ public:
/// Fp16ToFp32 followed by Fp32ToFp16 or vice-versa.
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
diff --git a/src/armnn/optimizations/OptimizeInversePermutes.hpp b/src/armnn/optimizations/OptimizeInversePermutes.hpp
index 77d62a50cb..98e87c36c6 100644
--- a/src/armnn/optimizations/OptimizeInversePermutes.hpp
+++ b/src/armnn/optimizations/OptimizeInversePermutes.hpp
@@ -6,7 +6,7 @@
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -21,7 +21,7 @@ public:
/// Bypasses both layers for that connection if one is the inverse of the other.
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
auto child = boost::polymorphic_downcast<PermuteType*>(&connection.GetOwningLayer());
diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp
index d5a8a5d81e..bac27c06a7 100644
--- a/src/armnn/optimizations/SquashEqualSiblings.hpp
+++ b/src/armnn/optimizations/SquashEqualSiblings.hpp
@@ -6,7 +6,7 @@
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -23,7 +23,7 @@ public:
/// the child layer, so the siblings are left unconnected (and later removed).
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
auto& child = connection.GetOwningLayer();
if (!child.IsOutputUnconnected())
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 4782c432a2..72ad9d45ef 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -11,6 +11,7 @@
#include <ResolveType.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -18,7 +19,6 @@
#include <boost/test/unit_test.hpp>
#include <boost/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <utility>
@@ -1298,7 +1298,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
armnn::Graph& graph,
bool biasEnabled = false)
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
// To create a PreCompiled layer, create a network and Optimize it.
armnn::Network net;
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index bd8bdd543c..c89da83a89 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -60,7 +60,7 @@ BOOST_AUTO_TEST_CASE(RuntimeRegisterDebugCallback)
std::vector<unsigned int> slotIndexes;
auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensor)
{
- boost::ignore_unused(guid);
+ IgnoreUnused(guid);
slotIndexes.push_back(slotIndex);
tensorShapes.push_back(tensor->GetShape());
callCount++;
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index df84be4277..a8192a6480 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -6,8 +6,8 @@
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#include <set>
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 0ca4fc4764..56032adc33 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -691,7 +691,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
auto inputLayer = boost::polymorphic_downcast<const InputLayer*>(layer);
BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
}
@@ -700,7 +700,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
auto outputLayer = boost::polymorphic_downcast<const OutputLayer*>(layer);
BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
}
@@ -709,7 +709,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
const ActivationDescriptor& activationDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(activationDescriptor, name);
+ IgnoreUnused(activationDescriptor, name);
auto activation = boost::polymorphic_downcast<const ActivationLayer*>(layer);
BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
}
diff --git a/src/armnn/test/OptionalTest.cpp b/src/armnn/test/OptionalTest.cpp
index fd136439c8..73c96431fb 100644
--- a/src/armnn/test/OptionalTest.cpp
+++ b/src/armnn/test/OptionalTest.cpp
@@ -7,19 +7,19 @@
#include <armnn/Optional.hpp>
#include <string>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace
{
void PassStringRef(armnn::Optional<std::string&> value)
{
- boost::ignore_unused(value);
+ armnn::IgnoreUnused(value);
}
void PassStringRefWithDefault(armnn::Optional<std::string&> value = armnn::EmptyOptional())
{
- boost::ignore_unused(value);
+ armnn::IgnoreUnused(value);
}
} // namespace <anonymous>
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index a052862bdd..9376fa4cea 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -5,6 +5,7 @@
#include <armnn/IRuntime.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/tools/output_test_stream.hpp>
@@ -309,7 +310,7 @@ BOOST_AUTO_TEST_CASE(ProfilerJsonPrinter)
profiler->Print(json);
std::string output = buffer.str();
- boost::ignore_unused(output);
+ armnn::IgnoreUnused(output);
// Disable profiling here to not print out anything on stdout.
profiler->EnableProfiling(false);
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 2dc054af07..ef9b2da782 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -3,15 +3,6 @@
// SPDX-License-Identifier: MIT
//
-#include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/Types.hpp>
-
-#include <armnnQuantizer/INetworkQuantizer.hpp>
-
-#include <QuantizeHelper.hpp>
-
#include "../Graph.hpp"
#include "../Network.hpp"
#include "../NetworkQuantizerUtils.hpp"
@@ -19,7 +10,14 @@
#include "../RangeTracker.hpp"
#include "../../armnnQuantizer/CommandLineProcessor.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+#include <QuantizeHelper.hpp>
+
#include <boost/test/unit_test.hpp>
#include <unordered_map>
@@ -58,7 +56,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST(m_InputShape == info.GetShape());
// Based off current default [-15.0f, 15.0f]
@@ -72,7 +70,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(m_OutputShape == info.GetShape());
}
@@ -116,7 +114,7 @@ protected:
const OffsetScalePair& params,
DataType dataType = DataType::QAsymmU8)
{
- boost::ignore_unused(dataType);
+ IgnoreUnused(dataType);
TestQuantizationParamsImpl(info, dataType, params.first, params.second);
}
@@ -212,7 +210,7 @@ public:
void VisitAdditionLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-20.0f, 20.0f]
@@ -282,7 +280,7 @@ public:
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
@@ -385,7 +383,7 @@ BOOST_AUTO_TEST_CASE(InputOutputLayerDynamicQuant)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
std::string(armnn::GetDataTypeName(info.GetDataType()))
@@ -543,7 +541,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 3.5f]
@@ -599,7 +597,7 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-1.0f, 1.0f]
@@ -654,7 +652,7 @@ public:
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-5.0f, 15.0f]
@@ -725,7 +723,7 @@ BOOST_AUTO_TEST_CASE(QuantizeELuActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
@@ -779,7 +777,7 @@ BOOST_AUTO_TEST_CASE(QuantizeHardSwishActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
@@ -839,7 +837,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
const ConstTensor& gamma,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
@@ -924,7 +922,7 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
const DepthToSpaceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1116,7 +1114,7 @@ void ValidateFullyConnectedLayer(const bool biasEnabled)
const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -1173,7 +1171,7 @@ void TestQuantizeConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(convolution2dDescriptor, name);
+ IgnoreUnused(convolution2dDescriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -1259,7 +1257,7 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(convolution2dDescriptor, name);
+ IgnoreUnused(convolution2dDescriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -1343,7 +1341,7 @@ BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
const InstanceNormalizationDescriptor& descriptor,
const char* name = nullptr)
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1411,7 +1409,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
const SoftmaxDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1503,7 +1501,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
const SoftmaxDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 1.0f]
@@ -1636,7 +1634,7 @@ BOOST_AUTO_TEST_CASE(QuantizePermute)
const PermuteDescriptor& desc,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1691,7 +1689,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(spaceToBatchNdDescriptor, name);
+ IgnoreUnused(spaceToBatchNdDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1804,7 +1802,7 @@ BOOST_AUTO_TEST_CASE(QuantizePooling2d)
const Pooling2dDescriptor& desc,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1873,7 +1871,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant)
const ConstTensor& input,
const char* name = nullptr) override
{
- boost::ignore_unused(input, name);
+ IgnoreUnused(input, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
@@ -1946,20 +1944,20 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitArgMinMaxLayer(const IConnectableLayer* layer,
const ArgMinMaxDescriptor& argMinMaxDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(argMinMaxDescriptor, name);
+ IgnoreUnused(argMinMaxDescriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
@@ -2034,7 +2032,7 @@ BOOST_AUTO_TEST_CASE(QuantizeComparison)
const ComparisonDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -2106,19 +2104,19 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(originsDescriptor, name);
+ IgnoreUnused(originsDescriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(
outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65},
@@ -2214,7 +2212,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReshape)
const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(reshapeDescriptor, name);
+ IgnoreUnused(reshapeDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2269,7 +2267,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter)
const SplitterDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2325,7 +2323,7 @@ BOOST_AUTO_TEST_CASE(QuantizeResize)
const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(resizeDescriptor, name);
+ IgnoreUnused(resizeDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2382,7 +2380,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStridedSlice)
const StridedSliceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2437,7 +2435,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace)
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(batchToSpaceNdDescriptor, name);
+ IgnoreUnused(batchToSpaceNdDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2499,7 +2497,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
switch (id)
@@ -2526,7 +2524,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(m_OutputShape == info.GetShape());
}
@@ -2534,7 +2532,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
void VisitPreluLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(info,
{ 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8
@@ -2617,7 +2615,7 @@ void TestQuantizeTransposeConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -2704,20 +2702,20 @@ BOOST_AUTO_TEST_CASE(QuantizeStack)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitStackLayer(const IConnectableLayer* layer,
const StackDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
@@ -2784,7 +2782,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSlice)
const SliceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -2876,7 +2874,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
BOOST_TEST(m_InputShape == info.GetShape());
@@ -2886,7 +2884,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
BOOST_TEST(m_OutputShape == info.GetShape());
@@ -2895,14 +2893,14 @@ public:
void VisitQuantizeLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, name);
+ IgnoreUnused(layer, name);
m_VisitedQuantizeLayer = true;
}
void VisitDequantizeLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, name);
+ IgnoreUnused(layer, name);
m_VisitedDequantizeLayer = true;
}
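
The replacement applied throughout the QuantizerTest.cpp visitor overrides above is purely mechanical: the Boost include is swapped for <armnn/utility/IgnoreUnused.hpp> and each boost::ignore_unused(...) call site becomes IgnoreUnused(...). A minimal sketch of what such a header-only helper can look like, assuming a variadic no-op template (the exact armnn declaration may differ):

    namespace sketch
    {
    // Accepts any number of arguments of any type and does nothing with them,
    // which marks them as "used" without generating any code.
    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {}
    }

    void VisitSomething(const char* name)
    {
        sketch::IgnoreUnused(name); // silences -Wunused-parameter in -Werror builds
    }

Because the helper is a plain inline template, the swap removes the Boost.Core dependency from these tests without changing the generated code.
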
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index d9ed18bdd5..e3cbe03c62 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -158,8 +158,8 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
// These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning.
- boost::ignore_unused(dubious);
- boost::ignore_unused(suppressed);
+ IgnoreUnused(dubious);
+ IgnoreUnused(suppressed);
}
#endif // WITH_VALGRIND
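
The RuntimeTests.cpp hunk covers a slightly different case: VALGRIND_COUNT_LEAKS writes into all four counters, but only some of them are inspected afterwards, so the compiler flags the rest as set-but-unused. A short sketch of the same pattern, using a stand-in macro rather than the real Valgrind one:

    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {}

    // Stand-in for VALGRIND_COUNT_LEAKS, which assigns to every argument.
    #define COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
        do { (leaked) = 0; (dubious) = 0; (reachable) = 0; (suppressed) = 0; } while (0)

    bool CheckForLeaks()
    {
        unsigned long leaked = 0, dubious = 0, reachable = 0, suppressed = 0;
        COUNT_LEAKS(leaked, dubious, reachable, suppressed);
        IgnoreUnused(dubious, suppressed); // assigned by the macro, never read here
        return leaked == 0 && reachable == 0;
    }
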
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 3e59c0b604..976e58eb50 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -16,10 +16,11 @@
#include <Network.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <vector>
#include <string>
-#include <boost/core/ignore_unused.hpp>
using namespace armnn;
@@ -44,20 +45,20 @@ public:
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
{
- boost::ignore_unused(tensorInfo);
+ IgnoreUnused(tensorInfo);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const override
{
- boost::ignore_unused(tensorInfo, dataLayout);
+ IgnoreUnused(tensorInfo, dataLayout);
return nullptr;
}
@@ -85,20 +86,20 @@ public:
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
{
- boost::ignore_unused(tensorInfo);
+ IgnoreUnused(tensorInfo);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const override
{
- boost::ignore_unused(tensorInfo, dataLayout);
+ IgnoreUnused(tensorInfo, dataLayout);
return nullptr;
}
@@ -123,7 +124,7 @@ public:
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
@@ -164,7 +165,7 @@ public:
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
@@ -202,7 +203,7 @@ public:
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
@@ -239,7 +240,7 @@ public:
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
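
The TensorHandleStrategyTest.cpp stubs apply the same idea at the backend level: the fake factories must implement the full interface, so parameters they never use are routed through IgnoreUnused and a null handle or empty factory pointer is returned. A simplified, self-contained sketch of that stub pattern (the types here are stand-ins, not the real Arm NN declarations):

    #include <memory>

    template <typename... Ts>
    inline void IgnoreUnused(Ts&&...) {}

    struct TensorInfo {};
    struct ITensorHandle {};

    struct FakeTensorHandleFactory
    {
        // The signature is dictated by the interface; the stub ignores its
        // argument and returns no handle, which is all the strategy tests need.
        std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const
        {
            IgnoreUnused(tensorInfo);
            return nullptr;
        }
    };

Returning nullptr keeps the test doubles trivial: the tests exercise the factory-selection strategy, not the handles themselves.
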