author      Jan Eilers <jan.eilers@arm.com>    2020-03-09 12:13:48 +0000
committer   Jan Eilers <jan.eilers@arm.com>    2020-03-10 10:07:43 +0000
commit      8eb256065f0e75ecf8e427d56955e2bac117c2d7 (patch)
tree        1387fb4ea4a741475449d78be63d601f9d84b6e5
parent      8832522f47b701f5f042069e7bf8deae9b75d449 (diff)
download    armnn-8eb256065f0e75ecf8e427d56955e2bac117c2d7.tar.gz
IVGCVSW-4482 Remove boost::ignore_unused
!referencetests:229377
Signed-off-by: Jan Eilers <jan.eilers@arm.com>
Change-Id: Ia9b360b4a057fe7bbce5b268092627c09a0dba82
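Note: the replacement header armnn/utility/IgnoreUnused.hpp is referenced throughout this patch but its contents are not part of this excerpt. The following is a minimal sketch of how such a variadic no-op helper is typically implemented; the exact contents of the Arm NN header are an assumption here.

    // Sketch only: assumed shape of armnn/utility/IgnoreUnused.hpp.
    // A variadic function template that accepts any arguments and does
    // nothing with them, silencing "unused parameter/variable" warnings
    // without pulling in boost/core/ignore_unused.hpp.
    #pragma once

    namespace armnn
    {

    template<typename... Ts>
    inline void IgnoreUnused(Ts&&...) {}

    } // namespace armnn

    // Usage mirrors the substitutions in this patch, e.g.:
    //     IgnoreUnused(memory, source);   // instead of boost::ignore_unused(memory, source)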
-rw-r--r--  include/armnn/backends/ITensorHandle.hpp | 5
-rw-r--r--  include/armnn/backends/ITensorHandleFactory.hpp | 10
-rw-r--r--  src/armnn/DynamicQuantizationVisitor.cpp | 92
-rw-r--r--  src/armnn/ExecutionFrame.cpp | 2
-rw-r--r--  src/armnn/Graph.cpp | 2
-rw-r--r--  src/armnn/Graph.hpp | 6
-rw-r--r--  src/armnn/Layer.cpp | 2
-rw-r--r--  src/armnn/Layer.hpp | 2
-rw-r--r--  src/armnn/LayerSupportCommon.hpp | 27
-rw-r--r--  src/armnn/LoadedNetwork.cpp | 2
-rw-r--r--  src/armnn/Logging.cpp | 9
-rw-r--r--  src/armnn/Network.cpp | 3
-rw-r--r--  src/armnn/OverrideInputRangeVisitor.cpp | 5
-rw-r--r--  src/armnn/Profiling.cpp | 5
-rw-r--r--  src/armnn/Profiling.hpp | 5
-rw-r--r--  src/armnn/StaticRangeVisitor.cpp | 88
-rw-r--r--  src/armnn/SubgraphView.cpp | 5
-rw-r--r--  src/armnn/SubgraphViewSelector.cpp | 7
-rw-r--r--  src/armnn/layers/ConcatLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp16ToFp32Layer.cpp | 2
-rw-r--r--  src/armnn/layers/ConvertFp32ToFp16Layer.cpp | 2
-rw-r--r--  src/armnn/layers/DebugLayer.cpp | 5
-rw-r--r--  src/armnn/layers/FakeQuantizationLayer.cpp | 2
-rw-r--r--  src/armnn/layers/InputLayer.cpp | 2
-rw-r--r--  src/armnn/layers/MemCopyLayer.cpp | 4
-rw-r--r--  src/armnn/layers/MemImportLayer.cpp | 4
-rw-r--r--  src/armnn/layers/MergeLayer.cpp | 2
-rw-r--r--  src/armnn/layers/OutputLayer.cpp | 5
-rw-r--r--  src/armnn/layers/OutputLayer.hpp | 2
-rw-r--r--  src/armnn/layers/PreCompiledLayer.cpp | 2
-rw-r--r--  src/armnn/layers/ReshapeLayer.cpp | 3
-rw-r--r--  src/armnn/layers/SliceLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SpaceToBatchNdLayer.cpp | 2
-rw-r--r--  src/armnn/layers/SpaceToDepthLayer.cpp | 6
-rw-r--r--  src/armnn/layers/SplitterLayer.cpp | 4
-rw-r--r--  src/armnn/layers/StackLayer.cpp | 2
-rw-r--r--  src/armnn/layers/StandInLayer.cpp | 4
-rw-r--r--  src/armnn/optimizations/ConvertConstants.hpp | 4
-rw-r--r--  src/armnn/optimizations/OptimizeInverseConversions.hpp | 4
-rw-r--r--  src/armnn/optimizations/OptimizeInversePermutes.hpp | 4
-rw-r--r--  src/armnn/optimizations/SquashEqualSiblings.hpp | 4
-rw-r--r--  src/armnn/test/CreateWorkload.hpp | 4
-rw-r--r--  src/armnn/test/DebugCallbackTest.cpp | 2
-rw-r--r--  src/armnn/test/EndToEndTest.cpp | 2
-rw-r--r--  src/armnn/test/OptimizerTests.cpp | 6
-rw-r--r--  src/armnn/test/OptionalTest.cpp | 6
-rw-r--r--  src/armnn/test/ProfilerTests.cpp | 3
-rw-r--r--  src/armnn/test/QuantizerTest.cpp | 112
-rw-r--r--  src/armnn/test/RuntimeTests.cpp | 4
-rw-r--r--  src/armnn/test/TensorHandleStrategyTest.cpp | 23
-rw-r--r--  src/armnnDeserializer/Deserializer.cpp | 10
-rw-r--r--  src/armnnDeserializer/test/DeserializeAdd.cpp | 4
-rw-r--r--  src/armnnDeserializer/test/DeserializeMultiplication.cpp | 7
-rw-r--r--  src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp | 17
-rw-r--r--  src/armnnQuantizer/QuantizationDataSet.cpp | 5
-rw-r--r--  src/armnnSerializer/Serializer.cpp | 112
-rw-r--r--  src/armnnSerializer/test/ActivationSerializationTests.cpp | 10
-rw-r--r--  src/armnnTfLiteParser/TfLiteParser.cpp | 8
-rwxr-xr-x  src/armnnTfParser/TfParser.cpp | 80
-rw-r--r--  src/armnnTfParser/test/Split.cpp | 7
-rw-r--r--  src/armnnUtils/QuantizeHelper.hpp | 10
-rw-r--r--  src/armnnUtils/test/QuantizeHelperTest.cpp | 4
-rw-r--r--  src/backends/backendsCommon/CpuTensorHandle.cpp | 1
-rw-r--r--  src/backends/backendsCommon/LayerSupportBase.cpp | 2
-rw-r--r--  src/backends/backendsCommon/MakeWorkloadHelper.hpp | 6
-rw-r--r--  src/backends/backendsCommon/WorkloadUtils.hpp | 4
-rw-r--r--  src/backends/backendsCommon/test/BackendProfilingTests.cpp | 7
-rw-r--r--  src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp | 12
-rw-r--r--  src/backends/backendsCommon/test/MockBackend.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/TestDynamicBackend.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp | 12
-rw-r--r--  src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp | 8
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp | 8
-rw-r--r--  src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp | 14
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp | 10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp | 10
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp | 6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp | 6
-rw-r--r--  src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp | 4
-rw-r--r--  src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp | 2
-rw-r--r--  src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp | 2
-rw-r--r--  src/backends/cl/ClContextControl.cpp | 5
-rw-r--r--  src/backends/cl/ClLayerSupport.cpp | 12
-rw-r--r--  src/backends/cl/ClWorkloadFactory.cpp | 14
-rw-r--r--  src/backends/cl/OpenClTimer.cpp | 5
-rw-r--r--  src/backends/cl/test/ClRuntimeTests.cpp | 6
-rw-r--r--  src/backends/cl/test/Fp16SupportTest.cpp | 2
-rw-r--r--  src/backends/neon/NeonLayerSupport.cpp | 29
-rw-r--r--  src/backends/neon/NeonTensorHandleFactory.cpp | 2
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.cpp | 10
-rw-r--r--  src/backends/neon/NeonWorkloadFactory.hpp | 3
-rw-r--r--  src/backends/neon/test/NeonLayerTests.cpp | 2
-rw-r--r--  src/backends/reference/RefLayerSupport.cpp | 71
-rw-r--r--  src/backends/reference/RefTensorHandleFactory.cpp | 6
-rw-r--r--  src/backends/reference/RefWorkloadFactory.cpp | 12
-rw-r--r--  src/backends/reference/RefWorkloadFactory.hpp | 9
-rw-r--r--  src/backends/reference/test/RefWorkloadFactoryHelper.hpp | 2
-rw-r--r--  src/backends/reference/workloads/ArgMinMax.cpp | 2
-rw-r--r--  src/backends/reference/workloads/BaseIterator.hpp | 5
-rw-r--r--  src/backends/reference/workloads/Dequantize.cpp | 5
-rw-r--r--  src/backends/reference/workloads/DetectionPostProcess.cpp | 2
-rw-r--r--  src/backends/reference/workloads/Gather.cpp | 4
-rw-r--r--  src/backends/reference/workloads/LogSoftmax.cpp | 4
-rw-r--r--  src/backends/reference/workloads/RefStridedSliceWorkload.cpp | 2
-rw-r--r--  src/backends/reference/workloads/Slice.cpp | 5
-rw-r--r--  src/dynamic/sample/SampleDynamicWorkloadFactory.hpp | 2
-rw-r--r--  src/profiling/CommandHandlerFunctor.hpp | 8
-rw-r--r--  src/profiling/CounterDirectory.cpp | 4
-rw-r--r--  src/profiling/FileOnlyProfilingConnection.cpp | 2
-rw-r--r--  src/profiling/PacketVersionResolver.cpp | 4
-rw-r--r--  src/profiling/ProfilingStateMachine.hpp | 4
-rw-r--r--  src/profiling/SendCounterPacket.cpp | 2
-rw-r--r--  src/profiling/SendThread.cpp | 1
-rw-r--r--  src/profiling/test/FileOnlyProfilingDecoratorTests.cpp | 2
-rw-r--r--  src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp | 8
-rw-r--r--  src/profiling/test/ProfilingMocks.hpp | 20
-rw-r--r--  src/profiling/test/ProfilingTests.cpp | 7
-rw-r--r--  src/profiling/test/ProfilingTests.hpp | 8
-rw-r--r--  src/profiling/test/SendCounterPacketTests.hpp | 2
-rw-r--r--  tests/DeepSpeechV1InferenceTest.hpp | 5
-rw-r--r--  tests/InferenceTest.hpp | 9
-rw-r--r--  tests/InferenceTest.inl | 2
-rw-r--r--  tests/InferenceTestImage.cpp | 6
-rw-r--r--  tests/MobileNetSsdInferenceTest.hpp | 4
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp | 2
-rw-r--r--  tests/YoloInferenceTest.hpp | 4
159 files changed, 653 insertions, 644 deletions
diff --git a/include/armnn/backends/ITensorHandle.hpp b/include/armnn/backends/ITensorHandle.hpp
index 6ef0e32fb3..73b902eb7c 100644
--- a/include/armnn/backends/ITensorHandle.hpp
+++ b/include/armnn/backends/ITensorHandle.hpp
@@ -5,8 +5,7 @@
#pragma once
#include <armnn/MemorySources.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -75,7 +74,7 @@ public:
/// \return true on success or false on failure
virtual bool Import(void* memory, MemorySource source)
{
- boost::ignore_unused(memory, source);
+ IgnoreUnused(memory, source);
return false;
};
};
diff --git a/include/armnn/backends/ITensorHandleFactory.hpp b/include/armnn/backends/ITensorHandleFactory.hpp
index 2e4742301b..03abe18aa9 100644
--- a/include/armnn/backends/ITensorHandleFactory.hpp
+++ b/include/armnn/backends/ITensorHandleFactory.hpp
@@ -5,12 +5,12 @@
#pragma once
+#include "ITensorHandle.hpp"
+
#include <armnn/IRuntime.hpp>
#include <armnn/MemorySources.hpp>
#include <armnn/Types.hpp>
-#include "ITensorHandle.hpp"
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -39,7 +39,7 @@ public:
virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
const bool IsMemoryManaged) const
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
return CreateTensorHandle(tensorInfo);
}
@@ -47,7 +47,7 @@ public:
DataLayout dataLayout,
const bool IsMemoryManaged) const
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
return CreateTensorHandle(tensorInfo, dataLayout);
}
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index 4b1dce0b6f..862a926abc 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -6,7 +6,7 @@
#include "DynamicQuantizationVisitor.hpp"
#include "NetworkUtils.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
@@ -85,7 +85,7 @@ void DynamicQuantizationVisitor::VisitNonCalibratedLayers() {
void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
SetRange(layer, 0, -20.f, 20.f);
AddToCalibratedLayers(layer);
}
@@ -98,12 +98,12 @@ void DynamicQuantizationVisitor::VisitBatchNormalizationLayer(const IConnectable
const ConstTensor& gamma,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(mean);
- boost::ignore_unused(variance);
- boost::ignore_unused(beta);
- boost::ignore_unused(gamma);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(mean);
+ IgnoreUnused(variance);
+ IgnoreUnused(beta);
+ IgnoreUnused(gamma);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
@@ -114,10 +114,10 @@ void DynamicQuantizationVisitor::VisitConvolution2dLayer(const IConnectableLayer
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(convolution2dDescriptor);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(convolution2dDescriptor);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
@@ -128,10 +128,10 @@ void DynamicQuantizationVisitor::VisitDepthwiseConvolution2dLayer(const IConnect
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
@@ -140,7 +140,7 @@ void DynamicQuantizationVisitor::VisitActivationLayer(const IConnectableLayer* l
const ActivationDescriptor& activationDescriptor,
const char* name)
{
- boost::ignore_unused(name, activationDescriptor);
+ IgnoreUnused(name, activationDescriptor);
switch (activationDescriptor.m_Function)
{
// Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -172,10 +172,10 @@ void DynamicQuantizationVisitor::VisitFullyConnectedLayer(const IConnectableLaye
const Optional<ConstTensor>& biases,
const char *name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
AddToCalibratedLayers(layer);
}
@@ -184,8 +184,8 @@ void DynamicQuantizationVisitor::VisitPermuteLayer(const IConnectableLayer* laye
const PermuteDescriptor& permuteDescriptor,
const char* name)
{
- boost::ignore_unused(permuteDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(permuteDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -193,8 +193,8 @@ void DynamicQuantizationVisitor::VisitSpaceToBatchNdLayer(const IConnectableLaye
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
- boost::ignore_unused(spaceToBatchNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(spaceToBatchNdDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -202,8 +202,8 @@ void DynamicQuantizationVisitor::VisitPooling2dLayer(const IConnectableLayer* la
const Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
- boost::ignore_unused(pooling2dDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(pooling2dDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -211,8 +211,8 @@ void DynamicQuantizationVisitor::VisitSoftmaxLayer(const IConnectableLayer* laye
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
- boost::ignore_unused(softmaxDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(softmaxDescriptor);
+ IgnoreUnused(name);
SetRange(layer, 0, 0.f, 1.f);
AddToCalibratedLayers(layer);
}
@@ -221,7 +221,7 @@ void DynamicQuantizationVisitor::VisitConstantLayer(const IConnectableLayer* lay
const ConstTensor& input,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (input.GetDataType() != DataType::Float32)
{
@@ -249,8 +249,8 @@ void DynamicQuantizationVisitor::VisitConcatLayer(const IConnectableLayer* layer
const ConcatDescriptor& originsDescriptor,
const char* name)
{
- boost::ignore_unused(name);
- boost::ignore_unused(originsDescriptor);
+ IgnoreUnused(name);
+ IgnoreUnused(originsDescriptor);
float min = std::numeric_limits<float>::max();
float max = std::numeric_limits<float>::lowest();
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -270,8 +270,8 @@ void DynamicQuantizationVisitor::VisitReshapeLayer(const IConnectableLayer* laye
const ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
- boost::ignore_unused(reshapeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(reshapeDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -279,8 +279,8 @@ void DynamicQuantizationVisitor::VisitSplitterLayer(const IConnectableLayer* lay
const SplitterDescriptor& splitterDescriptor,
const char* name)
{
- boost::ignore_unused(splitterDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(splitterDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -288,8 +288,8 @@ void DynamicQuantizationVisitor::VisitResizeBilinearLayer(const IConnectableLaye
const ResizeBilinearDescriptor& resizeDesc,
const char* name)
{
- boost::ignore_unused(resizeDesc);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDesc);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -297,8 +297,8 @@ void DynamicQuantizationVisitor::VisitStridedSliceLayer(const IConnectableLayer*
const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
- boost::ignore_unused(stridedSliceDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(stridedSliceDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
@@ -306,23 +306,23 @@ void DynamicQuantizationVisitor::VisitBatchToSpaceNdLayer(const IConnectableLaye
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name)
{
- boost::ignore_unused(batchToSpaceNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(batchToSpaceNdDescriptor);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
}
void DynamicQuantizationVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(id);
- boost::ignore_unused(name);
+ IgnoreUnused(id);
+ IgnoreUnused(name);
SetRange(layer, 0, -0.0f, 0.0f);
AddToCalibratedLayers(layer);
}
void DynamicQuantizationVisitor::VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(id);
- boost::ignore_unused(name);
+ IgnoreUnused(id);
+ IgnoreUnused(name);
AddToNonCalibratedLayers(layer);
m_OutputLayers.push_back(id);
}
diff --git a/src/armnn/ExecutionFrame.cpp b/src/armnn/ExecutionFrame.cpp
index 58005e951c..92a7990881 100644
--- a/src/armnn/ExecutionFrame.cpp
+++ b/src/armnn/ExecutionFrame.cpp
@@ -13,7 +13,7 @@ ExecutionFrame::ExecutionFrame() {}
IExecutionFrame* ExecutionFrame::ExecuteWorkloads(IExecutionFrame* previousFrame)
{
- boost::ignore_unused(previousFrame);
+ IgnoreUnused(previousFrame);
for (auto& workload: m_WorkloadQueue)
{
workload->Execute();
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index 8e7f75b2b8..0d326adae7 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -435,7 +435,7 @@ void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const Subgr
const SubgraphView::Layers& substituteSubgraphLayers = substituteSubgraph.GetLayers();
std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](Layer* layer)
{
- boost::ignore_unused(layer);
+ IgnoreUnused(layer);
BOOST_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
"Substitute layer is not a member of graph");
});
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index c65f12bbc3..63bc8d062c 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -297,7 +297,7 @@ private:
graph.m_Layers.erase(layerIt);
const size_t numErased = graph.m_PosInGraphMap.erase(this);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
@@ -355,7 +355,7 @@ public:
~LayerInGraph() override
{
const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
};
@@ -381,7 +381,7 @@ public:
~LayerInGraph() override
{
const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
BOOST_ASSERT(numErased == 1);
}
};
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index 1f63d6ed3c..9de812c6e5 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -196,7 +196,7 @@ Layer::Layer(unsigned int numInputSlots,
, m_BackendHint(EmptyOptional())
, m_Guid(profiling::ProfilingService::Instance().NextGuid())
{
- boost::ignore_unused(layout);
+ IgnoreUnused(layout);
m_InputSlots.reserve(numInputSlots);
for (unsigned int i = 0; i < numInputSlots; ++i)
{
diff --git a/src/armnn/Layer.hpp b/src/armnn/Layer.hpp
index 5ad38f0b9e..ec35d71082 100644
--- a/src/armnn/Layer.hpp
+++ b/src/armnn/Layer.hpp
@@ -17,6 +17,7 @@
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <algorithm>
#include <memory>
@@ -27,7 +28,6 @@
#include <list>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/cast.hpp>
namespace armnn
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index e0c6b8040c..9252b3b9a5 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -4,13 +4,12 @@
//
#pragma once
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/DescriptorsFwd.hpp>
#include <armnn/Types.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Optional.hpp>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
@@ -54,23 +53,23 @@ bool IsSupportedForDataTypeGeneric(Optional<std::string&> reasonIfUnsupported,
template<typename ... Params>
bool TrueFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(reasonIfUnsupported);
- boost::ignore_unused(params...);
+ IgnoreUnused(reasonIfUnsupported);
+ IgnoreUnused(params...);
return true;
}
template<typename ... Params>
bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(reasonIfUnsupported);
- boost::ignore_unused(params...);
+ IgnoreUnused(reasonIfUnsupported);
+ IgnoreUnused(params...);
return false;
}
template<typename ... Params>
bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type");
return false;
}
@@ -78,7 +77,7 @@ bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type");
return false;
}
@@ -86,7 +85,7 @@ bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type");
return false;
}
@@ -94,7 +93,7 @@ bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
template<typename ... Params>
bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type");
return false;
}
@@ -102,7 +101,7 @@ bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input");
return false;
}
@@ -110,7 +109,7 @@ bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... p
template<typename ... Params>
bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input");
return false;
}
@@ -118,7 +117,7 @@ bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... p
template<typename ... Params>
bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output");
return false;
}
@@ -126,7 +125,7 @@ bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&...
template<typename ... Params>
bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
- boost::ignore_unused(params...);
+ IgnoreUnused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output");
return false;
}
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 2e95dd8d6c..69e42ba38f 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -311,7 +311,7 @@ const IWorkloadFactory& LoadedNetwork::GetWorkloadFactory(const Layer& layer) co
std::string reasonIfUnsupported;
BOOST_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer, {}, reasonIfUnsupported),
"Factory does not support layer");
- boost::ignore_unused(reasonIfUnsupported);
+ IgnoreUnused(reasonIfUnsupported);
return *workloadFactory;
}
diff --git a/src/armnn/Logging.cpp b/src/armnn/Logging.cpp
index 2c07751aae..ba401233ae 100644
--- a/src/armnn/Logging.cpp
+++ b/src/armnn/Logging.cpp
@@ -2,9 +2,9 @@
// Copyright © 2019 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
-#include <armnn/Logging.hpp>
-
+#include <armnn/Logging.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Utils.hpp>
#if defined(_MSC_VER)
@@ -20,7 +20,6 @@
#endif
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <iostream>
namespace armnn
@@ -107,14 +106,14 @@ class DebugOutputSink : public LogSink
public:
void Consume(const std::string& s) override
{
- boost::ignore_unused(s);
+ IgnoreUnused(s);
#if defined(_MSC_VER)
OutputDebugString(s.c_str());
OutputDebugString("\n");
#elif defined(__ANDROID__)
__android_log_write(ANDROID_LOG_DEBUG, "armnn", s.c_str());
#else
- boost::ignore_unused(s);
+ IgnoreUnused(s);
#endif
}
};
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 50a7df6662..3663727e48 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -22,6 +22,7 @@
#include <armnn/TypesUtils.hpp>
#include <armnn/BackendRegistry.hpp>
#include <armnn/Logging.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <ProfilingService.hpp>
@@ -628,7 +629,7 @@ ITensorHandleFactory::FactoryId CalculateSlotOptionForOutput(BackendsMap& backen
OutputSlot& slot,
TensorHandleFactoryRegistry& registry)
{
- boost::ignore_unused(backends, slot, registry);
+ IgnoreUnused(backends, slot, registry);
return ITensorHandleFactory::DeferredFactoryId;
}
diff --git a/src/armnn/OverrideInputRangeVisitor.cpp b/src/armnn/OverrideInputRangeVisitor.cpp
index d047c5bbe8..d0453fe326 100644
--- a/src/armnn/OverrideInputRangeVisitor.cpp
+++ b/src/armnn/OverrideInputRangeVisitor.cpp
@@ -7,8 +7,9 @@
#include "NetworkQuantizerUtils.hpp"
#include "Layer.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
namespace armnn
{
@@ -23,7 +24,7 @@ OverrideInputRangeVisitor::OverrideInputRangeVisitor(RangeTracker& ranges,
void OverrideInputRangeVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (m_LayerId == id)
{
m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index 1cd21ab63c..b1aedaab5a 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -5,6 +5,7 @@
#include "Profiling.hpp"
#include <armnn/BackendId.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include "JsonPrinter.hpp"
@@ -20,7 +21,7 @@
#include <stack>
#include <boost/algorithm/string.hpp>
-#include <boost/core/ignore_unused.hpp>
+
namespace armnn
{
@@ -223,7 +224,7 @@ void Profiler::EndEvent(Event* event)
m_Parents.pop();
Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
- boost::ignore_unused(parent);
+ IgnoreUnused(parent);
BOOST_ASSERT(event->GetParentEvent() == parent);
#if ARMNN_STREAMLINE_ENABLED
diff --git a/src/armnn/Profiling.hpp b/src/armnn/Profiling.hpp
index 4afd6911c2..e6ea090d7b 100644
--- a/src/armnn/Profiling.hpp
+++ b/src/armnn/Profiling.hpp
@@ -6,6 +6,7 @@
#include "ProfilingEvent.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include "armnn/IProfiler.hpp"
#include "WallClockTimer.hpp"
@@ -17,8 +18,6 @@
#include <stack>
#include <map>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
@@ -141,7 +140,7 @@ private:
void ConstructNextInVector(std::vector<InstrumentPtr>& instruments)
{
- boost::ignore_unused(instruments);
+ IgnoreUnused(instruments);
}
template<typename Arg, typename... Args>
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 81428c1f90..0e820c3202 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -5,7 +5,7 @@
#include "StaticRangeVisitor.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/Types.hpp>
@@ -31,7 +31,7 @@ void StaticRangeVisitor::ForwardParentParameters(const IConnectableLayer* layer)
void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
SetRange(layer, 0, -20.f, 20.f);
}
@@ -43,12 +43,12 @@ void StaticRangeVisitor::VisitBatchNormalizationLayer(const IConnectableLayer* l
const ConstTensor& gamma,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(mean);
- boost::ignore_unused(variance);
- boost::ignore_unused(beta);
- boost::ignore_unused(gamma);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(mean);
+ IgnoreUnused(variance);
+ IgnoreUnused(beta);
+ IgnoreUnused(gamma);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
@@ -58,10 +58,10 @@ void StaticRangeVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(convolution2dDescriptor);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(convolution2dDescriptor);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
@@ -71,10 +71,10 @@ void StaticRangeVisitor::VisitDepthwiseConvolution2dLayer(const IConnectableLaye
const Optional<ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
@@ -82,7 +82,7 @@ void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer* layer,
const ActivationDescriptor& activationDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
switch (activationDescriptor.m_Function)
{
// Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -113,10 +113,10 @@ void StaticRangeVisitor::VisitFullyConnectedLayer(const IConnectableLayer *layer
const Optional<ConstTensor>& biases,
const char *name)
{
- boost::ignore_unused(desc);
- boost::ignore_unused(weights);
- boost::ignore_unused(biases);
- boost::ignore_unused(name);
+ IgnoreUnused(desc);
+ IgnoreUnused(weights);
+ IgnoreUnused(biases);
+ IgnoreUnused(name);
SetRange(layer, 0, -15.0f, 15.0f);
}
@@ -124,8 +124,8 @@ void StaticRangeVisitor::VisitPermuteLayer(const IConnectableLayer* layer,
const PermuteDescriptor& permuteDescriptor,
const char* name)
{
- boost::ignore_unused(permuteDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(permuteDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -133,8 +133,8 @@ void StaticRangeVisitor::VisitSpaceToBatchNdLayer(const IConnectableLayer* layer
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
- boost::ignore_unused(spaceToBatchNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(spaceToBatchNdDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -142,8 +142,8 @@ void StaticRangeVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
const Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
- boost::ignore_unused(pooling2dDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(pooling2dDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -151,8 +151,8 @@ void StaticRangeVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
const SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
- boost::ignore_unused(softmaxDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(softmaxDescriptor);
+ IgnoreUnused(name);
SetRange(layer, 0, 0.f, 1.f);
}
@@ -160,8 +160,8 @@ void StaticRangeVisitor::VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name)
{
- boost::ignore_unused(originsDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(originsDescriptor);
+ IgnoreUnused(name);
float min = std::numeric_limits<float>::max();
float max = std::numeric_limits<float>::lowest();
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
@@ -180,7 +180,7 @@ void StaticRangeVisitor::VisitConstantLayer(const IConnectableLayer* layer,
const ConstTensor& input,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
if (input.GetDataType() != DataType::Float32)
{
@@ -208,8 +208,8 @@ void StaticRangeVisitor::VisitReshapeLayer(const IConnectableLayer* layer,
const ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
- boost::ignore_unused(reshapeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(reshapeDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -217,8 +217,8 @@ void StaticRangeVisitor::VisitSplitterLayer(const IConnectableLayer* layer,
const SplitterDescriptor& splitterDescriptor,
const char* name)
{
- boost::ignore_unused(splitterDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(splitterDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -226,8 +226,8 @@ void StaticRangeVisitor::VisitResizeBilinearLayer(const IConnectableLayer* layer
const ResizeBilinearDescriptor& resizeDesc,
const char* name)
{
- boost::ignore_unused(resizeDesc);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDesc);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -235,8 +235,8 @@ void StaticRangeVisitor::VisitResizeLayer(const IConnectableLayer* layer,
const ResizeDescriptor& resizeDescriptor,
const char* name)
{
- boost::ignore_unused(resizeDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(resizeDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -244,8 +244,8 @@ void StaticRangeVisitor::VisitStridedSliceLayer(const IConnectableLayer* layer,
const StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
- boost::ignore_unused(stridedSliceDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(stridedSliceDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
@@ -253,8 +253,8 @@ void StaticRangeVisitor::VisitBatchToSpaceNdLayer(const IConnectableLayer* layer
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name)
{
- boost::ignore_unused(batchToSpaceNdDescriptor);
- boost::ignore_unused(name);
+ IgnoreUnused(batchToSpaceNdDescriptor);
+ IgnoreUnused(name);
ForwardParentParameters(layer);
}
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index a87cc9b268..7705e687a9 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -6,8 +6,9 @@
#include "SubgraphView.hpp"
#include "Graph.hpp"
-#include <boost/numeric/conversion/cast.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <boost/numeric/conversion/cast.hpp>
#include <utility>
namespace armnn
@@ -24,7 +25,7 @@ void AssertIfNullsOrDuplicates(const C& container, const std::string& errorMessa
std::for_each(container.begin(), container.end(), [&duplicateSet, &errorMessage](const T& i)
{
// Ignore unused for release builds
- boost::ignore_unused(errorMessage);
+ IgnoreUnused(errorMessage);
// Check if the item is valid
BOOST_ASSERT_MSG(i, errorMessage.c_str());
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 8798b7285d..02b7bdafa5 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -5,6 +5,9 @@
#include "SubgraphViewSelector.hpp"
#include "Graph.hpp"
+
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
#include <algorithm>
#include <map>
@@ -78,14 +81,14 @@ public:
{
size_t numErased = a->m_Dependants.erase(this);
BOOST_ASSERT(numErased == 1);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
a->m_Dependants.insert(m_Parent);
}
for (PartialSubgraph* a : m_Dependants)
{
size_t numErased = a->m_Antecedents.erase(this);
BOOST_ASSERT(numErased == 1);
- boost::ignore_unused(numErased);
+ IgnoreUnused(numErased);
a->m_Antecedents.insert(m_Parent);
}
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 317d61f1fa..f4024af65a 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -130,7 +130,7 @@ void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registr
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 026e8de8b2..7873c94563 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -48,7 +48,7 @@ void ConvertFp16ToFp32Layer::Accept(ILayerVisitor& visitor) const
{
// these conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp16ToFp32Layer should never appear in an input graph");
}
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index 90bd8948d0..bbf4dbffd8 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -47,7 +47,7 @@ void ConvertFp32ToFp16Layer::Accept(ILayerVisitor& visitor) const
{
// These conversion layers are only inserted by the
// optimizer and so will never be in an input graph.
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("ConvertFp32ToFp16Layer should never appear in an input graph");
}
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index d0e0f037e2..76d33f27e9 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -8,8 +8,7 @@
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -53,7 +52,7 @@ void DebugLayer::ValidateTensorShapesFromInputs()
void DebugLayer::Accept(ILayerVisitor& visitor) const
{
// by design debug layers are never in input graphs
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("DebugLayer should never appear in an input graph");
}
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index 90f8445472..8611b9b73c 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -45,7 +45,7 @@ void FakeQuantizationLayer::ValidateTensorShapesFromInputs()
void FakeQuantizationLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("FakeQuantizationLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/InputLayer.cpp b/src/armnn/layers/InputLayer.cpp
index e0c2544236..84cc43c667 100644
--- a/src/armnn/layers/InputLayer.cpp
+++ b/src/armnn/layers/InputLayer.cpp
@@ -19,7 +19,7 @@ InputLayer::InputLayer(LayerBindingId id, const char* name)
std::unique_ptr<IWorkload> InputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 231b28548f..cf69c17cf5 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -26,7 +26,7 @@ MemCopyLayer* MemCopyLayer::Clone(Graph& graph) const
std::unique_ptr<IWorkload> MemCopyLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
MemCopyQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@ void MemCopyLayer::ValidateTensorShapesFromInputs()
void MemCopyLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("MemCopyLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index 3b0e6d295b..80f9fda803 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -26,7 +26,7 @@ MemImportLayer* MemImportLayer::Clone(Graph& graph) const
std::unique_ptr<IWorkload> MemImportLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
MemImportQueueDescriptor descriptor;
//This is different from other workloads. Does not get created by the workload factory.
@@ -49,7 +49,7 @@ void MemImportLayer::ValidateTensorShapesFromInputs()
void MemImportLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("MemImportLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index ce75950be2..f2fd29fe9e 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -18,7 +18,7 @@ MergeLayer::MergeLayer(const char* name)
std::unique_ptr<IWorkload> MergeLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/OutputLayer.cpp b/src/armnn/layers/OutputLayer.cpp
index 4239323635..f00e0a5259 100644
--- a/src/armnn/layers/OutputLayer.cpp
+++ b/src/armnn/layers/OutputLayer.cpp
@@ -6,11 +6,10 @@
#include "LayerCloneBase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/core/ignore_unused.hpp>
-
namespace armnn
{
@@ -21,7 +20,7 @@ OutputLayer::OutputLayer(LayerBindingId id, const char* name)
std::unique_ptr<IWorkload> OutputLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
return nullptr;
}
diff --git a/src/armnn/layers/OutputLayer.hpp b/src/armnn/layers/OutputLayer.hpp
index 8994556528..89bcfd6bb6 100644
--- a/src/armnn/layers/OutputLayer.hpp
+++ b/src/armnn/layers/OutputLayer.hpp
@@ -28,7 +28,7 @@ public:
const IWorkloadFactory& factory,
const bool IsMemoryManaged = true) override
{
- boost::ignore_unused(registry, factory, IsMemoryManaged);
+ IgnoreUnused(registry, factory, IsMemoryManaged);
}
/// Creates a dynamically-allocated copy of this layer.
diff --git a/src/armnn/layers/PreCompiledLayer.cpp b/src/armnn/layers/PreCompiledLayer.cpp
index 00a316c5c0..3444afc454 100644
--- a/src/armnn/layers/PreCompiledLayer.cpp
+++ b/src/armnn/layers/PreCompiledLayer.cpp
@@ -48,7 +48,7 @@ void PreCompiledLayer::SetPreCompiledObject(PreCompiledObjectPtr preCompiledObje
void PreCompiledLayer::Accept(ILayerVisitor& visitor) const
{
- boost::ignore_unused(visitor);
+ IgnoreUnused(visitor);
throw armnn::Exception("PreCompiledLayer should not appear in an input graph");
}
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index 3a952583e6..fbf3eaa80a 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -6,6 +6,7 @@
#include "LayerCloneBase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -31,7 +32,7 @@ ReshapeLayer* ReshapeLayer::Clone(Graph& graph) const
std::vector<TensorShape> ReshapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
return std::vector<TensorShape>({ m_Param.m_TargetShape });
}
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index e39caa5db1..ec82082c4a 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -50,7 +50,7 @@ void SliceLayer::ValidateTensorShapesFromInputs()
std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
BOOST_ASSERT(inputShapes.size() == 1);
TensorShape outputShape(boost::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index d38187c532..ec724bafd0 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -35,7 +35,7 @@ std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const IWorkloadFa
SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
}
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index f8a6eb3ed8..8aa0c9f8cd 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -7,7 +7,7 @@
#include "LayerCloneBase.hpp"
#include <armnn/TypesUtils.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <backendsCommon/WorkloadData.hpp>
@@ -15,8 +15,6 @@
#include <numeric>
-#include <boost/core/ignore_unused.hpp>
-
using namespace armnnUtils;
namespace armnn
@@ -37,7 +35,7 @@ std::unique_ptr<IWorkload> SpaceToDepthLayer::CreateWorkload(const IWorkloadFact
SpaceToDepthLayer* SpaceToDepthLayer::Clone(Graph& graph) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
return CloneBase<SpaceToDepthLayer>(graph, m_Param, GetName());
}
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 84a598c847..f655e712c8 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -104,7 +104,7 @@ void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& regis
const IWorkloadFactory& workloadFactory,
const bool IsMemoryManaged)
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
OutputSlot& slot = GetOutputSlot(0);
ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
@@ -127,7 +127,7 @@ SplitterLayer* SplitterLayer::Clone(Graph& graph) const
std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
BOOST_ASSERT(inputShapes.size() == m_Param.GetNumViews());
std::vector<TensorShape> outShapes;
//Output shapes must match View shapes.
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 1a060f93c8..6f793caecc 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -32,7 +32,7 @@ StackLayer* StackLayer::Clone(Graph& graph) const
std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
const TensorShape& inputShape = m_Param.m_InputShape;
const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
diff --git a/src/armnn/layers/StandInLayer.cpp b/src/armnn/layers/StandInLayer.cpp
index d0fc325caa..d23d1d0bad 100644
--- a/src/armnn/layers/StandInLayer.cpp
+++ b/src/armnn/layers/StandInLayer.cpp
@@ -16,7 +16,7 @@ StandInLayer::StandInLayer(const StandInDescriptor& param, const char* name)
std::unique_ptr<IWorkload> StandInLayer::CreateWorkload(const IWorkloadFactory& factory) const
{
- boost::ignore_unused(factory);
+ IgnoreUnused(factory);
// This throws in the event that it's called. We would expect that any backend that
// "claims" to support the StandInLayer type would actually substitute it with a PrecompiledLayer
// during graph optimization. There is no interface on the IWorkloadFactory to create a StandInWorkload.
@@ -30,7 +30,7 @@ StandInLayer* StandInLayer::Clone(Graph& graph) const
std::vector<TensorShape> StandInLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
- boost::ignore_unused(inputShapes);
+ IgnoreUnused(inputShapes);
throw Exception("Stand in layer does not support infering output shapes");
}
diff --git a/src/armnn/optimizations/ConvertConstants.hpp b/src/armnn/optimizations/ConvertConstants.hpp
index b3842e3c0f..5e19c7bd05 100644
--- a/src/armnn/optimizations/ConvertConstants.hpp
+++ b/src/armnn/optimizations/ConvertConstants.hpp
@@ -11,7 +11,7 @@
#include <backendsCommon/CpuTensorHandle.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <Half.hpp>
@@ -72,7 +72,7 @@ public:
void Run(Graph& graph, Layer& layer) const override
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
if (Predicate::Test(layer))
{
layer.OperateOnConstantTensors(Converter::Func);
diff --git a/src/armnn/optimizations/OptimizeInverseConversions.hpp b/src/armnn/optimizations/OptimizeInverseConversions.hpp
index f0d11ce159..3ea4a5b279 100644
--- a/src/armnn/optimizations/OptimizeInverseConversions.hpp
+++ b/src/armnn/optimizations/OptimizeInverseConversions.hpp
@@ -6,7 +6,7 @@
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -20,7 +20,7 @@ public:
/// Fp16ToFp32 followed by Fp32ToFp16 or vice-versa.
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
Layer& child = connection.GetOwningLayer();
diff --git a/src/armnn/optimizations/OptimizeInversePermutes.hpp b/src/armnn/optimizations/OptimizeInversePermutes.hpp
index 77d62a50cb..98e87c36c6 100644
--- a/src/armnn/optimizations/OptimizeInversePermutes.hpp
+++ b/src/armnn/optimizations/OptimizeInversePermutes.hpp
@@ -6,7 +6,7 @@
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -21,7 +21,7 @@ public:
/// Bypasses both layers for that connection if one is the inverse of the other.
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
Layer& base = connection.GetConnectedOutputSlot()->GetOwningLayer();
auto child = boost::polymorphic_downcast<PermuteType*>(&connection.GetOwningLayer());
diff --git a/src/armnn/optimizations/SquashEqualSiblings.hpp b/src/armnn/optimizations/SquashEqualSiblings.hpp
index d5a8a5d81e..bac27c06a7 100644
--- a/src/armnn/optimizations/SquashEqualSiblings.hpp
+++ b/src/armnn/optimizations/SquashEqualSiblings.hpp
@@ -6,7 +6,7 @@
#include "Optimization.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -23,7 +23,7 @@ public:
/// the child layer, so the siblings are left unconnected (and later removed).
void Run(Graph& graph, InputSlot& connection) const
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
auto& child = connection.GetOwningLayer();
if (!child.IsOutputUnconnected())
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 4782c432a2..72ad9d45ef 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -11,6 +11,7 @@
#include <ResolveType.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
@@ -18,7 +19,6 @@
#include <boost/test/unit_test.hpp>
#include <boost/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <utility>
@@ -1298,7 +1298,7 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
armnn::Graph& graph,
bool biasEnabled = false)
{
- boost::ignore_unused(graph);
+ IgnoreUnused(graph);
// To create a PreCompiled layer, create a network and Optimize it.
armnn::Network net;
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index bd8bdd543c..c89da83a89 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -60,7 +60,7 @@ BOOST_AUTO_TEST_CASE(RuntimeRegisterDebugCallback)
std::vector<unsigned int> slotIndexes;
auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, ITensorHandle* tensor)
{
- boost::ignore_unused(guid);
+ IgnoreUnused(guid);
slotIndexes.push_back(slotIndex);
tensorShapes.push_back(tensor->GetShape());
callCount++;
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index df84be4277..a8192a6480 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -6,8 +6,8 @@
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#include <set>
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 0ca4fc4764..56032adc33 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -691,7 +691,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
auto inputLayer = boost::polymorphic_downcast<const InputLayer*>(layer);
BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
}
@@ -700,7 +700,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
auto outputLayer = boost::polymorphic_downcast<const OutputLayer*>(layer);
BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
}
@@ -709,7 +709,7 @@ BOOST_AUTO_TEST_CASE(BackendHintTest)
const ActivationDescriptor& activationDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(activationDescriptor, name);
+ IgnoreUnused(activationDescriptor, name);
auto activation = boost::polymorphic_downcast<const ActivationLayer*>(layer);
BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
}
diff --git a/src/armnn/test/OptionalTest.cpp b/src/armnn/test/OptionalTest.cpp
index fd136439c8..73c96431fb 100644
--- a/src/armnn/test/OptionalTest.cpp
+++ b/src/armnn/test/OptionalTest.cpp
@@ -7,19 +7,19 @@
#include <armnn/Optional.hpp>
#include <string>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace
{
void PassStringRef(armnn::Optional<std::string&> value)
{
- boost::ignore_unused(value);
+ armnn::IgnoreUnused(value);
}
void PassStringRefWithDefault(armnn::Optional<std::string&> value = armnn::EmptyOptional())
{
- boost::ignore_unused(value);
+ armnn::IgnoreUnused(value);
}
} // namespace <anonymous>
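
One detail visible across the test hunks: where the surrounding code is not inside namespace armnn (as in the anonymous namespace above), the call is written armnn::IgnoreUnused(...), while files that open namespace armnn or pull it in with a using-directive keep the unqualified IgnoreUnused(...). A short sketch of the two call styles, assuming the Arm NN headers are on the include path (the function names below are made up for the sketch):

    #include <armnn/utility/IgnoreUnused.hpp>

    namespace armnn
    {
    void InsideArmnnNamespace(int graph)
    {
        IgnoreUnused(graph);        // unqualified: namespace armnn is already open
    }
    } // namespace armnn

    namespace // anonymous, as in the test fixture above
    {
    void OutsideArmnnNamespace(int value)
    {
        armnn::IgnoreUnused(value); // qualified: plain IgnoreUnused is not visible here
    }
    } // namespace

    int main()
    {
        armnn::InsideArmnnNamespace(1);
        OutsideArmnnNamespace(2);
        return 0;
    }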
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index a052862bdd..9376fa4cea 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -5,6 +5,7 @@
#include <armnn/IRuntime.hpp>
#include <armnn/TypesUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/test/tools/output_test_stream.hpp>
@@ -309,7 +310,7 @@ BOOST_AUTO_TEST_CASE(ProfilerJsonPrinter)
profiler->Print(json);
std::string output = buffer.str();
- boost::ignore_unused(output);
+ armnn::IgnoreUnused(output);
// Disable profiling here to not print out anything on stdout.
profiler->EnableProfiling(false);
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 2dc054af07..ef9b2da782 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -3,15 +3,6 @@
// SPDX-License-Identifier: MIT
//
-#include <armnn/INetwork.hpp>
-#include <armnn/LayerVisitorBase.hpp>
-#include <armnn/Tensor.hpp>
-#include <armnn/Types.hpp>
-
-#include <armnnQuantizer/INetworkQuantizer.hpp>
-
-#include <QuantizeHelper.hpp>
-
#include "../Graph.hpp"
#include "../Network.hpp"
#include "../NetworkQuantizerUtils.hpp"
@@ -19,7 +10,14 @@
#include "../RangeTracker.hpp"
#include "../../armnnQuantizer/CommandLineProcessor.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/INetwork.hpp>
+#include <armnn/LayerVisitorBase.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/Types.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnnQuantizer/INetworkQuantizer.hpp>
+#include <QuantizeHelper.hpp>
+
#include <boost/test/unit_test.hpp>
#include <unordered_map>
@@ -58,7 +56,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST(m_InputShape == info.GetShape());
// Based off current default [-15.0f, 15.0f]
@@ -72,7 +70,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(m_OutputShape == info.GetShape());
}
@@ -116,7 +114,7 @@ protected:
const OffsetScalePair& params,
DataType dataType = DataType::QAsymmU8)
{
- boost::ignore_unused(dataType);
+ IgnoreUnused(dataType);
TestQuantizationParamsImpl(info, dataType, params.first, params.second);
}
@@ -212,7 +210,7 @@ public:
void VisitAdditionLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-20.0f, 20.0f]
@@ -282,7 +280,7 @@ public:
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
@@ -385,7 +383,7 @@ BOOST_AUTO_TEST_CASE(InputOutputLayerDynamicQuant)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
std::string(armnn::GetDataTypeName(info.GetDataType()))
@@ -543,7 +541,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 3.5f]
@@ -599,7 +597,7 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-1.0f, 1.0f]
@@ -654,7 +652,7 @@ public:
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-5.0f, 15.0f]
@@ -725,7 +723,7 @@ BOOST_AUTO_TEST_CASE(QuantizeELuActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
@@ -779,7 +777,7 @@ BOOST_AUTO_TEST_CASE(QuantizeHardSwishActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
@@ -839,7 +837,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
const ConstTensor& gamma,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
@@ -924,7 +922,7 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
const DepthToSpaceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1116,7 +1114,7 @@ void ValidateFullyConnectedLayer(const bool biasEnabled)
const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -1173,7 +1171,7 @@ void TestQuantizeConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(convolution2dDescriptor, name);
+ IgnoreUnused(convolution2dDescriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -1259,7 +1257,7 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(convolution2dDescriptor, name);
+ IgnoreUnused(convolution2dDescriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -1343,7 +1341,7 @@ BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
const InstanceNormalizationDescriptor& descriptor,
const char* name = nullptr)
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1411,7 +1409,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
const SoftmaxDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -1503,7 +1501,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
const SoftmaxDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 1.0f]
@@ -1636,7 +1634,7 @@ BOOST_AUTO_TEST_CASE(QuantizePermute)
const PermuteDescriptor& desc,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1691,7 +1689,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(spaceToBatchNdDescriptor, name);
+ IgnoreUnused(spaceToBatchNdDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1804,7 +1802,7 @@ BOOST_AUTO_TEST_CASE(QuantizePooling2d)
const Pooling2dDescriptor& desc,
const char* name = nullptr) override
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1873,7 +1871,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant)
const ConstTensor& input,
const char* name = nullptr) override
{
- boost::ignore_unused(input, name);
+ IgnoreUnused(input, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
@@ -1946,20 +1944,20 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitArgMinMaxLayer(const IConnectableLayer* layer,
const ArgMinMaxDescriptor& argMinMaxDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(argMinMaxDescriptor, name);
+ IgnoreUnused(argMinMaxDescriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
@@ -2034,7 +2032,7 @@ BOOST_AUTO_TEST_CASE(QuantizeComparison)
const ComparisonDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -2106,19 +2104,19 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(originsDescriptor, name);
+ IgnoreUnused(originsDescriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(
outputInfo, {60.8f / g_AsymmU8QuantizationBase, 65},
@@ -2214,7 +2212,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReshape)
const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(reshapeDescriptor, name);
+ IgnoreUnused(reshapeDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2269,7 +2267,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter)
const SplitterDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2325,7 +2323,7 @@ BOOST_AUTO_TEST_CASE(QuantizeResize)
const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(resizeDescriptor, name);
+ IgnoreUnused(resizeDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2382,7 +2380,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStridedSlice)
const StridedSliceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2437,7 +2435,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace)
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(batchToSpaceNdDescriptor, name);
+ IgnoreUnused(batchToSpaceNdDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2499,7 +2497,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
switch (id)
@@ -2526,7 +2524,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(m_OutputShape == info.GetShape());
}
@@ -2534,7 +2532,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
void VisitPreluLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(info,
{ 30.0f / g_AsymmU8QuantizationBase, 128 }, // QASymmU8
@@ -2617,7 +2615,7 @@ void TestQuantizeTransposeConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -2704,20 +2702,20 @@ BOOST_AUTO_TEST_CASE(QuantizeStack)
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, id, name);
+ IgnoreUnused(layer, id, name);
}
void VisitStackLayer(const IConnectableLayer* layer,
const StackDescriptor& descriptor,
const char* name = nullptr) override
{
- boost::ignore_unused(descriptor, name);
+ IgnoreUnused(descriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
@@ -2784,7 +2782,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSlice)
const SliceDescriptor& desc,
const char* name = nullptr)
{
- boost::ignore_unused(desc, name);
+ IgnoreUnused(desc, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymmU8Params{ 30.0f / g_AsymmU8QuantizationBase, 128 };
@@ -2876,7 +2874,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
BOOST_TEST(m_InputShape == info.GetShape());
@@ -2886,7 +2884,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
- boost::ignore_unused(id, name);
+ IgnoreUnused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
BOOST_TEST(m_OutputShape == info.GetShape());
@@ -2895,14 +2893,14 @@ public:
void VisitQuantizeLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, name);
+ IgnoreUnused(layer, name);
m_VisitedQuantizeLayer = true;
}
void VisitDequantizeLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
- boost::ignore_unused(layer, name);
+ IgnoreUnused(layer, name);
m_VisitedDequantizeLayer = true;
}
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index d9ed18bdd5..e3cbe03c62 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -158,8 +158,8 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
// These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning.
- boost::ignore_unused(dubious);
- boost::ignore_unused(suppressed);
+ IgnoreUnused(dubious);
+ IgnoreUnused(suppressed);
}
#endif // WITH_VALGRIND
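
The comment in the hunk above describes the one non-obvious call site: VALGRIND_COUNT_LEAKS assigns to all four of its arguments, the test only reads some of the counters back, and the rest would otherwise trigger "variable set but not used" warnings. A compact sketch of that situation, using a stand-in macro so it builds without Valgrind (the assertions are illustrative, not the test's actual checks):

    #include <armnn/utility/IgnoreUnused.hpp>
    #include <cassert>

    // Stand-in for Valgrind's VALGRIND_COUNT_LEAKS: writes to every argument.
    #define COUNT_LEAKS_STANDIN(leaked, dubious, reachable, suppressed) \
        do { (leaked) = 0; (dubious) = 0; (reachable) = 0; (suppressed) = 0; } while (0)

    int main()
    {
        unsigned long leaked = 1, dubious = 1, reachable = 1, suppressed = 1;
        COUNT_LEAKS_STANDIN(leaked, dubious, reachable, suppressed);

        assert(leaked == 0);                      // counters the code actually reads
        assert(reachable == 0);
        armnn::IgnoreUnused(dubious, suppressed); // counters assigned but never read
        return 0;
    }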
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 3e59c0b604..976e58eb50 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -16,10 +16,11 @@
#include <Network.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <vector>
#include <string>
-#include <boost/core/ignore_unused.hpp>
using namespace armnn;
@@ -44,20 +45,20 @@ public:
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
{
- boost::ignore_unused(tensorInfo);
+ IgnoreUnused(tensorInfo);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const override
{
- boost::ignore_unused(tensorInfo, dataLayout);
+ IgnoreUnused(tensorInfo, dataLayout);
return nullptr;
}
@@ -85,20 +86,20 @@ public:
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override
{
- boost::ignore_unused(tensorInfo);
+ IgnoreUnused(tensorInfo);
return nullptr;
}
std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const override
{
- boost::ignore_unused(tensorInfo, dataLayout);
+ IgnoreUnused(tensorInfo, dataLayout);
return nullptr;
}
@@ -123,7 +124,7 @@ public:
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
@@ -164,7 +165,7 @@ public:
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
@@ -202,7 +203,7 @@ public:
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
@@ -239,7 +240,7 @@ public:
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager = nullptr) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index ed4605b2af..1f7c360d51 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -13,13 +13,13 @@
#include <armnnUtils/Permute.hpp>
#include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <ParserHelper.hpp>
#include <VerificationHelpers.hpp>
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/format.hpp>
@@ -743,7 +743,7 @@ INetworkPtr Deserializer::CreateNetworkFromGraph(GraphPtr graph)
BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerIndex,
const std::string& name) const
{
- boost::ignore_unused(layerIndex);
+ IgnoreUnused(layerIndex);
for (auto inputBinding : m_InputBindings)
{
if (inputBinding.first == name)
@@ -761,7 +761,7 @@ BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerInde
BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerIndex,
const std::string& name) const
{
- boost::ignore_unused(layerIndex);
+ IgnoreUnused(layerIndex);
for (auto outputBinding : m_OutputBindings)
{
if (outputBinding.first == name)
@@ -1805,7 +1805,7 @@ void Deserializer::ParsePermute(GraphPtr graph, unsigned int layerIndex)
armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::PoolingDescriptor pooling2dDesc,
unsigned int layerIndex)
{
- boost::ignore_unused(layerIndex);
+ IgnoreUnused(layerIndex);
armnn::Pooling2dDescriptor desc;
switch (pooling2dDesc->poolType())
@@ -2157,7 +2157,7 @@ armnn::NormalizationDescriptor Deserializer::GetNormalizationDescriptor(
Deserializer::NormalizationDescriptorPtr normalizationDescriptor,
unsigned int layerIndex)
{
- boost::ignore_unused(layerIndex);
+ IgnoreUnused(layerIndex);
armnn::NormalizationDescriptor desc;
switch (normalizationDescriptor->normChannelType())
diff --git a/src/armnnDeserializer/test/DeserializeAdd.cpp b/src/armnnDeserializer/test/DeserializeAdd.cpp
index 325bb6e1c3..4f29189ccc 100644
--- a/src/armnnDeserializer/test/DeserializeAdd.cpp
+++ b/src/armnnDeserializer/test/DeserializeAdd.cpp
@@ -7,7 +7,7 @@
#include "ParserFlatbuffersSerializeFixture.hpp"
#include "../Deserializer.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <string>
#include <iostream>
@@ -22,7 +22,7 @@ struct AddFixture : public ParserFlatbuffersSerializeFixture
const std::string & dataType,
const std::string & activation="NONE")
{
- boost::ignore_unused(activation);
+ armnn::IgnoreUnused(activation);
m_JsonString = R"(
{
inputIds: [0, 1],
diff --git a/src/armnnDeserializer/test/DeserializeMultiplication.cpp b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
index c0bb13e17b..8198001f58 100644
--- a/src/armnnDeserializer/test/DeserializeMultiplication.cpp
+++ b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
@@ -3,11 +3,12 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "ParserFlatbuffersSerializeFixture.hpp"
#include "../Deserializer.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <boost/test/unit_test.hpp>
#include <string>
#include <iostream>
@@ -22,7 +23,7 @@ struct MultiplicationFixture : public ParserFlatbuffersSerializeFixture
const std::string & dataType,
const std::string & activation="NONE")
{
- boost::ignore_unused(activation);
+ armnn::IgnoreUnused(activation);
m_JsonString = R"(
{
inputIds: [0, 1],
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index de7fe5cb5e..91d07f304a 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -6,21 +6,20 @@
#pragma once
#include "SchemaSerialize.hpp"
+#include "test/TensorHelpers.hpp"
+
+#include "flatbuffers/idl.h"
+#include "flatbuffers/util.h"
+#include <ArmnnSchema_generated.h>
#include <armnn/IRuntime.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <ResolveType.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
-#include <ResolveType.hpp>
-#include "test/TensorHelpers.hpp"
-
-#include "flatbuffers/idl.h"
-#include "flatbuffers/util.h"
-
-#include <ArmnnSchema_generated.h>
using armnnDeserializer::IDeserializer;
using TensorRawPtr = armnnSerializer::TensorInfo*;
@@ -155,7 +154,7 @@ struct ParserFlatbuffersSerializeFixture
armnnSerializer::TensorInfo tensorType, const std::string& name,
const float scale, const int64_t zeroPoint)
{
- boost::ignore_unused(name);
+ armnn::IgnoreUnused(name);
BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(),
tensors->dimensions()->begin(), tensors->dimensions()->end());
diff --git a/src/armnnQuantizer/QuantizationDataSet.cpp b/src/armnnQuantizer/QuantizationDataSet.cpp
index 9694342fe8..7042d74d00 100644
--- a/src/armnnQuantizer/QuantizationDataSet.cpp
+++ b/src/armnnQuantizer/QuantizationDataSet.cpp
@@ -8,7 +8,8 @@
#define BOOST_FILESYSTEM_NO_DEPRECATED
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
@@ -52,7 +53,7 @@ void InputLayerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer,
armnn::LayerBindingId id,
const char* name)
{
- boost::ignore_unused(name);
+ armnn::IgnoreUnused(name);
m_TensorInfos.emplace(id, layer->GetOutputSlot(0).GetTensorInfo());
}
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 39df0c2a7f..47b5d052f5 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -8,10 +8,10 @@
#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/QuantizedLstmParams.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <iostream>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <flatbuffers/util.h>
@@ -86,7 +86,7 @@ uint32_t SerializerVisitor::GetSerializedId(armnn::LayerGuid guid)
// Build FlatBuffer for Input Layer
void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
@@ -108,7 +108,7 @@ void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, L
// Build FlatBuffer for Output Layer
void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
@@ -128,7 +128,7 @@ void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
auto flatBufferAbsLayer = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
@@ -140,7 +140,7 @@ void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* lay
const armnn::ActivationDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
@@ -163,7 +163,7 @@ void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* lay
// Build FlatBuffer for Addition Layer
void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
@@ -180,7 +180,7 @@ void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *laye
const armnn::ArgMinMaxDescriptor& descriptor,
const char *name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
@@ -203,7 +203,7 @@ void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer*
const armnn::BatchToSpaceNdDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
@@ -237,7 +237,7 @@ void SerializerVisitor::VisitBatchNormalizationLayer(const armnn::IConnectableLa
const armnn::ConstTensor& gamma,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
@@ -264,7 +264,7 @@ void SerializerVisitor::VisitComparisonLayer(const armnn::IConnectableLayer* lay
const armnn::ComparisonDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
auto fbDescriptor = serializer::CreateComparisonDescriptor(
@@ -280,7 +280,7 @@ void SerializerVisitor::VisitConstantLayer(const armnn::IConnectableLayer* layer
const armnn::ConstTensor& input,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
@@ -303,7 +303,7 @@ void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer*
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
@@ -342,7 +342,7 @@ void SerializerVisitor::VisitDepthToSpaceLayer(const armnn::IConnectableLayer* l
const armnn::DepthToSpaceDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
@@ -360,7 +360,7 @@ void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectab
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
@@ -394,7 +394,7 @@ void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectab
void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
@@ -407,7 +407,7 @@ void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectable
const armnn::ConstTensor& anchors,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
@@ -435,7 +435,7 @@ void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectable
void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
@@ -447,7 +447,7 @@ void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLaye
const armnn::ElementwiseUnaryDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ElementwiseUnary);
auto fbDescriptor = serializer::CreateElementwiseUnaryDescriptor(
@@ -460,7 +460,7 @@ void SerializerVisitor::VisitElementwiseUnaryLayer(const armnn::IConnectableLaye
void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
@@ -470,7 +470,7 @@ void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, c
void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
@@ -480,7 +480,7 @@ void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, c
void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer);
@@ -490,7 +490,7 @@ void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
@@ -503,7 +503,7 @@ void SerializerVisitor::VisitInstanceNormalizationLayer(
const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
m_flatBufferBuilder,
@@ -522,7 +522,7 @@ void SerializerVisitor::VisitL2NormalizationLayer(const armnn::IConnectableLayer
const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
@@ -543,7 +543,7 @@ void SerializerVisitor::VisitLogSoftmaxLayer(const armnn::IConnectableLayer* lay
const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
@@ -568,7 +568,7 @@ void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer,
const armnn::LstmInputParams& params,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
@@ -673,7 +673,7 @@ void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
@@ -685,7 +685,7 @@ void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer,
const armnn::MeanDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
@@ -701,7 +701,7 @@ void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
@@ -711,7 +711,7 @@ void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
@@ -730,7 +730,7 @@ void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer,
const armnn::ConcatDescriptor& concatDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
@@ -763,7 +763,7 @@ void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
@@ -776,7 +776,7 @@ void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer,
const armnn::PadDescriptor& padDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
@@ -802,7 +802,7 @@ void SerializerVisitor::VisitPermuteLayer(const armnn::IConnectableLayer* layer,
const armnn::PermuteDescriptor& permuteDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
@@ -830,7 +830,7 @@ void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
const armnn::ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
@@ -856,7 +856,7 @@ void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer*
const armnn::ResizeBilinearDescriptor& resizeDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
@@ -877,7 +877,7 @@ void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
const armnn::ResizeDescriptor& resizeDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
@@ -897,7 +897,7 @@ void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
auto fbRsqrtLayer = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
@@ -909,7 +909,7 @@ void SerializerVisitor::VisitSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::SliceDescriptor& sliceDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
@@ -926,7 +926,7 @@ void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
const armnn::SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
@@ -948,7 +948,7 @@ void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* laye
const armnn::Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
@@ -976,7 +976,7 @@ void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* laye
void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
@@ -990,7 +990,7 @@ void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
@@ -1005,7 +1005,7 @@ void SerializerVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer*
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
@@ -1042,7 +1042,7 @@ void SerializerVisitor::VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer*
const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
@@ -1073,7 +1073,7 @@ void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* l
const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
auto flatBufferDescriptor =
@@ -1093,7 +1093,7 @@ void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer
const armnn::ViewsDescriptor& viewsDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer ViewOrigins
std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
@@ -1159,7 +1159,7 @@ void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer*
const armnn::NormalizationDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
@@ -1184,7 +1184,7 @@ void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
const armnn::StackDescriptor& stackDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
@@ -1207,7 +1207,7 @@ void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer *layer,
const armnn::StandInDescriptor& standInDescriptor,
const char *name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
standInDescriptor.m_NumInputs,
@@ -1223,7 +1223,7 @@ void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* l
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
@@ -1248,7 +1248,7 @@ void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* l
void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
@@ -1258,7 +1258,7 @@ void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* la
void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
@@ -1273,7 +1273,7 @@ void SerializerVisitor::VisitTransposeConvolution2dLayer(
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
@@ -1307,7 +1307,7 @@ void SerializerVisitor::VisitTransposeLayer(const armnn::IConnectableLayer* laye
const armnn::TransposeDescriptor& descriptor,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Transpose);
@@ -1334,7 +1334,7 @@ void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer*
const armnn::QuantizedLstmInputParams& params,
const char* name)
{
- boost::ignore_unused(name);
+ IgnoreUnused(name);
auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index 0362412d0c..abc63ae64d 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -3,18 +3,18 @@
// SPDX-License-Identifier: MIT
//
+#include "../Serializer.hpp"
+
#include <armnn/Descriptors.hpp>
#include <armnn/INetwork.hpp>
#include <armnn/IRuntime.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include "../Serializer.hpp"
+#include <boost/test/unit_test.hpp>
#include <sstream>
-#include <boost/core/ignore_unused.hpp>
-#include <boost/test/unit_test.hpp>
-
BOOST_AUTO_TEST_SUITE(SerializerTests)
class VerifyActivationName : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
@@ -24,7 +24,7 @@ public:
const armnn::ActivationDescriptor& activationDescriptor,
const char* name) override
{
- boost::ignore_unused(layer, activationDescriptor);
+ IgnoreUnused(layer, activationDescriptor);
BOOST_TEST(name == "activation");
}
};
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 5a5274a90f..eab9f4ea30 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -9,7 +9,7 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
-#include <boost/filesystem.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
// armnnUtils:
#include <armnnUtils/Permute.hpp>
@@ -22,10 +22,10 @@
#include <flatbuffers/flexbuffers.h>
-#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
+#include <boost/filesystem.hpp>
#include <fstream>
#include <algorithm>
@@ -426,7 +426,7 @@ CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
armnn::TensorInfo& tensorInfo,
armnn::Optional<armnn::PermutationVector&> permutationVector)
{
- boost::ignore_unused(tensorPtr);
+ IgnoreUnused(tensorPtr);
BOOST_ASSERT_MSG(tensorPtr != nullptr, "tensorPtr is null");
BOOST_ASSERT_MSG(bufferPtr != nullptr,
boost::str(
@@ -1827,7 +1827,7 @@ void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, A
{
CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
- boost::ignore_unused(operatorPtr);
+ IgnoreUnused(operatorPtr);
auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
CHECK_VALID_SIZE(inputs.size(), 1);
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index 13833314fd..793bd0e233 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -11,6 +11,7 @@
#include <armnnUtils/Permute.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Transpose.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <GraphTopologicalSort.hpp>
#include <ParserHelper.hpp>
@@ -21,7 +22,6 @@
#include <tensorflow/core/framework/graph.pb.h>
#include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/polymorphic_cast.hpp>
@@ -732,7 +732,7 @@ IConnectableLayer* TfParser::CreateAdditionLayer(
ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
if (numberOfInputs < 2)
{
@@ -812,7 +812,7 @@ ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, con
ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
// If one of the inputs is a MatMul and the other is a const, then we handle both nodes
@@ -842,7 +842,7 @@ ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, cons
ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
return AddAdditionLayer(nodeDef, true);
}
@@ -873,7 +873,7 @@ private:
ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
// Any requests for the output slots of this node should be forwarded to the node connected as input.
return std::make_unique<ParsedIdentityTfOperation>(this, nodeDef, inputs[0].m_IndexedValue);
@@ -1067,7 +1067,7 @@ struct InvokeParseFunction
ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
BOOST_ASSERT(nodeDef.op() == "Const");
if (nodeDef.attr().count("value") == 0)
@@ -1204,7 +1204,7 @@ unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOper
ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1346,7 +1346,7 @@ ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -1542,7 +1542,7 @@ TensorInfo OutputShapeOfExpandDims(const tensorflow::NodeDef& nodeDef, TensorInf
ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1563,7 +1563,7 @@ ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDe
ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
if (!HasParsedConstTensor<float>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -1712,7 +1712,7 @@ bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef
ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
if (inputs.size() != 2)
{
@@ -1850,7 +1850,7 @@ ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& params = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
IOutputSlot& indices = inputs[1].m_IndexedValue->ResolveArmnnOutputSlot(inputs[1].m_Index);
@@ -1887,7 +1887,7 @@ ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
@@ -1901,7 +1901,7 @@ ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
@@ -1915,7 +1915,7 @@ ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
IOutputSlot* input0Slot = inputLayers.first;
IOutputSlot* input1Slot = inputLayers.second;
@@ -1927,7 +1927,7 @@ ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot* input0Slot = &inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -1967,7 +1967,7 @@ ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, cons
ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
unsigned int numInputs = static_cast<unsigned int>(nodes.size());
@@ -2058,7 +2058,7 @@ ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, co
ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
auto inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
const auto inputCount = inputs.size();
@@ -2157,7 +2157,7 @@ TensorInfo CalculatePaddedOutputTensorInfo(const TensorInfo& inputTensorInfo,
ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
// input consists of:
// input[0] the tensor which will be padded
// input[1] the tensor holding the padding values
@@ -2232,7 +2232,7 @@ ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
// In tensorflow, we have the last input of the Concat layer as the axis for concatenation.
@@ -2318,7 +2318,7 @@ ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
// Note: the Shape layer is handled in a special way, because:
// 1. ARMNN doesn't support int32 tensors which it outputs.
// 2. ARMNN works with statically shaped tensors which are known at parse time.
@@ -2361,7 +2361,7 @@ ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
ParsedTfOperation* inputNode = inputs[0].m_IndexedValue;
@@ -2400,7 +2400,7 @@ ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
if (!HasParsedConstTensor<int32_t>(inputs[1].m_IndexedValue->GetNode().name()))
@@ -2539,7 +2539,7 @@ TensorInfo OutputShapeOfSqueeze(const tensorflow::NodeDef& nodeDef, TensorInfo i
ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& prevLayerOutputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
@@ -2559,7 +2559,7 @@ ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
NormalizationDescriptor normalizationDescriptor;
@@ -2605,7 +2605,7 @@ public:
ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
// Defers the creation of the layer (see ParsedMatMulTfOperation).
return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
@@ -2613,7 +2613,7 @@ ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, c
ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
TensorInfo inputTensorInfo = inputSlot.GetTensorInfo();
@@ -2688,7 +2688,7 @@ public:
ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
}
@@ -2696,7 +2696,7 @@ ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, cons
ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 0);
@@ -2725,14 +2725,14 @@ ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeD
ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
return AddRealDivLayer(nodeDef);
}
ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::ReLu;
@@ -2742,7 +2742,7 @@ ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::BoundedReLu;
@@ -2755,7 +2755,7 @@ ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::Sigmoid;
@@ -2766,7 +2766,7 @@ ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
const tensorflow::GraphDef &graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
@@ -2783,7 +2783,7 @@ ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
@@ -2800,7 +2800,7 @@ ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
unsigned int numInputs = static_cast<unsigned int>(nodes.size());
@@ -2895,7 +2895,7 @@ ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::SoftReLu;
@@ -2906,7 +2906,7 @@ ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
unsigned int numInputs = static_cast<unsigned int>(nodes.size());
@@ -2953,7 +2953,7 @@ ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& node
ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
ActivationDescriptor activationDesc;
activationDesc.m_Function = ActivationFunction::TanH;
@@ -2991,7 +2991,7 @@ ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
{
- boost::ignore_unused(graphDef);
+ IgnoreUnused(graphDef);
std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
IOutputSlot& inputSlot = inputs[0].m_IndexedValue->ResolveArmnnOutputSlot(inputs[0].m_Index);
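Every TfParser.cpp hunk above is the same mechanical substitution: the Boost call is replaced by the project's own IgnoreUnused helper, written unqualified where the armnn namespace is already in scope and qualified as armnn::IgnoreUnused elsewhere (see the Split.cpp test hunk below). A minimal sketch of such a helper, assuming the conventional variadic no-op shape rather than quoting the actual contents of armnn/utility/IgnoreUnused.hpp:

// Sketch only (assumed shape of the helper, not the verbatim header):
// a variadic no-op that suppresses unused-parameter warnings without Boost.
namespace armnn
{
template <typename... Ts>
inline void IgnoreUnused(Ts&&...) {}
} // namespace armnn

// Usage mirroring the replacements in these hunks:
//   IgnoreUnused(graphDef);            // unqualified, armnn namespace in scope
//   armnn::IgnoreUnused(withDimZero);  // qualified, e.g. from test fixtures

Because the parameters form a pack, a single call can also swallow several arguments at once, as the QuantizeHelper.hpp hunks further down (armnn::IgnoreUnused(scale, offset)) show.
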
diff --git a/src/armnnTfParser/test/Split.cpp b/src/armnnTfParser/test/Split.cpp
index d53ae672eb..eeef90a625 100644
--- a/src/armnnTfParser/test/Split.cpp
+++ b/src/armnnTfParser/test/Split.cpp
@@ -3,10 +3,13 @@
// SPDX-License-Identifier: MIT
//
-#include <boost/test/unit_test.hpp>
#include "armnnTfParser/ITfParser.hpp"
#include "ParserPrototxtFixture.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
+#include <boost/test/unit_test.hpp>
+
BOOST_AUTO_TEST_SUITE(TensorflowParser)
struct SplitFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
@@ -176,7 +179,7 @@ BOOST_FIXTURE_TEST_CASE(ParseSplit, InputFirstSplitFixture)
struct SplitLastDimFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
SplitLastDimFixture(bool withDimZero=false) {
- boost::ignore_unused(withDimZero);
+ armnn::IgnoreUnused(withDimZero);
m_Prototext = R"(
node {
name: "Placeholder"
diff --git a/src/armnnUtils/QuantizeHelper.hpp b/src/armnnUtils/QuantizeHelper.hpp
index 061c459156..6fd13fda98 100644
--- a/src/armnnUtils/QuantizeHelper.hpp
+++ b/src/armnnUtils/QuantizeHelper.hpp
@@ -5,6 +5,7 @@
#pragma once
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
#include <Half.hpp>
@@ -13,7 +14,6 @@
#include <iterator>
#include <vector>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnnUtils
@@ -38,13 +38,13 @@ struct SelectiveQuantizer<T, false>
{
static T Quantize(float value, float scale, int32_t offset)
{
- boost::ignore_unused(scale, offset);
+ armnn::IgnoreUnused(scale, offset);
return value;
}
static float Dequantize(T value, float scale, int32_t offset)
{
- boost::ignore_unused(scale, offset);
+ armnn::IgnoreUnused(scale, offset);
return value;
}
};
@@ -54,13 +54,13 @@ struct SelectiveQuantizer<armnn::Half, false>
{
static armnn::Half Quantize(float value, float scale, int32_t offset)
{
- boost::ignore_unused(scale, offset);
+ armnn::IgnoreUnused(scale, offset);
return armnn::Half(value);
}
static float Dequantize(armnn::Half value, float scale, int32_t offset)
{
- boost::ignore_unused(scale, offset);
+ armnn::IgnoreUnused(scale, offset);
return value;
}
};
diff --git a/src/armnnUtils/test/QuantizeHelperTest.cpp b/src/armnnUtils/test/QuantizeHelperTest.cpp
index 7e781d0b5d..410fdfa715 100644
--- a/src/armnnUtils/test/QuantizeHelperTest.cpp
+++ b/src/armnnUtils/test/QuantizeHelperTest.cpp
@@ -4,8 +4,8 @@
//
#include <QuantizeHelper.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#include <vector>
@@ -18,7 +18,7 @@ namespace
template<typename T>
bool IsFloatIterFunc(T iter)
{
- boost::ignore_unused(iter);
+ armnn::IgnoreUnused(iter);
return armnnUtils::IsFloatingPointIterator<T>::value;
}
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/CpuTensorHandle.cpp
index de83048340..65e6c47179 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.cpp
@@ -3,6 +3,7 @@
// SPDX-License-Identifier: MIT
//
#include <armnn/Exceptions.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
diff --git a/src/backends/backendsCommon/LayerSupportBase.cpp b/src/backends/backendsCommon/LayerSupportBase.cpp
index 127913447c..e8ef46ecd8 100644
--- a/src/backends/backendsCommon/LayerSupportBase.cpp
+++ b/src/backends/backendsCommon/LayerSupportBase.cpp
@@ -10,7 +10,7 @@
#include <backendsCommon/LayerSupportBase.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace
{
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 250a10a61c..7ef140e453 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -31,9 +31,9 @@ struct MakeWorkloadForType<NullWorkload>
const WorkloadInfo& info,
Args&&... args)
{
- boost::ignore_unused(descriptor);
- boost::ignore_unused(info);
- boost::ignore_unused(args...);
+ IgnoreUnused(descriptor);
+ IgnoreUnused(info);
+ IgnoreUnused(args...);
return nullptr;
}
};
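
The MakeWorkloadHelper.hpp hunk keeps the original one-call-per-argument layout. Since the helper is variadic (other hunks in this patch pass it two arguments or a parameter pack, e.g. armnn::IgnoreUnused(scale, offset) and IgnoreUnused(reasonIfUnsupported, (args)...)), the three calls are equivalent to one; a hedged, self-contained sketch using a hypothetical function name:

#include <armnn/utility/IgnoreUnused.hpp>

// Hypothetical illustration only: one variadic call covers named parameters
// and a trailing parameter pack, matching the effect of the three separate
// IgnoreUnused calls in the hunk above.
template <typename Descriptor, typename Info, typename... Args>
void SilenceAll(const Descriptor& descriptor, const Info& info, Args&&... args)
{
    armnn::IgnoreUnused(descriptor, info, args...);
}
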
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 92ef2d224f..66056db4ca 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -55,11 +55,11 @@ void CopyTensorContentsGeneric(const ITensorHandle* srcTensor, ITensorHandle* ds
TensorShape srcStrides = srcTensor->GetStrides();
const TensorShape& srcShape = srcTensor->GetShape();
const auto srcSize = srcTensor->GetStrides()[0] * srcShape[0];
- boost::ignore_unused(srcSize); // Only used for asserts
+ IgnoreUnused(srcSize); // Only used for asserts
TensorShape dstStrides = dstTensor->GetStrides();
const TensorShape& dstShape = dstTensor->GetShape();
const auto dstSize = dstTensor->GetStrides()[0] * dstShape[0];
- boost::ignore_unused(dstSize); // Only used for asserts
+ IgnoreUnused(dstSize); // Only used for asserts
size_t srcDepth = 1;
size_t srcBatches = 1;
diff --git a/src/backends/backendsCommon/test/BackendProfilingTests.cpp b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
index 455533699d..b9e0e45c0a 100644
--- a/src/backends/backendsCommon/test/BackendProfilingTests.cpp
+++ b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
@@ -14,6 +14,7 @@
#include "ProfilingUtils.hpp"
#include "RequestCounterDirectoryCommandHandler.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/BackendId.hpp>
#include <armnn/Logging.hpp>
#include <armnn/profiling/ISendTimelinePacket.hpp>
@@ -56,7 +57,7 @@ public:
/// Create and write a CounterDirectoryPacket from the parameters to the buffer.
virtual void SendCounterDirectoryPacket(const ICounterDirectory& counterDirectory)
{
- boost::ignore_unused(counterDirectory);
+ armnn::IgnoreUnused(counterDirectory);
}
/// Create and write a PeriodicCounterCapturePacket from the parameters to the buffer.
@@ -69,8 +70,8 @@ public:
virtual void SendPeriodicCounterSelectionPacket(uint32_t capturePeriod,
const std::vector<uint16_t>& selectedCounterIds)
{
- boost::ignore_unused(capturePeriod);
- boost::ignore_unused(selectedCounterIds);
+ armnn::IgnoreUnused(capturePeriod);
+ armnn::IgnoreUnused(selectedCounterIds);
}
std::vector<Timestamp> GetTimestamps()
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 395a63d6e6..15608ccdd8 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -8,7 +8,7 @@
#include <backendsCommon/WorkloadFactory.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace
{
@@ -414,7 +414,7 @@ struct LayerTypePolicy<armnn::LayerType::name, DataType> \
static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
unsigned int nIn, unsigned int nOut) \
{ \
- boost::ignore_unused(factory, nIn, nOut); \
+ IgnoreUnused(factory, nIn, nOut); \
return std::unique_ptr<armnn::IWorkload>(); \
} \
};
@@ -559,7 +559,7 @@ unsigned int GetNumOutputs(const armnn::Layer& layer)
template<>
unsigned int GetNumInputs<armnn::LayerType::Concat>(const armnn::Layer& layer)
{
- boost::ignore_unused(layer);
+ IgnoreUnused(layer);
return 2;
}
@@ -613,7 +613,7 @@ bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
}
catch(const armnn::InvalidArgumentException& e)
{
- boost::ignore_unused(e);
+ IgnoreUnused(e);
// This is ok since we throw InvalidArgumentException when creating the dummy workload.
return true;
}
@@ -644,12 +644,12 @@ bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
// InvalidArgumentException or UnimplementedException.
catch(const armnn::InvalidArgumentException& e)
{
- boost::ignore_unused(e);
+ IgnoreUnused(e);
return true;
}
catch(const armnn::UnimplementedException& e)
{
- boost::ignore_unused(e);
+ IgnoreUnused(e);
return true;
}
catch(const std::exception& e)
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index b2388cf45a..8d40117741 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -108,7 +108,7 @@ IBackendInternal::IBackendContextPtr MockBackend::CreateBackendContext(const IRu
IBackendInternal::IBackendProfilingContextPtr MockBackend::CreateBackendProfilingContext(
const IRuntime::CreationOptions& options, IBackendProfilingPtr& backendProfiling)
{
- boost::ignore_unused(options);
+ IgnoreUnused(options);
std::shared_ptr<armnn::MockBackendProfilingContext> context =
std::make_shared<MockBackendProfilingContext>(backendProfiling);
MockBackendProfilingService::Instance().SetProfilingContextPtr(context);
diff --git a/src/backends/backendsCommon/test/TestDynamicBackend.cpp b/src/backends/backendsCommon/test/TestDynamicBackend.cpp
index cbfe09377e..5018b4459d 100644
--- a/src/backends/backendsCommon/test/TestDynamicBackend.cpp
+++ b/src/backends/backendsCommon/test/TestDynamicBackend.cpp
@@ -7,7 +7,7 @@
#include <armnn/backends/IBackendInternal.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
constexpr const char* TestDynamicBackendId()
{
@@ -65,7 +65,7 @@ public:
}
IWorkloadFactoryPtr CreateWorkloadFactory(const IMemoryManagerSharedPtr& memoryManager) const override
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return IWorkloadFactoryPtr{};
}
ILayerSupportSharedPtr GetLayerSupport() const override
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 6993b9e9b1..319434e093 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -36,7 +36,7 @@ LayerTestResult<T, 4> BoundedReLuTestCommon(
unsigned int inputChannels,
unsigned int inputBatchSize)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = inputWidth;
unsigned int outputHeight = inputHeight;
unsigned int outputChannels = inputChannels;
@@ -245,7 +245,7 @@ boost::multi_array<float, 4> BoundedReLuRandomInputTest(
float upperBound,
const armnn::ActivationDescriptor& activationDescriptor)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
@@ -310,7 +310,7 @@ LayerTestResult<T,4> ConstantLinearActivationTestCommon(
float qScale = 0.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int inputHeight = 20;
unsigned int inputWidth = 17;
unsigned int inputChannels = 3;
@@ -402,7 +402,7 @@ LayerTestResult<T, 4> SimpleActivationTest(
int32_t outOffset,
const std::vector<float>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
constexpr static unsigned int inputWidth = 16u;
constexpr static unsigned int inputHeight = 1u;
constexpr static unsigned int inputChannels = 1u;
@@ -793,7 +793,7 @@ LayerTestResult<float, 5> SqrtNNTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const int inputDataSize = 120;
std::vector<float> inputData(inputDataSize);
@@ -1148,7 +1148,7 @@ LayerTestResult<T,4> CompareActivationTestImpl(
float qScale = 0.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int width = 17;
unsigned int height = 29;
unsigned int channels = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index 82dc59b66b..bfe0282dc4 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -165,7 +165,7 @@ LayerTestResult<T, 4> AdditionBroadcastTestImpl(
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -248,7 +248,7 @@ LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
@@ -455,7 +455,7 @@ LayerTestResult<float, 4> AdditionAfterMaxPoolTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
// Create Initial Tensor
// 1, 2, 3
@@ -563,7 +563,7 @@ LayerTestResult<float,4> CompareAdditionTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int batchSize = 4;
unsigned int channels = 1;
unsigned int height = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
index 7bfccd64d2..20dcef5dd4 100644
--- a/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
@@ -26,7 +26,7 @@ LayerTestResult<int32_t, 3> ArgMinMaxTestCommon(
const std::vector<int32_t>& outputData,
int axis = 3)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(inputData, inputTensorInfo));
LayerTestResult<int32_t, 3> result(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index f64b06d84b..48f7257a2e 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -8,7 +8,7 @@
#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
@@ -36,7 +36,7 @@ LayerTestResult<T, 4> BatchNormTestImpl(
int32_t qOffset,
armnn::DataLayout dataLayout)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
@@ -115,7 +115,7 @@ LayerTestResult<T,4> BatchNormTestNhwcImpl(
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 2;
const unsigned int height = 3;
@@ -589,7 +589,7 @@ LayerTestResult<float,4> CompareBatchNormTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 2;
const unsigned int height = 3;
const unsigned int channels = 5;
diff --git a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
index 12413667b9..2ba3a0c68c 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchToSpaceNdTestImpl.hpp
@@ -39,7 +39,7 @@ LayerTestResult<T, OutputDim> BatchToSpaceNdHelper(
float scale = 1.0f,
int32_t offset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 9f9944d6db..2156b0ee9e 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -43,7 +43,7 @@ LayerTestResult<uint8_t, NumDims> ComparisonTestImpl(
float outQuantScale,
int outQuantOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 9d590e3feb..f6f4b09f6a 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -126,7 +126,7 @@ template<typename T> void PermuteTensorData(
const T * inputData,
std::vector<T>& outputData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
if (inputData == nullptr)
{
@@ -178,7 +178,7 @@ template<typename T> void PermuteInputsForConcat(
unsigned int & concatDim,
TensorInfo & outputTensorInfo)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
"Expecting more than one tensor to be concatenated here");
@@ -1918,7 +1918,7 @@ LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
// Defines the tensor descriptors.
TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
@@ -2073,7 +2073,7 @@ LayerTestResult<float,3> ConcatTest(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
@@ -2346,7 +2346,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
@@ -2491,7 +2491,7 @@ LayerTestResult<uint8_t, 3> ConcatUint8Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
@@ -2629,7 +2629,7 @@ LayerTestResult<uint16_t, 3> ConcatUint16Test(
IWorkloadFactory& workloadFactory,
const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int outputWidth = 3;
unsigned int outputHeight = 6;
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index d11004c069..7a8aac4686 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -28,7 +28,7 @@ LayerTestResult<T, 4> ConstantTestImpl(
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
constexpr unsigned int inputWidth = 3;
constexpr unsigned int inputHeight = 4;
constexpr unsigned int inputChannels = 3;
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 669398fb54..89cdd96e37 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -8,7 +8,7 @@
#include <QuantizeHelper.hpp>
#include <armnnUtils/TensorUtils.hpp>
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
@@ -217,7 +217,7 @@ LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
uint32_t dilationX = 1,
uint32_t dilationY = 1)
{
- boost::ignore_unused(memoryManager);
+ armnn::IgnoreUnused(memoryManager);
unsigned int inputHeight = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
unsigned int inputWidth = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
@@ -381,7 +381,7 @@ LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(
uint32_t strideX = 1,
uint32_t strideY = 1)
{
- boost::ignore_unused(qScale, qOffset);
+ armnn::IgnoreUnused(qScale, qOffset);
unsigned int inputNum = boost::numeric_cast<unsigned int>(input.shape()[0]);
unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[3]);
unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[1]);
@@ -587,7 +587,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
bool biasEnabled,
armnn::DataLayout dataLayout)
{
- boost::ignore_unused(biasEnabled);
+ armnn::IgnoreUnused(biasEnabled);
// Use common single-batch 5x5 image.
armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
@@ -640,7 +640,7 @@ LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
bool biasEnabled,
const armnn::DataLayout& dataLayout)
{
- boost::ignore_unused(biasEnabled);
+ armnn::IgnoreUnused(biasEnabled);
// Input is a single-batch, 1 channel, 5x5 image.
armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
index cea6efb549..8b3bbd82e2 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp16ToFp32TestImpl.cpp
@@ -17,7 +17,7 @@ LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
using namespace half_float::literal;
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index 9f4eecac2e..1e604719e7 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -15,7 +15,7 @@ LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
using namespace half_float::literal;
const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 92c5d92fcc..149779b9ef 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -30,7 +30,7 @@ LayerTestResult<T, Dim> DebugTestImpl(
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index 4ddfb3089f..4d4a6bc156 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -29,7 +29,7 @@ LayerTestResult<T, 4> DepthToSpaceTestImpl(
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
{
PermuteTensorNhwcToNchw<float>(inputInfo, inputData);
diff --git a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
index 91d56bb492..0a4bdb8124 100644
--- a/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DequantizeTestImpl.cpp
@@ -26,7 +26,7 @@ LayerTestResult<T1, Dim> DequantizeTestImpl(
const std::vector<T1>& expectedOutputData,
armnn::DequantizeQueueDescriptor descriptor)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
boost::multi_array<T, Dim> input = MakeTensor<T, Dim>(inputTensorInfo, inputData);
LayerTestResult<T1, Dim> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
index 223beb49e8..2359f777b8 100644
--- a/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DivisionTestImpl.cpp
@@ -20,7 +20,7 @@ LayerTestResult<float, 4> DivisionByZeroTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 2u;
const unsigned int height = 2u;
const unsigned int channelCount = 2u;
diff --git a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
index d905bde7a0..905f97b968 100644
--- a/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FakeQuantizationTestImpl.cpp
@@ -17,7 +17,7 @@ LayerTestResult<float, 2> FakeQuantizationTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
constexpr unsigned int width = 2;
constexpr unsigned int height = 3;
diff --git a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
index ebad7fc91c..444809f69b 100644
--- a/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FloorTestImpl.cpp
@@ -16,7 +16,7 @@ LayerTestResult<T, 4> SimpleFloorTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType);
inputTensorInfo.SetQuantizationScale(0.1f);
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index 7c6122e5fa..43bcfb1d76 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -34,7 +34,7 @@ LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
bool biasEnabled,
bool transposeWeights)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
index 68410559f7..47adb22fe7 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
@@ -31,7 +31,7 @@ LayerTestResult<T, OutputDim> GatherTestImpl(
const std::vector<int32_t>& indicesData,
const std::vector<T>& outputData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto params = MakeTensor<T, ParamsDim>(paramsInfo, paramsData);
auto indices = MakeTensor<int32_t, IndicesDim>(indicesInfo, indicesData);
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index ae28bc03f3..a13198b2f9 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -34,7 +34,7 @@ LayerTestResult<T, 4> InstanceNormTestImpl(
float qScale = 0.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index e500a126f6..4d98e236d3 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -33,7 +33,7 @@ LayerTestResult<T, 4> L2NormalizationTestImpl(
const armnn::DataLayout layout,
float epsilon = 1e-12f)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType, scale, offset);
const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType, outScale, outOffset);
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 392983c311..208bed24a5 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -36,7 +36,7 @@ LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
float qScale = 1.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
LayerTestResult<T, NumDims> result(outputInfo);
result.outputExpected =
MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index b12df8a2b9..50ef5c9758 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -142,7 +142,7 @@ LstmNoCifgNoPeepholeNoProjectionTestImpl(
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
@@ -345,7 +345,7 @@ LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workl
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int batchSize = 2;
unsigned int outputSize = 16;
unsigned int inputSize = 5;
@@ -1060,7 +1060,7 @@ LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
bool cifgEnabled = true;
bool peepholeEnabled = true;
bool projectionEnabled = false;
@@ -1285,7 +1285,7 @@ LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadF
int32_t qOffset = 0,
armnn::DataType constantDataType = armnn::DataType::Float32)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int batchSize = 2;
unsigned int outputSize = 3;
unsigned int inputSize = 5;
@@ -1552,7 +1552,7 @@ LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
const boost::multi_array<uint8_t, 2>& input,
const boost::multi_array<uint8_t, 2>& outputExpected)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
diff --git a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
index 5147cffddb..0e66d9fecd 100644
--- a/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MaximumTestImpl.cpp
@@ -19,7 +19,7 @@ std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::MaximumQueueDescriptor>(
LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 2u;
const unsigned int height = 2u;
const unsigned int channelCount = 2u;
diff --git a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
index b8eae1c93e..cd7b22ea42 100644
--- a/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/MeanTestImpl.hpp
@@ -28,7 +28,7 @@ LayerTestResult<T, OutputDim> MeanTestHelper(
float scale = 1.0f,
int32_t offset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo(InputDim, inputShape, ArmnnType);
armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
index a0a4029115..ae54746ad8 100644
--- a/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MinimumTestImpl.cpp
@@ -20,7 +20,7 @@ LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int shape0[] = { 1, 2, 2, 2 };
unsigned int shape1[] = { 1, 1, 1, 1 };
diff --git a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
index d32e0cf89b..1a9cf5b173 100644
--- a/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/MultiplicationTestImpl.cpp
@@ -401,7 +401,7 @@ LayerTestResult<float,4> CompareMultiplicationTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int width = 16;
const unsigned int height = 32;
const unsigned int channelCount = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
index ef828555fa..ef3a45b391 100644
--- a/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/NormalizationTestImpl.cpp
@@ -24,7 +24,7 @@ LayerTestResult<float,4> SimpleNormalizationTestImpl(
armnn::NormalizationAlgorithmChannel normChannel,
armnn::NormalizationAlgorithmMethod normMethod)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int inputHeight = 2;
const unsigned int inputWidth = 2;
const unsigned int inputChannels = 1;
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 9239c665eb..69c651b5cd 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -24,7 +24,7 @@ LayerTestResult<T, 2> Pad2dTestCommon(
int32_t qOffset,
const float customPaddingValue)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorShape inputShape{ 3, 3 };
const armnn::TensorShape outputShape{ 7, 7 };
@@ -96,7 +96,7 @@ LayerTestResult<T, 3> Pad3dTestCommon(
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorShape inputShape{ 2, 2, 2 };
const armnn::TensorShape outputShape{ 3, 5, 6 };
@@ -180,7 +180,7 @@ LayerTestResult<T, 4> Pad4dTestCommon(
float qScale,
int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
index 9460592888..71e15334e7 100644
--- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
@@ -25,7 +25,7 @@ LayerTestResult<T, 4> SimplePermuteTestImpl(
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
LayerTestResult<T, 4> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index b58e9826b8..89e46fbdb1 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -14,6 +14,8 @@
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <backendsCommon/WorkloadInfo.hpp>
#include <backendsCommon/test/TensorCopyUtils.hpp>
@@ -38,7 +40,7 @@ LayerTestResult<T, 4> SimplePooling2dTestImpl(
const boost::multi_array<T, 4>& input,
const boost::multi_array<T, 4>& outputExpected)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
auto heightIndex = dimensionIndices.GetHeightIndex();
@@ -740,7 +742,7 @@ LayerTestResult<T, 4> ComparePooling2dTestCommon(
float qScale = 1.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const unsigned int inputWidth = 16;
const unsigned int inputHeight = 32;
const unsigned int channelCount = 2;
diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
index 7138b46f88..3b6c2d8412 100644
--- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
@@ -24,7 +24,7 @@ LayerTestResult<T, 4> PreluTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputTensorInfo ({ 1, 2, 2, 3 }, ArmnnType);
armnn::TensorInfo alphaTensorInfo ({ 1, 1, 1, 3 }, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index e8996d4a51..673bfef678 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -29,7 +29,7 @@ LayerTestResult<T, Dim> QuantizeTestImpl(
const std::vector<T>& expectedOutputData,
armnn::QuantizeQueueDescriptor descriptor)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
boost::multi_array<float, Dim> input = MakeTensor<float, Dim>(inputTensorInfo, inputData);
LayerTestResult<T, Dim> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
index 894ece65a5..5ed947d8c3 100644
--- a/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ReshapeTestImpl.cpp
@@ -23,7 +23,7 @@ LayerTestResult<T, NumDims> SimpleReshapeTestImpl(
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto input = MakeTensor<T, NumDims>(inputTensorInfo, inputData);
LayerTestResult<T, NumDims> ret(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
index a6e0ca19a5..e95f18b7a5 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.cpp
@@ -73,7 +73,7 @@ LayerTestResult<T, NumDims> ResizeTestImpl(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const ResizeTestParams& params)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
armnn::TensorInfo inputInfo(params.m_InputShape, ArmnnType);
armnn::TensorInfo outputInfo(params.m_OutputShape, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index 09f15c022e..df3b6238c5 100644
--- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
@@ -29,7 +29,7 @@ LayerTestResult<T, NumDims> SliceTestImpl(
const float qScale = 1.0f,
const int qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 044589b9d2..772ae2ccc7 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -65,7 +65,7 @@ LayerTestResult<T, n> SimpleSoftmaxBaseTestImpl(
const std::vector<float>& inputData,
int axis = 1)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
using std::exp;
const float qScale = 1.f / 256.f;
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index ed35413e1e..d1bc2a950f 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -31,7 +31,7 @@ LayerTestResult<T, 4> SpaceToBatchNdTestImpl(
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::PermutationVector NCHWToNHWC = {0, 3, 1, 2};
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NHWC)
{
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index bf2f48ca61..c6a5bbed0e 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
@@ -31,7 +31,7 @@ LayerTestResult<T, 4> SpaceToDepthTestImpl(
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::PermutationVector NHWCToNCHW = {0, 2, 3, 1};
if (descriptor.m_Parameters.m_DataLayout == armnn::DataLayout::NCHW)
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index f55aca1baa..88b18b9732 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -24,7 +24,7 @@ std::vector<LayerTestResult<T,3>> SplitterTestCommon(
float qScale = 0.0f,
int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int inputWidth = 5;
unsigned int inputHeight = 6;
unsigned int inputChannels = 3;
@@ -257,7 +257,7 @@ LayerTestResult<T, 3> CopyViaSplitterTestImpl(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale, int32_t qOffset)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
auto input = MakeTensor<T, 3>(
tensorInfo,
diff --git a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
index 45dff96ea7..eeaa846972 100644
--- a/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StackTestImpl.cpp
@@ -30,7 +30,7 @@ LayerTestResult<T, outputDimLength> StackTestHelper(
const std::vector<std::vector<T>>& inputData,
const std::vector<T>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
unsigned int numInputs = static_cast<unsigned int>(inputData.size());
std::vector<boost::multi_array<T, outputDimLength-1>> inputs;
for (unsigned int i = 0; i < numInputs; ++i)
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index 63a95b1008..b857a1b23b 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -29,7 +29,7 @@ LayerTestResult<T, OutDim> StridedSliceTestImpl(
const float qScale = 1.0f,
const int32_t qOffset = 0)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
if(armnn::IsQuantizedType<T>())
{
inputTensorInfo.SetQuantizationScale(qScale);
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index 378ec46bd1..07f52584ca 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -51,7 +51,7 @@ void TransposeConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
const TensorData<T>& weights,
const armnn::Optional<TensorData<BT>>& biases)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
using namespace armnn;
VerifyInputTensorData(input, "input");
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
index 3949dcc142..0e0f317a3e 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeTestImpl.hpp
@@ -25,7 +25,7 @@ LayerTestResult<T, 4> SimpleTransposeTestImpl(
const std::vector<T>& inputData,
const std::vector<T>& outputExpectedData)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
LayerTestResult<T, 4> ret(outputTensorInfo);
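[Editor's note] The replacements throughout this patch target armnn::IgnoreUnused, declared in the armnn/utility/IgnoreUnused.hpp header that these hunks start including. The header itself is not shown in this part of the diff, so the following is only a minimal sketch, assuming the conventional shape of such a helper: a variadic function template with an empty body, which the optimiser removes entirely while still counting its arguments as used.

// Sketch only: assumed contents of armnn/utility/IgnoreUnused.hpp (not shown in this diff section).
#pragma once

namespace armnn
{

// Accepts any number of arguments of any type and does nothing with them.
// The call is enough to silence -Wunused-parameter / -Wunused-variable,
// and the empty inline body compiles away to nothing.
template <typename... Ts>
inline void IgnoreUnused(Ts&&...) {}

} // namespace armnn

With this form a single call can swallow any number of arguments, which is why multi-argument sites such as IgnoreUnused(parent, subTensorShape, subTensorOrigin) keep the same arity when migrating from boost::ignore_unused.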
diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp
index 72c8e9fe45..f307133085 100644
--- a/src/backends/cl/ClContextControl.cpp
+++ b/src/backends/cl/ClContextControl.cpp
@@ -9,13 +9,14 @@
#include <LeakChecking.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <arm_compute/core/CL/CLKernelLibrary.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <boost/polymorphic_cast.hpp>
-#include <boost/core/ignore_unused.hpp>
namespace cl
{
@@ -33,7 +34,7 @@ ClContextControl::ClContextControl(arm_compute::CLTuner *tuner,
, m_ProfilingEnabled(profilingEnabled)
{
// Ignore m_ProfilingEnabled if unused to avoid compilation problems when ArmCompute is disabled.
- boost::ignore_unused(m_ProfilingEnabled);
+ IgnoreUnused(m_ProfilingEnabled);
try
{
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index d3ac98655a..cdb93d7218 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -6,14 +6,13 @@
#include "ClLayerSupport.hpp"
#include "ClBackendId.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/BackendRegistry.hpp>
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
-#include <boost/core/ignore_unused.hpp>
-
#if defined(ARMCOMPUTECL_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
@@ -63,7 +62,6 @@
#include "workloads/ClTransposeWorkload.hpp"
#endif
-using namespace boost;
namespace armnn
{
@@ -93,7 +91,7 @@ bool IsMatchingStride(uint32_t actualStride)
template<typename ... Args>
bool IsClBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
- boost::ignore_unused(reasonIfUnsupported, (args)...);
+ IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTECL_ENABLED)
return true;
#else
@@ -649,7 +647,7 @@ bool ClLayerSupport::IsReshapeSupported(const TensorInfo& input,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(ClReshapeWorkloadValidate, reasonIfUnsupported, input, output);
}
@@ -728,7 +726,7 @@ bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
return IsSupportedForDataTypeCl(reasonIfUnsupported,
input.GetDataType(),
&TrueFunc<>,
@@ -756,7 +754,7 @@ bool ClLayerSupport::IsSplitterSupported(const TensorInfo& input,
*splitAxis.begin());
}
#endif
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
for (auto output : outputs)
{
if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
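[Editor's note] The IsClBackendSupported hunk above also shows why these parameters cannot simply be left unnamed: the same signature must compile whether or not the CL backend is built in, so the arguments are only inspected in one branch of the conditional compilation. A minimal sketch of that pattern follows; the function name and the #else body are illustrative, not copied from the source.

#include <string>

#include <armnn/Optional.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

template <typename... Args>
bool IsClBackendSupportedSketch(armnn::Optional<std::string&> reasonIfUnsupported, Args... args)
{
    // Every argument is only used when the CL backend is compiled in, so mark them
    // all as used up front to keep the ARMCOMPUTECL_ENABLED=0 build warning-free.
    armnn::IgnoreUnused(reasonIfUnsupported, args...);
#if defined(ARMCOMPUTECL_ENABLED)
    return true;
#else
    if (reasonIfUnsupported)
    {
        reasonIfUnsupported.value() = "CL backend support is not compiled in"; // illustrative text
    }
    return false;
#endif
}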
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 21c26296af..e7e4fa7e1b 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -9,6 +9,7 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
@@ -23,7 +24,6 @@
#include <arm_compute/runtime/CL/CLBufferAllocator.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
-#include <boost/core/ignore_unused.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/format.hpp>
@@ -85,7 +85,7 @@ ClWorkloadFactory::ClWorkloadFactory(const std::shared_ptr<ClMemoryManager>& mem
std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
const bool IsMemoryManaged) const
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo);
tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
@@ -96,7 +96,7 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateTensorHandle(const Tenso
DataLayout dataLayout,
const bool IsMemoryManaged) const
{
- boost::ignore_unused(IsMemoryManaged);
+ IgnoreUnused(IsMemoryManaged);
std::unique_ptr<ClTensorHandle> tensorHandle = std::make_unique<ClTensorHandle>(tensorInfo, dataLayout);
tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
@@ -131,7 +131,7 @@ std::unique_ptr<ITensorHandle> ClWorkloadFactory::CreateSubTensorHandle(ITensorH
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
@@ -279,7 +279,7 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateElementwiseUnary(const Eleme
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
@@ -308,7 +308,7 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGather(const GatherQueueDesc
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
@@ -477,7 +477,7 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateResizeBilinear(const ResizeB
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
diff --git a/src/backends/cl/OpenClTimer.cpp b/src/backends/cl/OpenClTimer.cpp
index ee3c114ba0..5f106993ec 100644
--- a/src/backends/cl/OpenClTimer.cpp
+++ b/src/backends/cl/OpenClTimer.cpp
@@ -5,10 +5,11 @@
#include "OpenClTimer.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <string>
#include <sstream>
-#include <boost/core/ignore_unused.hpp>
namespace armnn
{
@@ -31,7 +32,7 @@ void OpenClTimer::Start()
const cl_event * event_wait_list,
cl_event * event)
{
- boost::ignore_unused(event);
+ IgnoreUnused(event);
cl_int retVal = 0;
// Get the name of the kernel
diff --git a/src/backends/cl/test/ClRuntimeTests.cpp b/src/backends/cl/test/ClRuntimeTests.cpp
index 9aa36173d0..a0d7963aa8 100644
--- a/src/backends/cl/test/ClRuntimeTests.cpp
+++ b/src/backends/cl/test/ClRuntimeTests.cpp
@@ -9,8 +9,8 @@
#include <backendsCommon/test/RuntimeTestImpl.hpp>
#include <test/ProfilingTestUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#ifdef WITH_VALGRIND
@@ -144,8 +144,8 @@ BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
// These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
// so they are assigned to, but still considered unused, causing a warning.
- boost::ignore_unused(dubious);
- boost::ignore_unused(suppressed);
+ IgnoreUnused(dubious);
+ IgnoreUnused(suppressed);
}
#endif
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index ee5163f668..b7d274fdca 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -11,8 +11,8 @@
#include <Graph.hpp>
#include <Optimizer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#include <set>
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 7e58dabe93..78776124ec 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -13,8 +13,7 @@
#include <InternalTypes.hpp>
#include <LayerSupportCommon.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#if defined(ARMCOMPUTENEON_ENABLED)
#include <aclCommon/ArmComputeUtils.hpp>
@@ -62,8 +61,6 @@
#include "workloads/NeonTransposeWorkload.hpp"
#endif
-using namespace boost;
-
namespace armnn
{
@@ -73,7 +70,7 @@ namespace
template< typename ... Args>
bool IsNeonBackendSupported(Optional<std::string&> reasonIfUnsupported, Args... args)
{
- boost::ignore_unused(reasonIfUnsupported, (args)...);
+ IgnoreUnused(reasonIfUnsupported, (args)...);
#if defined(ARMCOMPUTENEON_ENABLED)
return true;
#else
@@ -134,7 +131,7 @@ bool NeonLayerSupport::IsActivationSupported(const TensorInfo& input,
const ActivationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonActivationWorkloadValidate,
reasonIfUnsupported,
input,
@@ -268,9 +265,9 @@ bool NeonLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input);
- ignore_unused(output);
- ignore_unused(reasonIfUnsupported);
+ armnn::IgnoreUnused(input);
+ armnn::IgnoreUnused(output);
+ armnn::IgnoreUnused(reasonIfUnsupported);
return true;
}
@@ -278,9 +275,9 @@ bool NeonLayerSupport::IsConvertFp32ToFp16Supported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(input);
- ignore_unused(output);
- ignore_unused(reasonIfUnsupported);
+ armnn::IgnoreUnused(input);
+ armnn::IgnoreUnused(output);
+ armnn::IgnoreUnused(reasonIfUnsupported);
return true;
}
@@ -381,7 +378,7 @@ bool NeonLayerSupport::IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
+ armnn::IgnoreUnused(output);
return IsNeonBackendSupported(reasonIfUnsupported) &&
IsSupportedForDataTypeGeneric(reasonIfUnsupported,
input.GetDataType(),
@@ -622,7 +619,7 @@ bool NeonLayerSupport::IsReshapeSupported(const TensorInfo& input,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ armnn::IgnoreUnused(descriptor);
FORWARD_WORKLOAD_VALIDATE_FUNC(NeonReshapeWorkloadValidate,
reasonIfUnsupported,
input,
@@ -712,7 +709,7 @@ bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ armnn::IgnoreUnused(descriptor);
return IsSupportedForDataTypeNeon(reasonIfUnsupported,
input.GetDataType(),
&TrueFunc<>,
@@ -740,7 +737,7 @@ bool NeonLayerSupport::IsSplitterSupported(const TensorInfo& input,
*splitAxis.begin());
}
#endif
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
for (auto output : outputs)
{
if (!input.IsTypeSpaceMatch(output)) // Cannot use sub-tensors if the types are not same space
diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
index d5fef4ea95..26b14af144 100644
--- a/src/backends/neon/NeonTensorHandleFactory.cpp
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -6,7 +6,7 @@
#include "NeonTensorHandleFactory.hpp"
#include "NeonTensorHandle.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 939590cad6..cf9999f5d2 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -10,6 +10,7 @@
#include <Layer.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/MakeWorkloadHelper.hpp>
@@ -19,7 +20,6 @@
#include <neon/workloads/NeonWorkloadUtils.hpp>
#include <neon/workloads/NeonWorkloads.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/polymorphic_cast.hpp>
namespace armnn
@@ -98,7 +98,7 @@ std::unique_ptr<ITensorHandle> NeonWorkloadFactory::CreateTensorHandle(const Ten
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Abs);
@@ -245,7 +245,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const Ele
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Equal);
@@ -275,7 +275,7 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGather(const armnn::Gather
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters = ComparisonDescriptor(ComparisonOperation::Greater);
@@ -446,7 +446,7 @@ std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateResizeBilinear(
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor &descriptor,
const WorkloadInfo &info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters = ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt);
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index bc4107dbb0..f122792203 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -8,8 +8,7 @@
#include <backendsCommon/WorkloadFactoryBase.hpp>
#include <aclCommon/BaseMemoryManager.hpp>
-
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 4dc9641a74..d3ca675679 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -219,7 +219,7 @@ DepthwiseConvolution2dDescriptor MakeDepthwiseConv2dDesc(uint32_t strideX, uint3
uint32_t depthMultiplier = 1, uint32_t padLeft = 0, uint32_t padRight = 0,
uint32_t padTop = 0, uint32_t padBottom = 0)
{
- boost::ignore_unused(depthMultiplier);
+ IgnoreUnused(depthMultiplier);
DepthwiseConvolution2dDescriptor desc;
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 7d5c3b509e..bd2e7289d8 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -8,13 +8,12 @@
#include <armnn/TypesUtils.hpp>
#include <armnn/Types.hpp>
#include <armnn/Descriptors.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <LayerSupportCommon.hpp>
-
#include <backendsCommon/LayerSupportRules.hpp>
#include <boost/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <vector>
#include <array>
@@ -178,7 +177,7 @@ bool RefLayerSupport::IsArgMinMaxSupported(const armnn::TensorInfo &input, const
const armnn::ArgMinMaxDescriptor &descriptor,
armnn::Optional<std::string &> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 4> supportedTypes =
{
@@ -207,7 +206,7 @@ bool RefLayerSupport::IsBatchNormalizationSupported(const TensorInfo& input,
const BatchNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 4> supportedTypes =
{
@@ -248,7 +247,7 @@ bool RefLayerSupport::IsBatchToSpaceNdSupported(const TensorInfo& input,
const BatchToSpaceNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
@@ -297,7 +296,7 @@ bool RefLayerSupport::IsComparisonSupported(const TensorInfo& input0,
const ComparisonDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 4> supportedInputTypes =
{
@@ -325,7 +324,7 @@ bool RefLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inp
const ConcatDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,5> supportedTypes =
@@ -475,7 +474,7 @@ bool RefLayerSupport::IsConvolution2dSupported(const TensorInfo& input,
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
"Reference Convolution2d: biases is not a supported type.");
}
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
return supported;
}
@@ -514,7 +513,7 @@ bool RefLayerSupport::IsDepthToSpaceSupported(const TensorInfo& input,
const DepthToSpaceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
@@ -602,7 +601,7 @@ bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
"Reference DepthwiseConvolution2d: biases is not a supported type.");
}
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
return supported;
@@ -655,7 +654,7 @@ bool RefLayerSupport::IsDetectionPostProcessSupported(const TensorInfo& boxEncod
const DetectionPostProcessDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
+ IgnoreUnused(anchors, detectionBoxes, detectionClasses, detectionScores, numDetections, descriptor);
bool supported = true;
@@ -725,7 +724,7 @@ bool RefLayerSupport::IsElementwiseUnarySupported(const TensorInfo& input,
const ElementwiseUnaryDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 4> supportedTypes =
{
@@ -769,7 +768,7 @@ bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
const FakeQuantizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,1> supportedTypes =
@@ -787,7 +786,7 @@ bool RefLayerSupport::IsFloorSupported(const TensorInfo& input,
const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
+ IgnoreUnused(output);
bool supported = true;
std::array<DataType,3> supportedTypes =
@@ -916,7 +915,7 @@ bool RefLayerSupport::IsInstanceNormalizationSupported(const TensorInfo& input,
const InstanceNormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
// Define supported types
std::array<DataType, 4> supportedTypes =
{
@@ -947,7 +946,7 @@ bool RefLayerSupport::IsL2NormalizationSupported(const TensorInfo& input,
const L2NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
// Define supported types
std::array<DataType, 4> supportedTypes =
{
@@ -980,7 +979,7 @@ bool RefLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
const LogSoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
std::array<DataType, 2> supportedTypes =
{
@@ -1012,8 +1011,8 @@ bool RefLayerSupport::IsLstmSupported(const TensorInfo& input,
const LstmInputParamsInfo& paramsInfo,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
- ignore_unused(paramsInfo);
+ IgnoreUnused(descriptor);
+ IgnoreUnused(paramsInfo);
bool supported = true;
@@ -1319,7 +1318,7 @@ bool RefLayerSupport::IsNormalizationSupported(const TensorInfo& input,
const NormalizationDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
// Define supported types
std::array<DataType, 4> supportedTypes =
@@ -1356,7 +1355,7 @@ bool RefLayerSupport::IsPadSupported(const TensorInfo& input,
const PadDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported output and inputs types.
@@ -1385,7 +1384,7 @@ bool RefLayerSupport::IsPermuteSupported(const TensorInfo& input,
const PermuteDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported output and inputs types.
@@ -1414,7 +1413,7 @@ bool RefLayerSupport::IsPooling2dSupported(const TensorInfo& input,
const Pooling2dDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported output and inputs types.
@@ -1479,8 +1478,8 @@ bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
const ReshapeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(output);
- ignore_unused(descriptor);
+ IgnoreUnused(output);
+ IgnoreUnused(descriptor);
// Define supported output types.
std::array<DataType,7> supportedOutputTypes =
{
@@ -1526,7 +1525,7 @@ bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
const ResizeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,5> supportedTypes =
{
@@ -1564,7 +1563,7 @@ bool RefLayerSupport::IsSliceSupported(const TensorInfo& input,
const SliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType, 3> supportedTypes =
@@ -1591,7 +1590,7 @@ bool RefLayerSupport::IsSoftmaxSupported(const TensorInfo& input,
const SoftmaxDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,6> supportedTypes =
{
@@ -1620,7 +1619,7 @@ bool RefLayerSupport::IsSpaceToBatchNdSupported(const TensorInfo& input,
const SpaceToBatchNdDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
@@ -1648,7 +1647,7 @@ bool RefLayerSupport::IsSpaceToDepthSupported(const TensorInfo& input,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
@@ -1675,7 +1674,7 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
@@ -1696,7 +1695,7 @@ bool RefLayerSupport::IsSplitterSupported(const TensorInfo& input,
const ViewsDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
{
@@ -1725,7 +1724,7 @@ bool RefLayerSupport::IsStackSupported(const std::vector<const TensorInfo*>& inp
const StackDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
@@ -1756,7 +1755,7 @@ bool RefLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
const StridedSliceDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,3> supportedTypes =
@@ -1853,7 +1852,7 @@ bool RefLayerSupport::IsTransposeConvolution2dSupported(const TensorInfo& input,
const Optional<TensorInfo>& biases,
Optional<std::string&> reasonIfUnsupported) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
std::array<DataType,4> supportedTypes =
@@ -1919,7 +1918,7 @@ bool RefLayerSupport::IsTransposeSupported(const TensorInfo& input,
const TransposeDescriptor& descriptor,
Optional<std::string&> reasonIfUnsupported) const
{
- ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
bool supported = true;
// Define supported output and inputs types.
diff --git a/src/backends/reference/RefTensorHandleFactory.cpp b/src/backends/reference/RefTensorHandleFactory.cpp
index c97a779cb3..d687c78b17 100644
--- a/src/backends/reference/RefTensorHandleFactory.cpp
+++ b/src/backends/reference/RefTensorHandleFactory.cpp
@@ -6,7 +6,7 @@
#include "RefTensorHandleFactory.hpp"
#include "RefTensorHandle.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -23,7 +23,7 @@ std::unique_ptr<ITensorHandle> RefTensorHandleFactory::CreateSubTensorHandle(ITe
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
@@ -35,7 +35,7 @@ std::unique_ptr<ITensorHandle> RefTensorHandleFactory::CreateTensorHandle(const
std::unique_ptr<ITensorHandle> RefTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
DataLayout dataLayout) const
{
- boost::ignore_unused(dataLayout);
+ IgnoreUnused(dataLayout);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager, m_ImportFlags);
}
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 2a415bfbf0..52d71df936 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -102,7 +102,7 @@ std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const Tens
{
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
- boost::ignore_unused(isMemoryManaged);
+ IgnoreUnused(isMemoryManaged);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
@@ -112,14 +112,14 @@ std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const Tens
{
// For Ref it is okay to make the TensorHandle memory managed as it can also store a pointer
// to unmanaged memory. This also ensures memory alignment.
- boost::ignore_unused(isMemoryManaged, dataLayout);
+ IgnoreUnused(isMemoryManaged, dataLayout);
return std::make_unique<RefTensorHandle>(tensorInfo, m_MemoryManager);
}
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateAbs(const AbsQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Abs;
@@ -267,7 +267,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateElementwiseUnary(const Elem
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Equal;
@@ -303,7 +303,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGather(const GatherQueueDes
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ComparisonQueueDescriptor comparisonDescriptor;
comparisonDescriptor.m_Parameters.m_Operation = ComparisonOperation::Greater;
@@ -506,7 +506,7 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const Resize
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- boost::ignore_unused(descriptor);
+ IgnoreUnused(descriptor);
ElementwiseUnaryQueueDescriptor elementwiseUnaryDescriptor;
elementwiseUnaryDescriptor.m_Parameters.m_Operation = UnaryOperation::Rsqrt;
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 030ce6f03d..b64479e207 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -4,12 +4,11 @@
//
#pragma once
-#include <armnn/Optional.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
-
#include "RefMemoryManager.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/Optional.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
@@ -48,7 +47,7 @@ public:
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
diff --git a/src/backends/reference/test/RefWorkloadFactoryHelper.hpp b/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
index 10e5b9fa28..30d2037a32 100644
--- a/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
+++ b/src/backends/reference/test/RefWorkloadFactoryHelper.hpp
@@ -25,7 +25,7 @@ struct WorkloadFactoryHelper<armnn::RefWorkloadFactory>
static armnn::RefWorkloadFactory GetFactory(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr)
{
- boost::ignore_unused(memoryManager);
+ IgnoreUnused(memoryManager);
return armnn::RefWorkloadFactory();
}
};
diff --git a/src/backends/reference/workloads/ArgMinMax.cpp b/src/backends/reference/workloads/ArgMinMax.cpp
index db85b958e9..637aa17013 100644
--- a/src/backends/reference/workloads/ArgMinMax.cpp
+++ b/src/backends/reference/workloads/ArgMinMax.cpp
@@ -15,7 +15,7 @@ namespace armnn
void ArgMinMax(Decoder<float>& in, int32_t* out, const TensorInfo& inputTensorInfo,
const TensorInfo& outputTensorInfo, ArgMinMaxFunction function, int axis)
{
- boost::ignore_unused(outputTensorInfo);
+ IgnoreUnused(outputTensorInfo);
unsigned int uAxis = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 3f0144670f..5cae5bda83 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,14 +5,13 @@
#pragma once
-
+#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnnUtils/FloatingPointConverter.hpp>
#include <ResolveType.hpp>
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
namespace armnn
{
@@ -107,7 +106,7 @@ public:
TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
{
- boost::ignore_unused(axisIndex);
+ IgnoreUnused(axisIndex);
BOOST_ASSERT(m_Iterator);
m_Iterator = m_Start + index;
return *this;
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index 4025e8d7fa..63c0405efe 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -5,7 +5,8 @@
#include "Dequantize.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
namespace armnn
{
@@ -14,7 +15,7 @@ void Dequantize(Decoder<float>& inputDecoder,
const TensorInfo& inputInfo,
const TensorInfo& outputInfo)
{
- boost::ignore_unused(outputInfo);
+ IgnoreUnused(outputInfo);
BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
{
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 96e57803a1..57cf01e4a1 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -154,7 +154,7 @@ void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
float* detectionScores,
float* numDetections)
{
- boost::ignore_unused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
+ IgnoreUnused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
// Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
// which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
diff --git a/src/backends/reference/workloads/Gather.cpp b/src/backends/reference/workloads/Gather.cpp
index 5416855f48..4cf3a142a0 100644
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -8,8 +8,8 @@
#include "RefWorkloadUtils.hpp"
#include <backendsCommon/WorkloadData.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
@@ -22,7 +22,7 @@ void Gather(const TensorInfo& paramsInfo,
const int32_t* indices,
Encoder<float>& output)
{
- boost::ignore_unused(outputInfo);
+ IgnoreUnused(outputInfo);
const TensorShape& paramsShape = paramsInfo.GetShape();
unsigned int paramsProduct = 1;
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index ddf5674fb8..103d62a8df 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -6,11 +6,11 @@
#include "LogSoftmax.hpp"
#include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <cmath>
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace
@@ -37,7 +37,7 @@ void LogSoftmax(Decoder<float>& input,
bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
BOOST_ASSERT_MSG(axisIsValid,
"Axis index is not in range [-numDimensions, numDimensions).");
- boost::ignore_unused(axisIsValid);
+ IgnoreUnused(axisIsValid);
unsigned int uAxis = descriptor.m_Axis < 0 ?
numDimensions - boost::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index 8bb1670a48..bfd3c284ae 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -28,7 +28,7 @@ void RefStridedSliceWorkload::Execute() const
DataType outputDataType = outputInfo.GetDataType();
BOOST_ASSERT(inputDataType == outputDataType);
- boost::ignore_unused(outputDataType);
+ IgnoreUnused(outputDataType);
StridedSlice(inputInfo,
m_Data.m_Parameters,
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
index c7ca3b156e..0223cdc56a 100644
--- a/src/backends/reference/workloads/Slice.cpp
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -5,8 +5,9 @@
#include "Slice.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
namespace armnn
@@ -72,7 +73,7 @@ void Slice(const TensorInfo& inputInfo,
const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
unsigned char* output = reinterpret_cast<unsigned char*>(outputData);
- boost::ignore_unused(dim0);
+ IgnoreUnused(dim0);
for (unsigned int idx0 = begin0; idx0 < begin0 + size0; ++idx0)
{
for (unsigned int idx1 = begin1; idx1 < begin1 + size1; ++idx1)
diff --git a/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp b/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp
index 88b67987e1..86e6555e14 100644
--- a/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp
+++ b/src/dynamic/sample/SampleDynamicWorkloadFactory.hpp
@@ -33,7 +33,7 @@ public:
TensorShape const& subTensorShape,
unsigned int const* subTensorOrigin) const override
{
- boost::ignore_unused(parent, subTensorShape, subTensorOrigin);
+ IgnoreUnused(parent, subTensorShape, subTensorOrigin);
return nullptr;
}
diff --git a/src/profiling/CommandHandlerFunctor.hpp b/src/profiling/CommandHandlerFunctor.hpp
index ea76d10141..743bb937c2 100644
--- a/src/profiling/CommandHandlerFunctor.hpp
+++ b/src/profiling/CommandHandlerFunctor.hpp
@@ -3,11 +3,13 @@
// SPDX-License-Identifier: MIT
//
+#pragma once
+
#include "Packet.hpp"
-#include <cstdint>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
+#include <cstdint>
namespace armnn
{
@@ -15,8 +17,6 @@ namespace armnn
namespace profiling
{
-#pragma once
-
class CommandHandlerFunctor
{
public:
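[Editor's note] Besides the include swap, the CommandHandlerFunctor.hpp hunk above hoists #pragma once out of namespace profiling to the top of the header, its conventional position as the first directive after the licence comment. A layout sketch of the resulting header, with the class body elided:

// Layout sketch of CommandHandlerFunctor.hpp after this patch (body elided).
//
// SPDX-License-Identifier: MIT
//
#pragma once

#include "Packet.hpp"

#include <armnn/utility/IgnoreUnused.hpp>

#include <cstdint>

namespace armnn
{
namespace profiling
{

class CommandHandlerFunctor
{
    // ... unchanged ...
};

} // namespace profiling
} // namespace armnn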
diff --git a/src/profiling/CounterDirectory.cpp b/src/profiling/CounterDirectory.cpp
index 052e452b7b..c84da10506 100644
--- a/src/profiling/CounterDirectory.cpp
+++ b/src/profiling/CounterDirectory.cpp
@@ -8,8 +8,8 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/format.hpp>
namespace armnn
@@ -191,7 +191,7 @@ const Counter* CounterDirectory::RegisterCounter(const BackendId& backendId,
const Optional<uint16_t>& deviceUid,
const Optional<uint16_t>& counterSetUid)
{
- boost::ignore_unused(backendId);
+ IgnoreUnused(backendId);
// Check that the given parent category name is valid
if (parentCategoryName.empty() ||
diff --git a/src/profiling/FileOnlyProfilingConnection.cpp b/src/profiling/FileOnlyProfilingConnection.cpp
index 1db8030313..83229caad7 100644
--- a/src/profiling/FileOnlyProfilingConnection.cpp
+++ b/src/profiling/FileOnlyProfilingConnection.cpp
@@ -40,7 +40,7 @@ void FileOnlyProfilingConnection::Close()
bool FileOnlyProfilingConnection::WaitForStreamMeta(const unsigned char* buffer, uint32_t length)
{
- boost::ignore_unused(length);
+ IgnoreUnused(length);
// The first word, stream_metadata_identifier, should always be 0.
if (ToUint32(buffer, TargetEndianness::BeWire) != 0)
diff --git a/src/profiling/PacketVersionResolver.cpp b/src/profiling/PacketVersionResolver.cpp
index 869f09e635..2c75067487 100644
--- a/src/profiling/PacketVersionResolver.cpp
+++ b/src/profiling/PacketVersionResolver.cpp
@@ -5,7 +5,7 @@
#include "PacketVersionResolver.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -54,7 +54,7 @@ bool PacketKey::operator!=(const PacketKey& rhs) const
Version PacketVersionResolver::ResolvePacketVersion(uint32_t familyId, uint32_t packetId) const
{
- boost::ignore_unused(familyId, packetId);
+ IgnoreUnused(familyId, packetId);
// NOTE: For now every packet specification is at version 1.0.0
return Version(1, 0, 0);
}
diff --git a/src/profiling/ProfilingStateMachine.hpp b/src/profiling/ProfilingStateMachine.hpp
index 160de71bbe..cbc65ec8b0 100644
--- a/src/profiling/ProfilingStateMachine.hpp
+++ b/src/profiling/ProfilingStateMachine.hpp
@@ -7,7 +7,7 @@
#include <atomic>
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
namespace armnn
{
@@ -35,7 +35,7 @@ public:
bool IsOneOfStates(ProfilingState state1)
{
- boost::ignore_unused(state1);
+ IgnoreUnused(state1);
return false;
}
diff --git a/src/profiling/SendCounterPacket.cpp b/src/profiling/SendCounterPacket.cpp
index b8ef189d6e..942ccc7b59 100644
--- a/src/profiling/SendCounterPacket.cpp
+++ b/src/profiling/SendCounterPacket.cpp
@@ -9,10 +9,10 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Conversion.hpp>
#include <Processes.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <cstring>
diff --git a/src/profiling/SendThread.cpp b/src/profiling/SendThread.cpp
index 0318a74901..5962f2fc5d 100644
--- a/src/profiling/SendThread.cpp
+++ b/src/profiling/SendThread.cpp
@@ -13,7 +13,6 @@
#include <boost/format.hpp>
#include <boost/numeric/conversion/cast.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <cstring>
diff --git a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
index 77d4d683b6..7db42de416 100644
--- a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
+++ b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
@@ -8,8 +8,8 @@
#include <ProfilingService.hpp>
#include <Runtime.hpp>
#include <Filesystem.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/filesystem.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/unit_test.hpp>
diff --git a/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp b/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
index 6a092815b2..c36867815f 100644
--- a/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
+++ b/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
@@ -5,11 +5,11 @@
#include "../ProfilingConnectionDumpToFileDecorator.hpp"
#include <Runtime.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <fstream>
#include <sstream>
-#include <boost/core/ignore_unused.hpp>
#include <boost/filesystem.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/unit_test.hpp>
@@ -49,14 +49,14 @@ public:
bool WritePacket(const unsigned char* buffer, uint32_t length) override
{
- boost::ignore_unused(buffer);
- boost::ignore_unused(length);
+ armnn::IgnoreUnused(buffer);
+ armnn::IgnoreUnused(length);
return true;
}
Packet ReadPacket(uint32_t timeout) override
{
- boost::ignore_unused(timeout);
+ armnn::IgnoreUnused(timeout);
return std::move(*m_Packet);
}
diff --git a/src/profiling/test/ProfilingMocks.hpp b/src/profiling/test/ProfilingMocks.hpp
index 19aad491af..944aea6454 100644
--- a/src/profiling/test/ProfilingMocks.hpp
+++ b/src/profiling/test/ProfilingMocks.hpp
@@ -16,9 +16,9 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <atomic>
@@ -128,7 +128,7 @@ public:
Packet ReadPacket(uint32_t timeout) override
{
- boost::ignore_unused(timeout);
+ IgnoreUnused(timeout);
// Simulate a delay in the reading process. The default timeout is way too long.
std::this_thread::sleep_for(std::chrono::milliseconds(5));
@@ -162,7 +162,7 @@ class MockProfilingConnectionFactory : public IProfilingConnectionFactory
public:
IProfilingConnectionPtr GetProfilingConnection(const ExternalProfilingOptions& options) const override
{
- boost::ignore_unused(options);
+ IgnoreUnused(options);
return std::make_unique<MockProfilingConnection>();
}
};
@@ -399,7 +399,7 @@ public:
void SendCounterDirectoryPacket(const ICounterDirectory& counterDirectory) override
{
- boost::ignore_unused(counterDirectory);
+ IgnoreUnused(counterDirectory);
std::string message("SendCounterDirectoryPacket");
unsigned int reserved = 0;
@@ -411,7 +411,7 @@ public:
void SendPeriodicCounterCapturePacket(uint64_t timestamp,
const std::vector<CounterValue>& values) override
{
- boost::ignore_unused(timestamp, values);
+ IgnoreUnused(timestamp, values);
std::string message("SendPeriodicCounterCapturePacket");
unsigned int reserved = 0;
@@ -423,7 +423,7 @@ public:
void SendPeriodicCounterSelectionPacket(uint32_t capturePeriod,
const std::vector<uint16_t>& selectedCounterIds) override
{
- boost::ignore_unused(capturePeriod, selectedCounterIds);
+ IgnoreUnused(capturePeriod, selectedCounterIds);
std::string message("SendPeriodicCounterSelectionPacket");
unsigned int reserved = 0;
@@ -513,7 +513,7 @@ public:
const armnn::Optional<uint16_t>& deviceUid = armnn::EmptyOptional(),
const armnn::Optional<uint16_t>& counterSetUid = armnn::EmptyOptional())
{
- boost::ignore_unused(backendId);
+ IgnoreUnused(backendId);
// Get the number of cores from the argument only
uint16_t deviceCores = numberOfCores.has_value() ? numberOfCores.value() : 0;
@@ -597,19 +597,19 @@ public:
const Device* GetDevice(uint16_t uid) const override
{
- boost::ignore_unused(uid);
+ IgnoreUnused(uid);
return nullptr; // Not used by the unit tests
}
const CounterSet* GetCounterSet(uint16_t uid) const override
{
- boost::ignore_unused(uid);
+ IgnoreUnused(uid);
return nullptr; // Not used by the unit tests
}
const Counter* GetCounter(uint16_t uid) const override
{
- boost::ignore_unused(uid);
+ IgnoreUnused(uid);
return nullptr; // Not used by the unit tests
}
diff --git a/src/profiling/test/ProfilingTests.cpp b/src/profiling/test/ProfilingTests.cpp
index 3dab93d9b8..0e91696af8 100644
--- a/src/profiling/test/ProfilingTests.cpp
+++ b/src/profiling/test/ProfilingTests.cpp
@@ -34,6 +34,7 @@
#include <armnn/Types.hpp>
#include <armnn/Utils.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/numeric/conversion/cast.hpp>
@@ -1697,7 +1698,7 @@ BOOST_AUTO_TEST_CASE(CounterSelectionCommandHandlerParseData)
{
bool IsCounterRegistered(uint16_t counterUid) const override
{
- boost::ignore_unused(counterUid);
+ armnn::IgnoreUnused(counterUid);
return true;
}
uint16_t GetCounterCount() const override
@@ -1706,7 +1707,7 @@ BOOST_AUTO_TEST_CASE(CounterSelectionCommandHandlerParseData)
}
uint32_t GetCounterValue(uint16_t counterUid) const override
{
- boost::ignore_unused(counterUid);
+ armnn::IgnoreUnused(counterUid);
return 0;
}
};
@@ -2092,7 +2093,7 @@ BOOST_AUTO_TEST_CASE(CheckPeriodicCounterCaptureThread)
//not used
bool IsCounterRegistered(uint16_t counterUid) const override
{
- boost::ignore_unused(counterUid);
+ armnn::IgnoreUnused(counterUid);
return false;
}
diff --git a/src/profiling/test/ProfilingTests.hpp b/src/profiling/test/ProfilingTests.hpp
index 8b4bc84bd1..008110392c 100644
--- a/src/profiling/test/ProfilingTests.hpp
+++ b/src/profiling/test/ProfilingTests.hpp
@@ -77,7 +77,7 @@ public:
bool WritePacket(const unsigned char* buffer, uint32_t length) override
{
- boost::ignore_unused(buffer, length);
+ IgnoreUnused(buffer, length);
return false;
}
@@ -139,7 +139,7 @@ public:
Packet ReadPacket(uint32_t timeout) override
{
- boost::ignore_unused(timeout);
+ IgnoreUnused(timeout);
++m_ReadRequests;
throw armnn::Exception("Simulate a non-timeout error");
}
@@ -158,7 +158,7 @@ class TestProfilingConnectionBadAckPacket : public TestProfilingConnectionBase
public:
Packet ReadPacket(uint32_t timeout) override
{
- boost::ignore_unused(timeout);
+ IgnoreUnused(timeout);
// Connection Acknowledged Packet header (word 0, word 1 is always zero):
// 26:31 [6] packet_family: Control Packet Family, value 0b000000
// 16:25 [10] packet_id: Packet identifier, value 0b0000000001
@@ -181,7 +181,7 @@ public:
void operator()(const Packet& packet) override
{
- boost::ignore_unused(packet);
+ IgnoreUnused(packet);
m_Count++;
}
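[Editor's note] A small stylistic point visible in the two profiling test files above (and in the tests/ directory further down): code that already sits inside namespace armnn, or that prefers brevity, calls the unqualified IgnoreUnused(...), while translation units in the global namespace spell it armnn::IgnoreUnused(...). Both forms name the same helper. A minimal sketch with hypothetical function names:

#include <cstdint>

#include <armnn/utility/IgnoreUnused.hpp>

// Hypothetical free function in the global namespace, like the test bodies above:
// the call must be qualified because this code is outside namespace armnn.
void DiscardTimeout(uint32_t timeout)
{
    armnn::IgnoreUnused(timeout);
}

namespace armnn
{
// Inside namespace armnn the unqualified call resolves to the same helper.
void DiscardLength(uint32_t length)
{
    IgnoreUnused(length);
}
} // namespace armnn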
diff --git a/src/profiling/test/SendCounterPacketTests.hpp b/src/profiling/test/SendCounterPacketTests.hpp
index 8b46ed17d6..7a5f7962e6 100644
--- a/src/profiling/test/SendCounterPacketTests.hpp
+++ b/src/profiling/test/SendCounterPacketTests.hpp
@@ -13,9 +13,9 @@
#include <armnn/Exceptions.hpp>
#include <armnn/Optional.hpp>
#include <armnn/Conversion.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <atomic>
diff --git a/tests/DeepSpeechV1InferenceTest.hpp b/tests/DeepSpeechV1InferenceTest.hpp
index ac28bbbcd4..07b55d2ab8 100644
--- a/tests/DeepSpeechV1InferenceTest.hpp
+++ b/tests/DeepSpeechV1InferenceTest.hpp
@@ -7,8 +7,9 @@
#include "InferenceTest.hpp"
#include "DeepSpeechV1Database.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
@@ -37,7 +38,7 @@ public:
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
- boost::ignore_unused(options);
+ armnn::IgnoreUnused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits
BOOST_ASSERT(output1.size() == k_OutputSize1);
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 6423d1c7ff..68c168f126 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -4,12 +4,13 @@
//
#pragma once
+#include "InferenceModel.hpp"
+
#include <armnn/ArmNN.hpp>
#include <armnn/Logging.hpp>
#include <armnn/TypesUtils.hpp>
-#include "InferenceModel.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/program_options.hpp>
@@ -93,11 +94,11 @@ public:
virtual void AddCommandLineOptions(boost::program_options::options_description& options)
{
- boost::ignore_unused(options);
+ IgnoreUnused(options);
};
virtual bool ProcessCommandLineOptions(const InferenceTestOptions &commonOptions)
{
- boost::ignore_unused(commonOptions);
+ IgnoreUnused(commonOptions);
return true;
};
virtual std::unique_ptr<IInferenceTestCase> GetTestCase(unsigned int testCaseId) = 0;
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index c05e70d9f7..5b9b45a4a2 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -79,7 +79,7 @@ struct ClassifierResultProcessor : public boost::static_visitor<>
void operator()(const std::vector<int>& values)
{
- boost::ignore_unused(values);
+ IgnoreUnused(values);
BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
}
diff --git a/tests/InferenceTestImage.cpp b/tests/InferenceTestImage.cpp
index 92c67ae225..83c5cce346 100644
--- a/tests/InferenceTestImage.cpp
+++ b/tests/InferenceTestImage.cpp
@@ -4,9 +4,9 @@
//
#include "InferenceTestImage.hpp"
-#include <boost/core/ignore_unused.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/format.hpp>
-#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <array>
@@ -314,7 +314,7 @@ std::vector<float> GetImageDataInArmNnLayoutAsNormalizedFloats(ImageChannelLayou
return GetImageDataInArmNnLayoutAsFloats(layout, image,
[](ImageChannel channel, float value)
{
- boost::ignore_unused(channel);
+ armnn::IgnoreUnused(channel);
return value / 255.f;
});
}
diff --git a/tests/MobileNetSsdInferenceTest.hpp b/tests/MobileNetSsdInferenceTest.hpp
index c99844b6bb..a26712c511 100644
--- a/tests/MobileNetSsdInferenceTest.hpp
+++ b/tests/MobileNetSsdInferenceTest.hpp
@@ -7,6 +7,8 @@
#include "InferenceTest.hpp"
#include "MobileNetSsdDatabase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <boost/assert.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
@@ -33,7 +35,7 @@ public:
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
- boost::ignore_unused(options);
+ armnn::IgnoreUnused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
BOOST_ASSERT(output1.size() == k_OutputSize1);
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index d7e9275916..4d996fd401 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -751,7 +751,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
const bool printIntermediate, bool enableLayerDetails = false, bool parseUnuspported = false)
{
- boost::ignore_unused(runtime);
+ IgnoreUnused(runtime);
std::string modelFormat;
std::string modelPath;
std::string inputNames;
diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp
index 16d0355d9d..4190e72365 100644
--- a/tests/YoloInferenceTest.hpp
+++ b/tests/YoloInferenceTest.hpp
@@ -7,6 +7,8 @@
#include "InferenceTest.hpp"
#include "YoloDatabase.hpp"
+#include <armnn/utility/IgnoreUnused.hpp>
+
#include <algorithm>
#include <array>
#include <utility>
@@ -32,7 +34,7 @@ public:
virtual TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
- boost::ignore_unused(options);
+ armnn::IgnoreUnused(options);
using Boost3dArray = boost::multi_array<float, 3>;