about summary refs log tree commit diff
diff options
context:
space:
mode:
authorDerek Lamberti <derek.lamberti@arm.com>2019-12-10 22:05:21 +0000
committerFrancis Murtagh <francis.murtagh@arm.com>2019-12-31 10:43:13 +0000
commit859f9ced8360fcfe6daedf6c8f6613005893f814 (patch)
treeab90773491dc8a7e4387833c77d8bfc072dc9607
parent94a88d2b21d9ca3f42dc3435695be31b5591230b (diff)
downloadarmnn-859f9ced8360fcfe6daedf6c8f6613005893f814.tar.gz
IVGCVSW-4246 Clean build of Visitors with -Wextra
Change-Id: Icb1b35ff55fa22103777853e6f49fc282d61750d
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
-rw-r--r--src/armnn/DynamicQuantizationVisitor.cpp4
-rw-r--r--src/armnn/OverrideInputRangeVisitor.cpp2
-rw-r--r--src/armnn/StaticRangeVisitor.cpp6
-rw-r--r--src/armnn/test/QuantizerTest.cpp66
-rw-r--r--src/armnnDeserializer/Deserializer.cpp4
-rw-r--r--src/armnnDeserializer/test/DeserializeAdd.cpp3
-rw-r--r--src/armnnDeserializer/test/DeserializeMultiplication.cpp3
-rw-r--r--src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp2
-rw-r--r--src/armnnQuantizer/QuantizationDataSet.cpp2
-rw-r--r--src/armnnSerializer/Serializer.cpp108
-rw-r--r--src/armnnSerializer/test/ActivationSerializationTests.cpp3
-rw-r--r--src/armnnSerializer/test/SerializerTests.cpp30
12 files changed, 212 insertions, 21 deletions
diff --git a/src/armnn/DynamicQuantizationVisitor.cpp b/src/armnn/DynamicQuantizationVisitor.cpp
index d4e0c9006c..ba87c6d335 100644
--- a/src/armnn/DynamicQuantizationVisitor.cpp
+++ b/src/armnn/DynamicQuantizationVisitor.cpp
@@ -84,6 +84,7 @@ void DynamicQuantizationVisitor::VisitNonCalibratedLayers() {
void DynamicQuantizationVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
SetRange(layer, 0, -20.f, 20.f);
AddToCalibratedLayers(layer);
}
@@ -138,6 +139,7 @@ void DynamicQuantizationVisitor::VisitActivationLayer(const IConnectableLayer* l
const ActivationDescriptor& activationDescriptor,
const char* name)
{
+ boost::ignore_unused(name, activationDescriptor);
switch (activationDescriptor.m_Function)
{
// Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -246,6 +248,8 @@ void DynamicQuantizationVisitor::VisitConcatLayer(const IConnectableLayer* layer
const ConcatDescriptor& originsDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+ boost::ignore_unused(originsDescriptor);
float min = std::numeric_limits<float>::max();
float max = std::numeric_limits<float>::lowest();
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
diff --git a/src/armnn/OverrideInputRangeVisitor.cpp b/src/armnn/OverrideInputRangeVisitor.cpp
index 058e630c79..d047c5bbe8 100644
--- a/src/armnn/OverrideInputRangeVisitor.cpp
+++ b/src/armnn/OverrideInputRangeVisitor.cpp
@@ -8,6 +8,7 @@
#include "Layer.hpp"
#include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
namespace armnn
{
@@ -22,6 +23,7 @@ OverrideInputRangeVisitor::OverrideInputRangeVisitor(RangeTracker& ranges,
void OverrideInputRangeVisitor::VisitInputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name)
{
+ boost::ignore_unused(name);
if (m_LayerId == id)
{
m_Ranges.SetRange(layer, 0, m_MinMaxRange.first, m_MinMaxRange.second);
diff --git a/src/armnn/StaticRangeVisitor.cpp b/src/armnn/StaticRangeVisitor.cpp
index 94f0a01997..81428c1f90 100644
--- a/src/armnn/StaticRangeVisitor.cpp
+++ b/src/armnn/StaticRangeVisitor.cpp
@@ -31,6 +31,7 @@ void StaticRangeVisitor::ForwardParentParameters(const IConnectableLayer* layer)
void StaticRangeVisitor::VisitAdditionLayer(const IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
SetRange(layer, 0, -20.f, 20.f);
}
@@ -81,6 +82,7 @@ void StaticRangeVisitor::VisitActivationLayer(const IConnectableLayer* layer,
const ActivationDescriptor& activationDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
switch (activationDescriptor.m_Function)
{
// Range is 0, 15 for Abs, Linear, ReLu and Soft ReLu
@@ -141,6 +143,7 @@ void StaticRangeVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
const char* name)
{
boost::ignore_unused(pooling2dDescriptor);
+ boost::ignore_unused(name);
ForwardParentParameters(layer);
}
@@ -149,6 +152,7 @@ void StaticRangeVisitor::VisitSoftmaxLayer(const IConnectableLayer* layer,
const char* name)
{
boost::ignore_unused(softmaxDescriptor);
+ boost::ignore_unused(name);
SetRange(layer, 0, 0.f, 1.f);
}
@@ -156,6 +160,8 @@ void StaticRangeVisitor::VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name)
{
+ boost::ignore_unused(originsDescriptor);
+ boost::ignore_unused(name);
float min = std::numeric_limits<float>::max();
float max = std::numeric_limits<float>::lowest();
for (unsigned int i = 0; i < layer->GetNumInputSlots(); ++i)
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 37b3bfa852..e147a84eb6 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -19,6 +19,7 @@
#include "../RangeTracker.hpp"
#include "../../armnnQuantizer/CommandLineProcessor.hpp"
+#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
#include <unordered_map>
@@ -55,6 +56,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
+ boost::ignore_unused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST(m_InputShape == info.GetShape());
// Based off current default [-15.0f, 15.0f]
@@ -67,6 +69,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
+ boost::ignore_unused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(m_OutputShape == info.GetShape());
}
@@ -105,6 +108,7 @@ protected:
const OffsetScalePair& params,
DataType dataType = DataType::QuantisedAsymm8)
{
+ boost::ignore_unused(dataType);
TestQuantizationParamsImpl(info, DataType::QuantisedAsymm8, params.first, params.second);
}
@@ -191,6 +195,7 @@ public:
void VisitAdditionLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
+ boost::ignore_unused(name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-20.0f, 20.0f]
@@ -253,6 +258,8 @@ public:
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
+
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 15.0f]
@@ -353,6 +360,7 @@ BOOST_AUTO_TEST_CASE(InputOutputLayerDynamicQuant)
LayerBindingId id,
const char* name = nullptr) override
{
+ boost::ignore_unused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_CHECK_MESSAGE(info.GetDataType() == m_DataType,
std::string(armnn::GetDataTypeName(info.GetDataType()))
@@ -489,6 +497,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBoundedReluActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 3.5f]
@@ -538,6 +547,7 @@ BOOST_AUTO_TEST_CASE(QuantizeTanHActivation)
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-1.0f, 1.0f]
@@ -586,6 +596,7 @@ public:
const ActivationDescriptor& descriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-5.0f, 15.0f]
@@ -652,6 +663,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchNorm)
const ConstTensor& gamma,
const char* name = nullptr) override
{
+ boost::ignore_unused(desc, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [-15.0f, 15.0f]
@@ -730,6 +742,7 @@ BOOST_AUTO_TEST_CASE(QuantizeDepthToSpace)
const DepthToSpaceDescriptor& desc,
const char* name = nullptr)
{
+ boost::ignore_unused(desc, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -914,6 +927,7 @@ void ValidateFullyConnectedLayer(const bool biasEnabled)
const Optional<ConstTensor>& biases,
const char* name = nullptr) override
{
+ boost::ignore_unused(desc, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -965,6 +979,7 @@ void TestQuantizeConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
+ boost::ignore_unused(convolution2dDescriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -1045,6 +1060,7 @@ void TestQuantizeDepthwiseConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
+ boost::ignore_unused(convolution2dDescriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -1123,6 +1139,7 @@ BOOST_AUTO_TEST_CASE(QuantizeInstanceNormalization)
const InstanceNormalizationDescriptor& descriptor,
const char* name = nullptr)
{
+ boost::ignore_unused(descriptor, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -1183,6 +1200,7 @@ BOOST_AUTO_TEST_CASE(QuantizeLogSoftmax)
const SoftmaxDescriptor& descriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -1267,6 +1285,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSoftmax)
const SoftmaxDescriptor& descriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off default static range [0.0f, 1.0f]
@@ -1388,6 +1407,7 @@ BOOST_AUTO_TEST_CASE(QuantizePermute)
const PermuteDescriptor& desc,
const char* name = nullptr) override
{
+ boost::ignore_unused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1437,6 +1457,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSpaceToBatch)
const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(spaceToBatchNdDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1538,6 +1559,7 @@ BOOST_AUTO_TEST_CASE(QuantizePooling2d)
const Pooling2dDescriptor& desc,
const char* name = nullptr) override
{
+ boost::ignore_unused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -1601,6 +1623,7 @@ BOOST_AUTO_TEST_CASE(QuantizeConstant)
const ConstTensor& input,
const char* name = nullptr) override
{
+ boost::ignore_unused(input, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
// Based off the range of values in the const tensor used for the test: [-2.0f, 6.0f]
@@ -1667,6 +1690,7 @@ BOOST_AUTO_TEST_CASE(QuantizeAbs)
void VisitAbsLayer(const IConnectableLayer *layer,
const char *name = nullptr) override
{
+ boost::ignore_unused(name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
@@ -1720,16 +1744,21 @@ BOOST_AUTO_TEST_CASE(QuantizeArgMinMax)
void VisitInputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
- {}
+ {
+ boost::ignore_unused(layer, id, name);
+ }
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
- {}
+ {
+ boost::ignore_unused(layer, id, name);
+ }
void VisitArgMinMaxLayer(const IConnectableLayer* layer,
const ArgMinMaxDescriptor& argMinMaxDescriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(argMinMaxDescriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
@@ -1798,6 +1827,7 @@ BOOST_AUTO_TEST_CASE(QuantizeComparison)
const ComparisonDescriptor& descriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
TensorInfo info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -1860,15 +1890,20 @@ BOOST_AUTO_TEST_CASE(QuantizeConcat)
void VisitInputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
- {}
+ {
+ boost::ignore_unused(layer, id, name);
+ }
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
- {}
+ {
+ boost::ignore_unused(layer, id, name);
+ }
void VisitConcatLayer(const IConnectableLayer* layer,
const OriginsDescriptor& originsDescriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(originsDescriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(
@@ -1964,6 +1999,7 @@ BOOST_AUTO_TEST_CASE(QuantizeReshape)
const ReshapeDescriptor& reshapeDescriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(reshapeDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2013,6 +2049,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSplitter)
const SplitterDescriptor& desc,
const char* name = nullptr)
{
+ boost::ignore_unused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2063,6 +2100,7 @@ BOOST_AUTO_TEST_CASE(QuantizeResize)
const ResizeDescriptor& resizeDescriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(resizeDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2114,6 +2152,7 @@ BOOST_AUTO_TEST_CASE(QuantizeStridedSlice)
const StridedSliceDescriptor& desc,
const char* name = nullptr)
{
+ boost::ignore_unused(desc, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2163,6 +2202,7 @@ BOOST_AUTO_TEST_CASE(QuantizeBatchToSpace)
const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(batchToSpaceNdDescriptor, name);
CheckForwardedQuantizationSettings(layer);
}
};
@@ -2219,6 +2259,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
LayerBindingId id,
const char* name = nullptr) override
{
+ boost::ignore_unused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
switch (id)
@@ -2244,6 +2285,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
LayerBindingId id,
const char* name = nullptr) override
{
+ boost::ignore_unused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(m_OutputShape == info.GetShape());
}
@@ -2251,6 +2293,7 @@ BOOST_AUTO_TEST_CASE(QuantizePrelu)
void VisitPreluLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
+ boost::ignore_unused(name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(info,
{ 30.0f / g_Asymm8QuantizationBase, 128 }, // QASymm8
@@ -2327,6 +2370,7 @@ void TestQuantizeTransposeConvolution2d(bool useBiases)
const Optional<ConstTensor>& biases,
const char *name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
TestQuantizationOnLayersWithBiases(layer, weights, biases);
}
};
@@ -2406,16 +2450,21 @@ BOOST_AUTO_TEST_CASE(QuantizeStack)
void VisitInputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
- {}
+ {
+ boost::ignore_unused(layer, id, name);
+ }
void VisitOutputLayer(const IConnectableLayer* layer,
LayerBindingId id,
const char* name = nullptr) override
- {}
+ {
+ boost::ignore_unused(layer, id, name);
+ }
void VisitStackLayer(const IConnectableLayer* layer,
const StackDescriptor& descriptor,
const char* name = nullptr) override
{
+ boost::ignore_unused(descriptor, name);
TensorInfo outputInfo = layer->GetOutputSlot(0).GetTensorInfo();
TestQuantizationParams(outputInfo,
@@ -2476,6 +2525,7 @@ BOOST_AUTO_TEST_CASE(QuantizeSlice)
const SliceDescriptor& desc,
const char* name = nullptr)
{
+ boost::ignore_unused(desc, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
const OffsetScalePair qAsymm8Params{ 30.0f / g_Asymm8QuantizationBase, 128 };
@@ -2559,6 +2609,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
+ boost::ignore_unused(id, name);
const TensorInfo& info = layer->GetOutputSlot(0).GetTensorInfo();
BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
BOOST_TEST(m_InputShape == info.GetShape());
@@ -2568,6 +2619,7 @@ public:
LayerBindingId id,
const char* name = nullptr) override
{
+ boost::ignore_unused(id, name);
const TensorInfo& info = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
BOOST_TEST(GetDataTypeName(info.GetDataType()) == GetDataTypeName(m_DataType));
BOOST_TEST(m_OutputShape == info.GetShape());
@@ -2576,12 +2628,14 @@ public:
void VisitQuantizeLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
+ boost::ignore_unused(layer, name);
m_VisitedQuantizeLayer = true;
}
void VisitDequantizeLayer(const IConnectableLayer* layer,
const char* name = nullptr) override
{
+ boost::ignore_unused(layer, name);
m_VisitedDequantizeLayer = true;
}
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index e01ed47740..6a65c6d6d5 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -710,6 +710,7 @@ INetworkPtr Deserializer::CreateNetworkFromGraph(GraphPtr graph)
BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerIndex,
const std::string& name) const
{
+ boost::ignore_unused(layerIndex);
for (auto inputBinding : m_InputBindings)
{
if (inputBinding.first == name)
@@ -727,6 +728,7 @@ BindingPointInfo Deserializer::GetNetworkInputBindingInfo(unsigned int layerInde
BindingPointInfo Deserializer::GetNetworkOutputBindingInfo(unsigned int layerIndex,
const std::string& name) const
{
+ boost::ignore_unused(layerIndex);
for (auto outputBinding : m_OutputBindings)
{
if (outputBinding.first == name)
@@ -1676,6 +1678,7 @@ void Deserializer::ParsePermute(GraphPtr graph, unsigned int layerIndex)
armnn::Pooling2dDescriptor Deserializer::GetPoolingDescriptor(Deserializer::PoolingDescriptor pooling2dDesc,
unsigned int layerIndex)
{
+ boost::ignore_unused(layerIndex);
armnn::Pooling2dDescriptor desc;
switch (pooling2dDesc->poolType())
@@ -2027,6 +2030,7 @@ armnn::NormalizationDescriptor Deserializer::GetNormalizationDescriptor(
Deserializer::NormalizationDescriptorPtr normalizationDescriptor,
unsigned int layerIndex)
{
+ boost::ignore_unused(layerIndex);
armnn::NormalizationDescriptor desc;
switch (normalizationDescriptor->normChannelType())
diff --git a/src/armnnDeserializer/test/DeserializeAdd.cpp b/src/armnnDeserializer/test/DeserializeAdd.cpp
index b053b10efa..be292bc304 100644
--- a/src/armnnDeserializer/test/DeserializeAdd.cpp
+++ b/src/armnnDeserializer/test/DeserializeAdd.cpp
@@ -7,6 +7,8 @@
#include "ParserFlatbuffersSerializeFixture.hpp"
#include "../Deserializer.hpp"
+#include <boost/core/ignore_unused.hpp>
+
#include <string>
#include <iostream>
@@ -20,6 +22,7 @@ struct AddFixture : public ParserFlatbuffersSerializeFixture
const std::string & dataType,
const std::string & activation="NONE")
{
+ boost::ignore_unused(activation);
m_JsonString = R"(
{
inputIds: [0, 1],
diff --git a/src/armnnDeserializer/test/DeserializeMultiplication.cpp b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
index a9dbfbf7da..f784ba6f31 100644
--- a/src/armnnDeserializer/test/DeserializeMultiplication.cpp
+++ b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
@@ -7,6 +7,8 @@
#include "ParserFlatbuffersSerializeFixture.hpp"
#include "../Deserializer.hpp"
+#include <boost/core/ignore_unused.hpp>
+
#include <string>
#include <iostream>
@@ -20,6 +22,7 @@ struct MultiplicationFixture : public ParserFlatbuffersSerializeFixture
const std::string & dataType,
const std::string & activation="NONE")
{
+ boost::ignore_unused(activation);
m_JsonString = R"(
{
inputIds: [0, 1],
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index fef2409074..de7fe5cb5e 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -10,6 +10,7 @@
#include <armnn/IRuntime.hpp>
#include <armnnDeserializer/IDeserializer.hpp>
+#include <boost/core/ignore_unused.hpp>
#include <boost/assert.hpp>
#include <boost/format.hpp>
@@ -154,6 +155,7 @@ struct ParserFlatbuffersSerializeFixture
armnnSerializer::TensorInfo tensorType, const std::string& name,
const float scale, const int64_t zeroPoint)
{
+ boost::ignore_unused(name);
BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(),
tensors->dimensions()->begin(), tensors->dimensions()->end());
diff --git a/src/armnnQuantizer/QuantizationDataSet.cpp b/src/armnnQuantizer/QuantizationDataSet.cpp
index 007cf0a16d..9694342fe8 100644
--- a/src/armnnQuantizer/QuantizationDataSet.cpp
+++ b/src/armnnQuantizer/QuantizationDataSet.cpp
@@ -8,6 +8,7 @@
#define BOOST_FILESYSTEM_NO_DEPRECATED
+#include <boost/core/ignore_unused.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/path.hpp>
@@ -51,6 +52,7 @@ void InputLayerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer,
armnn::LayerBindingId id,
const char* name)
{
+ boost::ignore_unused(name);
m_TensorInfos.emplace(id, layer->GetOutputSlot(0).GetTensorInfo());
}
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 5d06958054..608a9c3480 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -11,6 +11,7 @@
#include <iostream>
+#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <flatbuffers/util.h>
@@ -80,6 +81,8 @@ uint32_t SerializerVisitor::GetSerializedId(armnn::LayerGuid guid)
// Build FlatBuffer for Input Layer
void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferInputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Input);
@@ -100,6 +103,8 @@ void SerializerVisitor::VisitInputLayer(const armnn::IConnectableLayer* layer, L
// Build FlatBuffer for Output Layer
void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer, LayerBindingId id, const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferOutputBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Output);
@@ -118,6 +123,7 @@ void SerializerVisitor::VisitOutputLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitAbsLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Abs);
auto flatBufferAbsLayer = serializer::CreateAbsLayer(m_flatBufferBuilder, flatBufferBaseLayer);
@@ -129,6 +135,8 @@ void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* lay
const armnn::ActivationDescriptor& descriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Activation);
@@ -150,6 +158,8 @@ void SerializerVisitor::VisitActivationLayer(const armnn::IConnectableLayer* lay
// Build FlatBuffer for Addition Layer
void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferAdditionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Addition);
@@ -165,6 +175,8 @@ void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *laye
const armnn::ArgMinMaxDescriptor& descriptor,
const char *name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ArgMinMax);
@@ -186,6 +198,8 @@ void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer*
const armnn::BatchToSpaceNdDescriptor& descriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchToSpaceNd);
@@ -218,6 +232,8 @@ void SerializerVisitor::VisitBatchNormalizationLayer(const armnn::IConnectableLa
const armnn::ConstTensor& gamma,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbBatchNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_BatchNormalization);
auto fbBatchNormalizationDescriptor = serializer::CreateBatchNormalizationDescriptor(
m_flatBufferBuilder,
@@ -243,6 +259,8 @@ void SerializerVisitor::VisitComparisonLayer(const armnn::IConnectableLayer* lay
const armnn::ComparisonDescriptor& descriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Comparison);
auto fbDescriptor = serializer::CreateComparisonDescriptor(
m_flatBufferBuilder,
@@ -257,6 +275,8 @@ void SerializerVisitor::VisitConstantLayer(const armnn::IConnectableLayer* layer
const armnn::ConstTensor& input,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferConstantBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Constant);
@@ -278,6 +298,8 @@ void SerializerVisitor::VisitConvolution2dLayer(const armnn::IConnectableLayer*
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
@@ -315,6 +337,8 @@ void SerializerVisitor::VisitDepthToSpaceLayer(const armnn::IConnectableLayer* l
const armnn::DepthToSpaceDescriptor& descriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthToSpace);
auto fbDescriptor = CreateDepthToSpaceDescriptor(m_flatBufferBuilder,
descriptor.m_BlockSize,
@@ -331,6 +355,8 @@ void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectab
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DepthwiseConvolution2d);
auto fbDescriptor = CreateDepthwiseConvolution2dDescriptor(m_flatBufferBuilder,
descriptor.m_PadLeft,
@@ -363,6 +389,8 @@ void SerializerVisitor::VisitDepthwiseConvolution2dLayer(const armnn::IConnectab
void SerializerVisitor::VisitDequantizeLayer(const armnn::IConnectableLayer* layer,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbDequantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Dequantize);
auto fbDequantizeLayer = serializer::CreateDequantizeLayer(m_flatBufferBuilder, fbDequantizeBaseLayer);
@@ -374,6 +402,8 @@ void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectable
const armnn::ConstTensor& anchors,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_DetectionPostProcess);
auto fbDescriptor = CreateDetectionPostProcessDescriptor(m_flatBufferBuilder,
descriptor.m_MaxDetections,
@@ -400,6 +430,8 @@ void SerializerVisitor::VisitDetectionPostProcessLayer(const armnn::IConnectable
void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbDivisionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Division);
auto fbDivisionLayer = serializer::CreateDivisionLayer(m_flatBufferBuilder, fbDivisionBaseLayer);
@@ -408,6 +440,8 @@ void SerializerVisitor::VisitDivisionLayer(const armnn::IConnectableLayer* layer
void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Equal);
auto fbEqualLayer = serializer::CreateEqualLayer(m_flatBufferBuilder, fbBaseLayer);
@@ -416,6 +450,8 @@ void SerializerVisitor::VisitEqualLayer(const armnn::IConnectableLayer* layer, c
void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, const char *name)
{
+ boost::ignore_unused(name);
+
auto flatBufferFloorBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Floor);
auto flatBufferFloorLayer = serializer::CreateFloorLayer(m_flatBufferBuilder, flatBufferFloorBaseLayer);
@@ -424,6 +460,8 @@ void SerializerVisitor::VisitFloorLayer(const armnn::IConnectableLayer *layer, c
void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbGatherBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Gather);
auto flatBufferLayer = serializer::CreateGatherLayer(m_flatBufferBuilder, fbGatherBaseLayer);
@@ -432,6 +470,8 @@ void SerializerVisitor::VisitGatherLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbGreaterBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Greater);
auto fbGreaterLayer = serializer::CreateGreaterLayer(m_flatBufferBuilder, fbGreaterBaseLayer);
@@ -443,6 +483,8 @@ void SerializerVisitor::VisitInstanceNormalizationLayer(
const armnn::InstanceNormalizationDescriptor& instanceNormalizationDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbDescriptor = serializer::CreateInstanceNormalizationDescriptor(
m_flatBufferBuilder,
instanceNormalizationDescriptor.m_Gamma,
@@ -460,6 +502,8 @@ void SerializerVisitor::VisitL2NormalizationLayer(const armnn::IConnectableLayer
const armnn::L2NormalizationDescriptor& l2NormalizationDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_L2Normalization);
@@ -479,6 +523,8 @@ void SerializerVisitor::VisitLogSoftmaxLayer(const armnn::IConnectableLayer* lay
const armnn::LogSoftmaxDescriptor& logSoftmaxDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferLogSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_LogSoftmax);
@@ -502,6 +548,8 @@ void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer,
const armnn::LstmInputParams& params,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Lstm);
auto fbLstmDescriptor = serializer::CreateLstmDescriptor(
@@ -605,6 +653,8 @@ void SerializerVisitor::VisitLstmLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitMaximumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbMaximumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Maximum);
auto fbMaximumLayer = serializer::CreateMaximumLayer(m_flatBufferBuilder, fbMaximumBaseLayer);
@@ -615,6 +665,8 @@ void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer,
const armnn::MeanDescriptor& descriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbMeanBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Mean);
auto fbMeanDescriptor = serializer::CreateMeanDescriptor(m_flatBufferBuilder,
m_flatBufferBuilder.CreateVector(descriptor.m_Axis),
@@ -629,6 +681,8 @@ void SerializerVisitor::VisitMeanLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbMinimumBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Minimum);
auto fbMinimumLayer = serializer::CreateMinimumLayer(m_flatBufferBuilder, fbMinimumBaseLayer);
@@ -637,6 +691,8 @@ void SerializerVisitor::VisitMinimumLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitMergeLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbMergeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Merge);
auto fbMergeLayer = serializer::CreateMergeLayer(m_flatBufferBuilder, fbMergeBaseLayer);
@@ -654,6 +710,8 @@ void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer,
const armnn::ConcatDescriptor& concatDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto flatBufferConcatBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Concat);
std::vector<flatbuffers::Offset<UintVector>> views;
@@ -685,6 +743,8 @@ void SerializerVisitor::VisitConcatLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitMultiplicationLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbMultiplicationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Multiplication);
auto fbMultiplicationLayer = serializer::CreateMultiplicationLayer(m_flatBufferBuilder,
fbMultiplicationBaseLayer);
@@ -696,6 +756,8 @@ void SerializerVisitor::VisitPadLayer(const armnn::IConnectableLayer* layer,
const armnn::PadDescriptor& padDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pad);
std::vector<unsigned int> padList;
@@ -720,6 +782,8 @@ void SerializerVisitor::VisitPermuteLayer(const armnn::IConnectableLayer* layer,
const armnn::PermuteDescriptor& permuteDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferPermuteBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Permute);
@@ -746,6 +810,8 @@ void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
const armnn::ReshapeDescriptor& reshapeDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferReshapeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Reshape);
@@ -770,6 +836,8 @@ void SerializerVisitor::VisitResizeBilinearLayer(const armnn::IConnectableLayer*
const armnn::ResizeBilinearDescriptor& resizeDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_ResizeBilinear);
auto flatBufferDescriptor =
@@ -789,6 +857,8 @@ void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
const armnn::ResizeDescriptor& resizeDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Resize);
auto flatBufferDescriptor =
@@ -807,6 +877,8 @@ void SerializerVisitor::VisitResizeLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitRsqrtLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbRsqrtBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rsqrt);
auto fbRsqrtLayer = serializer::CreateRsqrtLayer(m_flatBufferBuilder, fbRsqrtBaseLayer);
@@ -817,6 +889,8 @@ void SerializerVisitor::VisitSliceLayer(const armnn::IConnectableLayer* layer,
const armnn::SliceDescriptor& sliceDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbSliceBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Slice);
auto fbSliceDescriptor = CreateSliceDescriptor(m_flatBufferBuilder,
m_flatBufferBuilder.CreateVector(sliceDescriptor.m_Begin),
@@ -832,6 +906,8 @@ void SerializerVisitor::VisitSoftmaxLayer(const armnn::IConnectableLayer* layer,
const armnn::SoftmaxDescriptor& softmaxDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferSoftmaxBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Softmax);
@@ -852,6 +928,8 @@ void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* laye
const armnn::Pooling2dDescriptor& pooling2dDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbPooling2dBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
m_flatBufferBuilder,
@@ -878,6 +956,8 @@ void SerializerVisitor::VisitPooling2dLayer(const armnn::IConnectableLayer* laye
void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferPreluBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Prelu);
@@ -890,6 +970,8 @@ void SerializerVisitor::VisitPreluLayer(const armnn::IConnectableLayer* layer,
void SerializerVisitor::VisitQuantizeLayer(const armnn::IConnectableLayer *layer, const char *name)
{
+ boost::ignore_unused(name);
+
auto fbQuantizeBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Quantize);
auto fbQuantizeLayer = serializer::CreateQuantizeLayer(m_flatBufferBuilder,
fbQuantizeBaseLayer);
@@ -903,6 +985,8 @@ void SerializerVisitor::VisitFullyConnectedLayer(const armnn::IConnectableLayer*
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_FullyConnected);
@@ -938,6 +1022,8 @@ void SerializerVisitor::VisitSpaceToBatchNdLayer(const armnn::IConnectableLayer*
const armnn::SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer BaseLayer
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToBatchNd);
@@ -967,6 +1053,8 @@ void SerializerVisitor::VisitSpaceToDepthLayer(const armnn::IConnectableLayer* l
const armnn::SpaceToDepthDescriptor& spaceToDepthDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_SpaceToDepth);
auto flatBufferDescriptor =
CreateSpaceToDepthDescriptor(m_flatBufferBuilder,
@@ -985,6 +1073,8 @@ void SerializerVisitor::VisitSplitterLayer(const armnn::IConnectableLayer* layer
const armnn::ViewsDescriptor& viewsDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
// Create FlatBuffer ViewOrigins
std::vector<flatbuffers::Offset<UintVector>> flatBufferViewOrigins;
flatBufferViewOrigins.reserve(viewsDescriptor.GetNumViews());
@@ -1049,6 +1139,8 @@ void SerializerVisitor::VisitNormalizationLayer(const armnn::IConnectableLayer*
const armnn::NormalizationDescriptor& descriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbNormalizationBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Normalization);
auto fbNormalizationDescriptor = serializer::CreateNormalizationDescriptor(
@@ -1072,6 +1164,8 @@ void SerializerVisitor::VisitStackLayer(const armnn::IConnectableLayer* layer,
const armnn::StackDescriptor& stackDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto stackBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Stack);
std::vector<unsigned int> inputShape;
@@ -1093,6 +1187,8 @@ void SerializerVisitor::VisitStandInLayer(const armnn::IConnectableLayer *layer,
const armnn::StandInDescriptor& standInDescriptor,
const char *name)
{
+ boost::ignore_unused(name);
+
auto fbDescriptor = serializer::CreateStandInDescriptor(m_flatBufferBuilder,
standInDescriptor.m_NumInputs,
standInDescriptor.m_NumOutputs);
@@ -1107,6 +1203,8 @@ void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* l
const armnn::StridedSliceDescriptor& stridedSliceDescriptor,
const char* name)
{
+ boost::ignore_unused(name);
+
auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_StridedSlice);
auto flatBufferDescriptor =
@@ -1130,6 +1228,8 @@ void SerializerVisitor::VisitStridedSliceLayer(const armnn::IConnectableLayer* l
void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbSubtractionBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Subtraction);
auto fbSubtractionLayer = serializer::CreateSubtractionLayer(m_flatBufferBuilder, fbSubtractionBaseLayer);
@@ -1138,6 +1238,8 @@ void SerializerVisitor::VisitSubtractionLayer(const armnn::IConnectableLayer* la
void SerializerVisitor::VisitSwitchLayer(const armnn::IConnectableLayer* layer, const char* name)
{
+ boost::ignore_unused(name);
+
auto fbSwitchBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Switch);
auto fbSwitchLayer = serializer::CreateSwitchLayer(m_flatBufferBuilder, fbSwitchBaseLayer);
@@ -1151,6 +1253,8 @@ void SerializerVisitor::VisitTransposeConvolution2dLayer(
const armnn::Optional<armnn::ConstTensor>& biases,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
auto fbDescriptor = CreateTransposeConvolution2dDescriptor(m_flatBufferBuilder,
descriptor.m_PadLeft,
@@ -1183,6 +1287,8 @@ void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer*
const armnn::QuantizedLstmInputParams& params,
const char* name)
{
+ boost::ignore_unused(name);
+
auto fbQuantizedLstmBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_QuantizedLstm);
// Get input parameters
@@ -1227,6 +1333,7 @@ void SerializerVisitor::VisitQuantizedLstmLayer(const armnn::IConnectableLayer*
fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
const serializer::LayerType layerType)
{
+
uint32_t fbIndex = GetSerializedId(layer->GetGuid());
std::vector<fb::Offset<serializer::InputSlot>> inputSlots = CreateInputSlots(layer);
@@ -1242,6 +1349,7 @@ fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConn
void SerializerVisitor::CreateAnyLayer(const flatbuffers::Offset<void>& layer, const serializer::Layer serializerLayer)
{
+
auto anyLayer = armnnSerializer::CreateAnyLayer(m_flatBufferBuilder, serializerLayer, layer);
m_serializedLayers.push_back(anyLayer);
}
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index 2a46045f59..34e99f66db 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -8,6 +8,8 @@
#include <armnn/INetwork.hpp>
#include "../Serializer.hpp"
#include <sstream>
+
+#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(SerializerTests)
@@ -19,6 +21,7 @@ public:
const armnn::ActivationDescriptor& activationDescriptor,
const char* name) override
{
+ boost::ignore_unused(layer, activationDescriptor);
BOOST_TEST(name == "activation");
}
};
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 4260669124..8dfca3c52e 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -56,7 +56,7 @@ public: \
struct DefaultLayerVerifierPolicy
{
- static void Apply(const std::string s = "")
+ static void Apply(const std::string)
{
BOOST_TEST_MESSAGE("Unexpected layer found in network");
BOOST_TEST(false);
@@ -75,7 +75,7 @@ public:
void VisitInputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId, const char*) override {}
- void VisitOutputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId id, const char*) override {}
+ void VisitOutputLayer(const armnn::IConnectableLayer*, armnn::LayerBindingId, const char*) override {}
protected:
void VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name)
@@ -521,7 +521,7 @@ BOOST_AUTO_TEST_CASE(SerializeConstant)
CompareConstTensor(input, m_LayerInput);
}
- void VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override {}
+ void VisitAdditionLayer(const armnn::IConnectableLayer*, const char*) override {}
private:
armnn::ConstTensor m_LayerInput;
@@ -927,7 +927,7 @@ public:
BOOST_CHECK(descriptor.m_Operation == armnn::ComparisonOperation::Equal);
}
- void VisitEqualLayer(const armnn::IConnectableLayer* layer, const char* name) override
+ void VisitEqualLayer(const armnn::IConnectableLayer*, const char*) override
{
throw armnn::Exception("EqualLayer should have translated to ComparisonLayer");
}
@@ -1146,9 +1146,9 @@ BOOST_AUTO_TEST_CASE(SerializeGather)
VerifyNameAndConnections(layer, name);
}
- void VisitConstantLayer(const armnn::IConnectableLayer* layer,
- const armnn::ConstTensor& input,
- const char *name) override {}
+ void VisitConstantLayer(const armnn::IConnectableLayer*,
+ const armnn::ConstTensor&,
+ const char*) override {}
};
const std::string layerName("gather");
@@ -1201,7 +1201,7 @@ public:
BOOST_CHECK(descriptor.m_Operation == armnn::ComparisonOperation::Greater);
}
- void VisitGreaterLayer(const armnn::IConnectableLayer* layer, const char* name) override
+ void VisitGreaterLayer(const armnn::IConnectableLayer*, const char*) override
{
throw armnn::Exception("GreaterLayer should have translated to ComparisonLayer");
}
@@ -1543,9 +1543,9 @@ public:
const armnn::OriginsDescriptor& descriptor)
: LayerVerifierBaseWithDescriptor<armnn::OriginsDescriptor>(layerName, inputInfos, outputInfos, descriptor) {}
- void VisitMergerLayer(const armnn::IConnectableLayer* layer,
- const armnn::OriginsDescriptor& descriptor,
- const char* name) override
+ void VisitMergerLayer(const armnn::IConnectableLayer*,
+ const armnn::OriginsDescriptor&,
+ const char*) override
{
throw armnn::Exception("MergerLayer should have translated to ConcatLayer");
}
@@ -2514,9 +2514,9 @@ BOOST_AUTO_TEST_CASE(SerializeSwitch)
VerifyNameAndConnections(layer, name);
}
- void VisitConstantLayer(const armnn::IConnectableLayer* layer,
- const armnn::ConstTensor& input,
- const char *name) override {}
+ void VisitConstantLayer(const armnn::IConnectableLayer*,
+ const armnn::ConstTensor&,
+ const char*) override {}
};
const std::string layerName("switch");
@@ -2658,7 +2658,7 @@ BOOST_AUTO_TEST_CASE(SerializeDeserializeNonLinearNetwork)
CompareConstTensor(input, m_LayerInput);
}
- void VisitAdditionLayer(const armnn::IConnectableLayer* layer, const char* name = nullptr) override {}
+ void VisitAdditionLayer(const armnn::IConnectableLayer*, const char*) override {}
private:
armnn::ConstTensor m_LayerInput;