about summary refs log tree commit diff
diff options
context:
space:
mode:
author	Derek Lamberti <derek.lamberti@arm.com>	2019-12-10 22:20:54 +0000
committer	Derek Lamberti <derek.lamberti@arm.com>	2020-01-07 16:13:46 +0000
commitba25aeecc1c9728eeb6246d686be3cce2df3a0e0 (patch)
treec7c8489770845e489c6cf93752790c492ce5cffd
parentbaa177f0d465fe1d4f9e1979e1611ff6b1f128e0 (diff)
downloadarmnn-ba25aeecc1c9728eeb6246d686be3cce2df3a0e0.tar.gz
IVGCVSW-4246 Enable -Wextra by default
!referencetests:218340
Change-Id: If24a604310d0363b1f09b406e4d53ebfeb106aad
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
-rw-r--r--CMakeLists.txt2
-rw-r--r--cmake/GlobalConfig.cmake2
-rw-r--r--src/armnn/CompatibleTypes.hpp2
-rw-r--r--src/armnn/LayerSupportCommon.hpp8
-rw-r--r--src/armnn/test/CreateWorkload.hpp3
-rw-r--r--src/armnnTfParser/test/Split.cpp1
-rw-r--r--src/armnnUtils/TensorUtils.cpp2
-rw-r--r--src/backends/cl/ClWorkloadFactory.cpp7
-rw-r--r--src/backends/cl/ClWorkloadFactory.hpp3
-rw-r--r--src/backends/neon/NeonWorkloadFactory.cpp13
-rw-r--r--src/backends/neon/NeonWorkloadFactory.hpp6
-rw-r--r--src/backends/reference/RefMemoryManager.cpp1
-rw-r--r--tests/DeepSpeechV1InferenceTest.hpp2
13 files changed, 17 insertions, 35 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d0f3d7aebe..d268983a82 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -133,7 +133,7 @@ if(BUILD_TF_PARSER)
)
# The generated tensorflow protobuf .cc files are not warning clean and we can't fix them.
if(COMPILER_IS_GNU_LIKE)
- set_source_files_properties(${TF_PROTOBUFS} PROPERTIES COMPILE_FLAGS "-Wno-conversion -Wno-sign-conversion")
+ set_source_files_properties(${TF_PROTOBUFS} PROPERTIES COMPILE_FLAGS "-Wno-unused-variable -Wno-unused-parameter -Wno-conversion -Wno-sign-conversion")
endif()
add_library_ex(armnnTfParser SHARED ${armnn_tf_parser_sources})
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index 4a2c026f1b..ccf0eccd29 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -55,7 +55,7 @@ endif()
# Compiler flags that are always set
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
if(COMPILER_IS_GNU_LIKE)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL MSVC)
# Disable C4996 (use of deprecated identifier) due to https://developercommunity.visualstudio.com/content/problem/252574/deprecated-compilation-warning-for-virtual-overrid.html
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /MP /wd4996")
diff --git a/src/armnn/CompatibleTypes.hpp b/src/armnn/CompatibleTypes.hpp
index fd33f6c37a..cc545a9642 100644
--- a/src/armnn/CompatibleTypes.hpp
+++ b/src/armnn/CompatibleTypes.hpp
@@ -12,7 +12,7 @@ namespace armnn
{
template<typename T>
-bool CompatibleTypes(DataType dataType)
+bool CompatibleTypes(DataType)
{
return false;
}
diff --git a/src/armnn/LayerSupportCommon.hpp b/src/armnn/LayerSupportCommon.hpp
index 8fca3d49d1..557e72a323 100644
--- a/src/armnn/LayerSupportCommon.hpp
+++ b/src/armnn/LayerSupportCommon.hpp
@@ -70,6 +70,7 @@ bool FalseFunc(Optional<std::string&> reasonIfUnsupported, Params&&... params)
template<typename ... Params>
bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type");
return false;
}
@@ -77,6 +78,7 @@ bool FalseFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type");
return false;
}
@@ -84,6 +86,7 @@ bool FalseFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with 8-bit data type");
return false;
}
@@ -91,6 +94,7 @@ bool FalseFuncU8(Optional<std::string&> reasonIfUnsupported, Params&&... params)
template<typename ... Params>
bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with int32 data type");
return false;
}
@@ -98,6 +102,7 @@ bool FalseFuncI32(Optional<std::string&> reasonIfUnsupported, Params&&... params
template<typename ... Params>
bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type input");
return false;
}
@@ -105,6 +110,7 @@ bool FalseInputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... p
template<typename ... Params>
bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type input");
return false;
}
@@ -112,6 +118,7 @@ bool FalseInputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... p
template<typename ... Params>
bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float32 data type output");
return false;
}
@@ -119,6 +126,7 @@ bool FalseOutputFuncF32(Optional<std::string&> reasonIfUnsupported, Params&&...
template<typename ... Params>
bool FalseOutputFuncF16(Optional<std::string&> reasonIfUnsupported, Params&&... params)
{
+ boost::ignore_unused(params...);
SetValueChecked(reasonIfUnsupported, "Layer is not supported with float16 data type output");
return false;
}
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 4e7967bf40..f6928f858f 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -18,6 +18,7 @@
#include <boost/test/unit_test.hpp>
#include <boost/cast.hpp>
+#include <boost/core/ignore_unused.hpp>
#include <utility>
@@ -1324,6 +1325,8 @@ std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> Cre
armnn::Graph& graph,
bool biasEnabled = false)
{
+ boost::ignore_unused(graph);
+
// To create a PreCompiled layer, create a network and Optimize it.
armnn::Network net;
diff --git a/src/armnnTfParser/test/Split.cpp b/src/armnnTfParser/test/Split.cpp
index 10ff04df89..d53ae672eb 100644
--- a/src/armnnTfParser/test/Split.cpp
+++ b/src/armnnTfParser/test/Split.cpp
@@ -176,6 +176,7 @@ BOOST_FIXTURE_TEST_CASE(ParseSplit, InputFirstSplitFixture)
struct SplitLastDimFixture : public armnnUtils::ParserPrototxtFixture<armnnTfParser::ITfParser>
{
SplitLastDimFixture(bool withDimZero=false) {
+ boost::ignore_unused(withDimZero);
m_Prototext = R"(
node {
name: "Placeholder"
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index 84fc8db053..535d68adbe 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -114,7 +114,6 @@ unsigned int GetNumElementsBetween(const TensorShape& shape,
const unsigned int firstAxisInclusive,
const unsigned int lastAxisExclusive)
{
- BOOST_ASSERT(0 <= firstAxisInclusive);
BOOST_ASSERT(firstAxisInclusive <= lastAxisExclusive);
BOOST_ASSERT(lastAxisExclusive <= shape.GetNumDimensions());
unsigned int count = 1;
@@ -141,7 +140,6 @@ unsigned int GetUnsignedAxis(const unsigned int inputDimension, const int axis)
unsigned int GetNumElementsAfter(const armnn::TensorShape& shape, unsigned int axis)
{
unsigned int numDim = shape.GetNumDimensions();
- BOOST_ASSERT(0 >= axis);
BOOST_ASSERT(axis <= numDim - 1);
unsigned int count = 1;
for (unsigned int i = axis; i < numDim; i++)
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index 4746167795..f9e6632b0c 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -260,13 +260,6 @@ std::unique_ptr<IWorkload> ClWorkloadFactory::CreateEqual(const EqualQueueDescri
return CreateComparison(comparisonDescriptor, info);
}
-std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFakeQuantization(
- const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
-}
-
std::unique_ptr<IWorkload> ClWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index 8c94818db2..8f377e959d 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -95,9 +95,6 @@ public:
std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 8d798ec864..1cc9e50e0b 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -225,13 +225,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateEqual(const EqualQueueDesc
return CreateComparison(comparisonDescriptor, info);
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFakeQuantization(
- const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return nullptr;
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
@@ -441,12 +434,6 @@ std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSoftmax(const SoftmaxQueue
descriptor, info, m_MemoryManager->GetIntraLayerManager());
}
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
- const WorkloadInfo& info) const
-{
- return nullptr;
-}
-
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateSpaceToDepth(
const armnn::SpaceToDepthQueueDescriptor& descriptor, const armnn::WorkloadInfo& info) const
{
diff --git a/src/backends/neon/NeonWorkloadFactory.hpp b/src/backends/neon/NeonWorkloadFactory.hpp
index 6bdc237fff..b76a3a340a 100644
--- a/src/backends/neon/NeonWorkloadFactory.hpp
+++ b/src/backends/neon/NeonWorkloadFactory.hpp
@@ -97,9 +97,6 @@ public:
std::unique_ptr<IWorkload> CreateEqual(const EqualQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- std::unique_ptr<IWorkload> CreateFakeQuantization(const FakeQuantizationQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateFloor(const FloorQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
@@ -193,9 +190,6 @@ public:
std::unique_ptr<IWorkload> CreateSoftmax(const SoftmaxQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
- std::unique_ptr<IWorkload> CreateSpaceToBatchNd(const SpaceToBatchNdQueueDescriptor& descriptor,
- const WorkloadInfo& info) const override;
-
std::unique_ptr<IWorkload> CreateSpaceToDepth(const SpaceToDepthQueueDescriptor& descriptor,
const WorkloadInfo& info) const override;
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
index fdd008dbe6..4f15e39ee1 100644
--- a/src/backends/reference/RefMemoryManager.cpp
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -88,7 +88,6 @@ void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
void RefMemoryManager::Pool::Acquire()
{
BOOST_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
- BOOST_ASSERT(m_Size >= 0);
m_Pointer = ::operator new(size_t(m_Size));
}
diff --git a/tests/DeepSpeechV1InferenceTest.hpp b/tests/DeepSpeechV1InferenceTest.hpp
index c46fa5799f..ac28bbbcd4 100644
--- a/tests/DeepSpeechV1InferenceTest.hpp
+++ b/tests/DeepSpeechV1InferenceTest.hpp
@@ -8,6 +8,7 @@
#include "DeepSpeechV1Database.hpp"
#include <boost/assert.hpp>
+#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
@@ -36,6 +37,7 @@ public:
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
+ boost::ignore_unused(options);
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // logits
BOOST_ASSERT(output1.size() == k_OutputSize1);