author     Derek Lamberti <derek.lamberti@arm.com>    2019-12-10 21:20:10 +0000
committer  Francis Murtagh <francis.murtagh@arm.com>  2019-12-30 15:03:02 +0000
commit     eb1fce0f5b13b479b13a9d265ebdc8792676b163 (patch)
tree       450dcfe789f230218d3f8f334a8248e81b4d82fc
parent     12575ec6e9a936292ee1915b078b0bf0015c4438 (diff)
download   armnn-eb1fce0f5b13b479b13a9d265ebdc8792676b163.tar.gz
IVGCVSW-4246 Clean build end-to-end tests with -Wextra

Change-Id: Ia25f919e45a210e1e2d5d50b0c9098bf01d88013
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
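The patch silences -Wextra's unused-parameter warnings in two ways: default virtual implementations consume their arguments via boost::ignore_unused, and callbacks that never touch a parameter simply drop the parameter name. A minimal sketch of the first pattern, mirroring the IInferenceTestCaseProvider change in the diff below (class and member names are taken from the diff; the sketch itself is illustrative and not part of the patch):

    #include <boost/core/ignore_unused.hpp>
    #include <boost/program_options.hpp>

    class IInferenceTestCaseProvider
    {
    public:
        virtual ~IInferenceTestCaseProvider() {}

        // The default implementation intentionally does nothing with 'options';
        // boost::ignore_unused marks the parameter as deliberately unused so a
        // clean -Wextra build does not emit -Wunused-parameter here.
        virtual void AddCommandLineOptions(boost::program_options::options_description& options)
        {
            boost::ignore_unused(options);
        }
    };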
-rw-r--r--  tests/InferenceTest.hpp                                                               | 13
-rw-r--r--  tests/InferenceTest.inl                                                               |  1
-rw-r--r--  tests/MobileNetSsdInferenceTest.hpp                                                   |  2
-rw-r--r--  tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp                                 |  1
-rw-r--r--  tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp           |  2
-rw-r--r--  tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp           |  2
-rw-r--r--  tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp                                     |  2
-rw-r--r--  tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp |  2
-rw-r--r--  tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp               |  2
-rw-r--r--  tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp           |  2
-rw-r--r--  tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp         |  2
-rw-r--r--  tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp                                   |  2
-rw-r--r--  tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp                       |  2
-rw-r--r--  tests/YoloInferenceTest.hpp                                                           |  2
14 files changed, 25 insertions(+), 12 deletions(-)
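The second pattern appears in the TfLite*-Armnn test mains below: the database-factory lambdas receive a ModelType reference that some databases never read, so the unused parameter is left unnamed rather than passed to ignore_unused. A standalone sketch of that idiom (ModelType and DatabaseType here are stand-ins, not the real test-harness types):

    struct ModelType {};                                            // stand-in for the parsed model type
    struct DatabaseType { explicit DatabaseType(const char*) {} };  // stand-in for the image database

    int main()
    {
        // The factory callback must accept a ModelType (some databases read
        // quantization parameters from it), but this one does not, so the
        // parameter is left unnamed and -Wextra has nothing to flag.
        auto createDatabase = [](const char* dataDir, const ModelType&)
        {
            return DatabaseType(dataDir);
        };

        createDatabase("data/", ModelType{});
        return 0;
    }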
diff --git a/tests/InferenceTest.hpp b/tests/InferenceTest.hpp
index 7b7dcecea0..6423d1c7ff 100644
--- a/tests/InferenceTest.hpp
+++ b/tests/InferenceTest.hpp
@@ -9,7 +9,7 @@
#include <armnn/TypesUtils.hpp>
#include "InferenceModel.hpp"
-
+#include <boost/core/ignore_unused.hpp>
#include <boost/program_options.hpp>
@@ -91,8 +91,15 @@ class IInferenceTestCaseProvider
public:
virtual ~IInferenceTestCaseProvider() {}
- virtual void AddCommandLineOptions(boost::program_options::options_description& options) {};
- virtual bool ProcessCommandLineOptions(const InferenceTestOptions &commonOptions) { return true; };
+ virtual void AddCommandLineOptions(boost::program_options::options_description& options)
+ {
+ boost::ignore_unused(options);
+ };
+ virtual bool ProcessCommandLineOptions(const InferenceTestOptions &commonOptions)
+ {
+ boost::ignore_unused(commonOptions);
+ return true;
+ };
virtual std::unique_ptr<IInferenceTestCase> GetTestCase(unsigned int testCaseId) = 0;
virtual bool OnInferenceTestFinished() { return true; };
};
diff --git a/tests/InferenceTest.inl b/tests/InferenceTest.inl
index fd888e2137..c05e70d9f7 100644
--- a/tests/InferenceTest.inl
+++ b/tests/InferenceTest.inl
@@ -79,6 +79,7 @@ struct ClassifierResultProcessor : public boost::static_visitor<>
void operator()(const std::vector<int>& values)
{
+ boost::ignore_unused(values);
BOOST_ASSERT_MSG(false, "Non-float predictions output not supported.");
}
diff --git a/tests/MobileNetSsdInferenceTest.hpp b/tests/MobileNetSsdInferenceTest.hpp
index a950b93c1c..c99844b6bb 100644
--- a/tests/MobileNetSsdInferenceTest.hpp
+++ b/tests/MobileNetSsdInferenceTest.hpp
@@ -33,6 +33,8 @@ public:
TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
+ boost::ignore_unused(options);
+
const std::vector<float>& output1 = boost::get<std::vector<float>>(this->GetOutputs()[0]); // bounding boxes
BOOST_ASSERT(output1.size() == k_OutputSize1);
diff --git a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
index 279bf30e83..f9e9b146d4 100644
--- a/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
+++ b/tests/NetworkExecutionUtils/NetworkExecutionUtils.hpp
@@ -732,6 +732,7 @@ int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IR
const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
const bool printIntermediate, bool enableLayerDetails = false, bool parseUnuspported = false)
{
+ boost::ignore_unused(runtime);
std::string modelFormat;
std::string modelPath;
std::string inputNames;
diff --git a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
index bf5a865ac8..f4b395598e 100644
--- a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"output", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
index b0af830ba7..169ecb064b 100644
--- a/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
+++ b/tests/TfLiteInceptionV4Quantized-Armnn/TfLiteInceptionV4Quantized-Armnn.cpp
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"InceptionV4/Logits/Predictions", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
index 4cf16d78cc..4194d4b30a 100644
--- a/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
+++ b/tests/TfLiteMnasNet-Armnn/TfLiteMnasNet-Armnn.cpp
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"output", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
return DatabaseType(
dataDir,
224,
diff --git a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
index f193a9870a..f497de5660 100644
--- a/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
+++ b/tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp
@@ -39,7 +39,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"MobilenetV1/Predictions/Reshape_1", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
index 1b411f9b94..b2d3f0f3f5 100644
--- a/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
@@ -105,7 +105,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"MobilenetV1/Predictions/Reshape_1", // output tensor name
indices, // vector of indices to select which images to validate
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
index 9bc1034fea..b8def4fbb4 100644
--- a/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
+++ b/tests/TfLiteMobilenetV2Quantized-Armnn/TfLiteMobilenetV2Quantized-Armnn.cpp
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"output", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
index 98235e3f8a..7446809e3a 100644
--- a/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
+++ b/tests/TfLiteResNetV2-50-Quantized-Armnn/TfLiteResNetV2-50-Quantized-Armnn.cpp
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"resnet_v2_50/predictions/Reshape_1", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
index 1e2ffbf568..107660ef1a 100644
--- a/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
+++ b/tests/TfLiteResNetV2-Armnn/TfLiteResNetV2-Armnn.cpp
@@ -37,7 +37,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"output", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
return DatabaseType(
dataDir,
299,
diff --git a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
index 030f01cce4..8da553f0c2 100644
--- a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
+++ b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
@@ -39,7 +39,7 @@ int main(int argc, char* argv[])
"input", // input tensor name
"vgg_16/fc8/squeezed", // output tensor name
{ 0, 1, 2 }, // test images to test with as above
- [&imageSet](const char* dataDir, const ModelType & model) {
+ [&imageSet](const char* dataDir, const ModelType &) {
// we need to get the input quantization parameters from
// the parsed model
return DatabaseType(
diff --git a/tests/YoloInferenceTest.hpp b/tests/YoloInferenceTest.hpp
index 91ea97771c..16d0355d9d 100644
--- a/tests/YoloInferenceTest.hpp
+++ b/tests/YoloInferenceTest.hpp
@@ -32,6 +32,8 @@ public:
virtual TestCaseResult ProcessResult(const InferenceTestOptions& options) override
{
+ boost::ignore_unused(options);
+
using Boost3dArray = boost::multi_array<float, 3>;
const std::vector<float>& output = boost::get<std::vector<float>>(this->GetOutputs()[0]);