aboutsummaryrefslogtreecommitdiff
path: root/tests
diff options
context:
space:
mode:
authorMatteo Martincigh <matteo.martincigh@arm.com>2019-01-11 13:25:59 +0000
committerMatteo Martincigh <matteo.martincigh@arm.com>2019-01-15 08:59:50 +0000
commit4912402497a51c6afe0898b3900f87feefa006a6 (patch)
tree4e9b5161781d2b0be041aec17227193da5977443 /tests
parentd0a1608e2c41639d8f3e3f9305d79c5f92c9cff8 (diff)
downloadarmnn-4912402497a51c6afe0898b3900f87feefa006a6.tar.gz
IVGCVSW-2454 Merge together the pluggable backends work (was in a separate branch) and master

* Brings in all the changes done for the pluggable backends
* Added sub-graph support and tests
* Added precompiled layer support and tests
* Moved BackendSettings to a separate file
* Removed the backend-specific code
* Ported DebugLayer and associated functionality
* Included fixes to make those changes work with master

Change-Id: Id7028fa7917527b844628d5aff5732e3d94c0488
Diffstat (limited to 'tests')
-rw-r--r--tests/ExecuteNetwork/ExecuteNetwork.cpp4
-rw-r--r--tests/InferenceTest.cpp2
-rw-r--r--tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp14
3 files changed, 9 insertions, 11 deletions
diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index cfddc38a99..7d6aafcfbf 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -463,9 +463,9 @@ int main(int argc, const char* argv[])
"as they are expected to be defined in the file for each test in particular.")
("concurrent,n", po::bool_switch()->default_value(false),
"Whether or not the test cases should be executed in parallel")
- ("model-format,f", po::value(&modelFormat),
+ ("model-format,f", po::value(&modelFormat)->required(),
"caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or tensorflow-text.")
- ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt,"
+ ("model-path,m", po::value(&modelPath)->required(), "Path to model file, e.g. .caffemodel, .prototxt,"
" .tflite, .onnx")
("compute,c", po::value<std::vector<std::string>>()->multitoken(),
backendsMessage.c_str())
diff --git a/tests/InferenceTest.cpp b/tests/InferenceTest.cpp
index 8733bc53a0..7413de97dd 100644
--- a/tests/InferenceTest.cpp
+++ b/tests/InferenceTest.cpp
@@ -34,8 +34,6 @@ bool ParseCommandLine(int argc, char** argv, IInferenceTestCaseProvider& testCas
{
namespace po = boost::program_options;
- std::string computeDeviceStr;
-
po::options_description desc("Options");
try
diff --git a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
index e23dbdc9d4..84d5292195 100644
--- a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
+++ b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
@@ -23,7 +23,7 @@ int main(int argc, char* argv[])
{"shark.jpg", 669},
};
- armnn::TensorShape inputTensorShape({ 2, 224, 224, 3 });
+ armnn::TensorShape inputTensorShape({ 1, 224, 224, 3 });
using DataType = uint8_t;
using DatabaseType = ImagePreprocessor<DataType>;
@@ -34,11 +34,11 @@ int main(int argc, char* argv[])
retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
ParserType>(
argc, argv,
- "vgg_16_u8.tflite", // model name
- true, // model is binary
- "content_vgg/concat", // input tensor name
- "content_vgg/prob", // output tensor name
- { 0, 1, 2 }, // test images to test with as above
+ "vgg_16_u8_batch1.tflite", // model name
+ true, // model is binary
+ "content_vgg/concat", // input tensor name
+ "content_vgg/prob", // output tensor name
+ { 0, 1, 2 }, // test images to test with as above
[&imageSet](const char* dataDir, const ModelType & model) {
// we need to get the input quantization parameters from
// the parsed model
@@ -53,7 +53,7 @@ int main(int argc, char* argv[])
{{0, 0, 0}},
{{1, 1, 1}},
DatabaseType::DataFormat::NCHW,
- 2);
+ 1);
},
&inputTensorShape);
}