From 4912402497a51c6afe0898b3900f87feefa006a6 Mon Sep 17 00:00:00 2001
From: Matteo Martincigh
Date: Fri, 11 Jan 2019 13:25:59 +0000
Subject: IVGCVSW-2454 Merge together the pluggable backends work (was in a
 separate branch) and master

 * Brings in all the changes done for the pluggable backends
 * Added sub-graph support and tests
 * Added precompiled layer support and tests
 * Moved BackendSettings to a separate file
 * Removed the backend-specific code
 * Ported DebugLayer and associated functionality
 * Included fixes to make those changes work with master

Change-Id: Id7028fa7917527b844628d5aff5732e3d94c0488
---
 tests/ExecuteNetwork/ExecuteNetwork.cpp |  4 ++--
 tests/InferenceTest.cpp                 |  2 --
 .../TfLiteVGG16Quantized-Armnn.cpp      | 14 +++++++-------
 3 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/tests/ExecuteNetwork/ExecuteNetwork.cpp b/tests/ExecuteNetwork/ExecuteNetwork.cpp
index cfddc38a99..7d6aafcfbf 100644
--- a/tests/ExecuteNetwork/ExecuteNetwork.cpp
+++ b/tests/ExecuteNetwork/ExecuteNetwork.cpp
@@ -463,9 +463,9 @@ int main(int argc, const char* argv[])
          "as they are expected to be defined in the file for each test in particular.")
         ("concurrent,n", po::bool_switch()->default_value(false),
          "Whether or not the test cases should be executed in parallel")
-        ("model-format,f", po::value(&modelFormat),
+        ("model-format,f", po::value(&modelFormat)->required(),
          "caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or tensorflow-text.")
-        ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .caffemodel, .prototxt,"
+        ("model-path,m", po::value(&modelPath)->required(), "Path to model file, e.g. .caffemodel, .prototxt,"
          " .tflite, .onnx")
         ("compute,c", po::value<std::vector<std::string>>()->multitoken(),
          backendsMessage.c_str())
diff --git a/tests/InferenceTest.cpp b/tests/InferenceTest.cpp
index 8733bc53a0..7413de97dd 100644
--- a/tests/InferenceTest.cpp
+++ b/tests/InferenceTest.cpp
@@ -34,8 +34,6 @@ bool ParseCommandLine(int argc, char** argv, IInferenceTestCaseProvider& testCas
 {
     namespace po = boost::program_options;
 
-    std::string computeDeviceStr;
-
     po::options_description desc("Options");
 
     try
diff --git a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
index e23dbdc9d4..84d5292195 100644
--- a/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
+++ b/tests/TfLiteVGG16Quantized-Armnn/TfLiteVGG16Quantized-Armnn.cpp
@@ -23,7 +23,7 @@ int main(int argc, char* argv[])
         {"shark.jpg", 669},
     };
 
-    armnn::TensorShape inputTensorShape({ 2, 224, 224, 3 });
+    armnn::TensorShape inputTensorShape({ 1, 224, 224, 3 });
 
     using DataType = uint8_t;
     using DatabaseType = ImagePreprocessor<DataType>;
@@ -34,11 +34,11 @@ int main(int argc, char* argv[])
         retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
                                                           ParserType>(
                     argc, argv,
-                    "vgg_16_u8.tflite",        // model name
-                    true,                      // model is binary
-                    "content_vgg/concat",      // input tensor name
-                    "content_vgg/prob",        // output tensor name
-                    { 0, 1, 2 },               // test images to test with as above
+                    "vgg_16_u8_batch1.tflite", // model name
+                    true,                      // model is binary
+                    "content_vgg/concat",      // input tensor name
+                    "content_vgg/prob",        // output tensor name
+                    { 0, 1, 2 },               // test images to test with as above
                     [&imageSet](const char* dataDir, const ModelType & model) {
                         // we need to get the input quantization parameters from
                         // the parsed model
@@ -53,7 +53,7 @@ int main(int argc, char* argv[])
                             {{0, 0, 0}},
                             {{1, 1, 1}},
                             DatabaseType::DataFormat::NCHW,
-                            2);
+                            1);
                     },
                     &inputTensorShape);
 }
--
cgit v1.2.1
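
Editor's note on the `->required()` change in ExecuteNetwork.cpp: in Boost.Program_options, a required value is enforced by `po::notify()`, which throws `po::required_option` when the flag is absent, so the tool fails fast with a clear message instead of proceeding with an empty model format or path. Below is a minimal standalone sketch of that behaviour; the option names mirror the patch, but the surrounding program is illustrative only and is not ArmNN code:

```cpp
#include <boost/program_options.hpp>
#include <iostream>
#include <string>

namespace po = boost::program_options;

int main(int argc, const char* argv[])
{
    std::string modelFormat;
    std::string modelPath;

    po::options_description desc("Options");
    desc.add_options()
        ("model-format,f", po::value(&modelFormat)->required(),
         "e.g. tflite-binary")
        ("model-path,m", po::value(&modelPath)->required(),
         "Path to model file");

    po::variables_map vm;
    try
    {
        po::store(po::parse_command_line(argc, argv, desc), vm);
        // notify() is the call that enforces ->required(): it throws
        // po::required_option if either flag was not supplied.
        po::notify(vm);
    }
    catch (const po::error& e)
    {
        std::cerr << "Error: " << e.what() << std::endl;
        return 1;
    }

    std::cout << "Loading " << modelPath << " as " << modelFormat << std::endl;
    return 0;
}
```

Run with no arguments, this prints the missing-option error and exits non-zero; run as `./sketch -f tflite-binary -m model.tflite`, it proceeds normally.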
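Likewise, the VGG16 test now loads a batch-1 model, so three things must agree on one image per inference: the input tensor shape, the preprocessor's batch-size argument (the trailing `2` changed to `1`), and the model file itself. A small hypothetical snippet, assuming the `armnn::TensorShape` API from `armnn/Tensor.hpp`, showing what the new NHWC shape describes:

```cpp
#include <armnn/Tensor.hpp>
#include <iostream>

int main()
{
    // NHWC layout: 1 image per inference, 224x224 pixels, 3 channels,
    // matching the batch-1 quantized VGG16 model.
    armnn::TensorShape inputTensorShape({ 1, 224, 224, 3 });

    // 1 * 224 * 224 * 3 = 150528 uint8 values fed in per inference
    // (half of the 301056 needed by the previous batch-2 shape).
    std::cout << inputTensorShape.GetNumDimensions() << " dimensions, "
              << inputTensorShape.GetNumElements() << " elements" << std::endl;
    return 0;
}
```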