author     Bruno Goncalves <bruno.slackware@gmail.com>   2018-12-27 16:15:01 -0200
committer  Matthew Bentham <matthew.bentham@arm.com>     2019-02-01 17:37:05 +0000
commit     c981df3bb24df1f98c233d885e73a2ea5c6d3449 (patch)
tree       724adc3cabb1c70ea80acf06d80967e5f27a1dd6 /tests
parent     06304114286efd0773e0a6d702f94f322feca5e4 (diff)
download   armnn-c981df3bb24df1f98c233d885e73a2ea5c6d3449.tar.gz
Added TfLiteParser test for InceptionV3Quantized
Change-Id: I6ed434b798c617f85ca1faa7c5eef4cdabff4a07
Diffstat (limited to 'tests')
-rw-r--r--  tests/CMakeLists.txt                                                          6
-rw-r--r--  tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp  63
2 files changed, 69 insertions, 0 deletions
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 90fd5ec783..b8aa4fc16c 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -182,6 +182,12 @@ if (BUILD_TF_LITE_PARSER)
         ImagePreprocessor.hpp
         ImagePreprocessor.cpp)
     TfLiteParserTest(TfLiteVGG16Quantized-Armnn "${TfLiteVGG16Quantized-Armnn_sources}")
+
+    set(TfLiteInceptionV3Quantized-Armnn_sources
+        TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
+        ImagePreprocessor.hpp
+        ImagePreprocessor.cpp)
+    TfLiteParserTest(TfLiteInceptionV3Quantized-Armnn "${TfLiteInceptionV3Quantized-Armnn_sources}")
endif()
if (BUILD_ONNX_PARSER)
diff --git a/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
new file mode 100644
index 0000000000..4fa0e140f1
--- /dev/null
+++ b/tests/TfLiteInceptionV3Quantized-Armnn/TfLiteInceptionV3Quantized-Armnn.cpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "../InferenceTest.hpp"
+#include "../ImagePreprocessor.hpp"
+#include "armnnTfLiteParser/ITfLiteParser.hpp"
+
+using namespace armnnTfLiteParser;
+
+int main(int argc, char* argv[])
+{
+    int retVal = EXIT_FAILURE;
+    try
+    {
+        std::vector<ImageSet> imageSet =
+        {
+            {"Dog.jpg", 209},
+            {"Cat.jpg", 283},
+            {"shark.jpg", 3},
+
+        };
+
+        armnn::TensorShape inputTensorShape({ 1, 299, 299, 3 });
+
+        using DataType = uint8_t;
+        using DatabaseType = ImagePreprocessor<DataType>;
+        using ParserType = armnnTfLiteParser::ITfLiteParser;
+        using ModelType = InferenceModel<ParserType, DataType>;
+
+        // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
+        retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType,
+                                                          ParserType>(
+                     argc, argv,
+                     "inception_v3_quant.tflite",          // model name
+                     true,                                 // model is binary
+                     "input",                              // input tensor name
+                     "output",                             // output tensor name
+                     { 0, 1, 2 },                          // test images to test with as above
+                     [&imageSet](const char* dataDir, const ModelType & model) {
+                         // we need to get the input quantization parameters from
+                         // the parsed model
+                         auto inputBinding = model.GetInputBindingInfo();
+                         return DatabaseType(
+                             dataDir,
+                             299,
+                             299,
+                             imageSet,
+                             inputBinding.second.GetQuantizationScale(),
+                             inputBinding.second.GetQuantizationOffset());
+                     },
+                     &inputTensorShape);
+    }
+    catch (const std::exception& e)
+    {
+        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
+        // exception of type std::length_error.
+        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
+        std::cerr << "WARNING: " << *argv << ": An error has occurred when running "
+                     "the classifier inference tests: " << e.what() << std::endl;
+    }
+    return retVal;
+}
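
Note on the preprocessing lambda in the test above: the quantization scale and offset obtained from model.GetInputBindingInfo() are what allow ImagePreprocessor to map floating-point pixel values into the uint8 domain the quantized InceptionV3 input expects. The standalone sketch below illustrates the usual asymmetric quantization step (quantized = round(value / scale) + offset, clamped to [0, 255]); QuantizeU8 and the example scale/offset values are hypothetical and are not part of this patch or of ImagePreprocessor itself.

// Illustration only: a hypothetical helper, not code from this patch.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

// Map a float (e.g. a normalised pixel value) to uint8 using the scale/offset
// read from the parsed model's input binding.
uint8_t QuantizeU8(float value, float scale, int32_t offset)
{
    int32_t q = static_cast<int32_t>(std::round(value / scale)) + offset;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));  // clamp to the uint8 range
}

int main()
{
    // Example numbers only; the real values come from the .tflite model.
    const float scale = 0.0078125f;
    const int32_t offset = 128;
    std::cout << static_cast<int>(QuantizeU8(0.5f, scale, offset)) << std::endl;  // prints 192
    return 0;
}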