From 6940dd720ebb6b3d1df8ca203ab696daefe58189 Mon Sep 17 00:00:00 2001
From: Jim Flynn
Date: Fri, 20 Mar 2020 12:25:56 +0000
Subject: renamed Documentation folder 20.02 and added .nojekyll file

Signed-off-by: Jim Flynn
---
 ..._net_quantized_softmax-_armnn_8cpp_source.xhtml | 122 +++++++++++++++++++++
 1 file changed, 122 insertions(+)
 create mode 100644 20.02/_tf_lite_mobile_net_quantized_softmax-_armnn_8cpp_source.xhtml

(limited to '20.02/_tf_lite_mobile_net_quantized_softmax-_armnn_8cpp_source.xhtml')

diff --git a/20.02/_tf_lite_mobile_net_quantized_softmax-_armnn_8cpp_source.xhtml b/20.02/_tf_lite_mobile_net_quantized_softmax-_armnn_8cpp_source.xhtml
new file mode 100644
index 0000000000..2937fc6865
--- /dev/null
+++ b/20.02/_tf_lite_mobile_net_quantized_softmax-_armnn_8cpp_source.xhtml
@@ -0,0 +1,122 @@

Page title: ArmNN: tests/TfLiteMobileNetQuantizedSoftmax-Armnn/TfLiteMobileNetQuantizedSoftmax-Armnn.cpp Source File
ArmNN 20.02

TfLiteMobileNetQuantizedSoftmax-Armnn.cpp

Go to the documentation of this file.
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "../InferenceTest.hpp"
#include "../ImagePreprocessor.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"

using namespace armnnTfLiteParser;

int main(int argc, char* argv[])
{
    int retVal = EXIT_FAILURE;
    try
    {
        // Coverity fix: The following code may throw an exception of type std::length_error.
        // The model we are using incorrectly classifies the images,
        // but it can still be used for benchmarking the layers.
        std::vector<ImageSet> imageSet =
        {
            {"Dog.jpg", 789},
            {"Cat.jpg", 592},
            {"shark.jpg", 755},
        };

        armnn::TensorShape inputTensorShape({ 1, 128, 128, 3 });

        using DataType = uint8_t;
        using DatabaseType = ImagePreprocessor<DataType>;
        using ParserType = armnnTfLiteParser::ITfLiteParser;
        using ModelType = InferenceModel<ParserType, DataType>;

        // Coverity fix: ClassifierInferenceTestMain() may throw uncaught exceptions.
        retVal = armnn::test::ClassifierInferenceTestMain<DatabaseType, ParserType>(
            argc, argv,
            "mobilenet_v1_0.25_128_quant.tflite", // model name
            true,                                 // model is binary
            "input",                              // input tensor name
            "MobilenetV1/Predictions/Reshape_1",  // output tensor name
            { 0, 1, 2 },                          // test images to test with, as above
            [&imageSet](const char* dataDir, const ModelType&)
            {
                // We need to get the input quantization parameters from
                // the parsed model.
                return DatabaseType(
                    dataDir,
                    128,
                    128,
                    imageSet,
                    1,
                    {{0, 0, 0}},
                    {{1, 1, 1}},
                    DatabaseType::DataFormat::NCHW,
                    1);
            },
            &inputTensorShape);
    }
    catch (const std::exception& e)
    {
        // Coverity fix: BOOST_LOG_TRIVIAL (typically used to report errors) may throw an
        // exception of type std::length_error.
        // Using stderr instead in this context as there is no point in nesting try-catch blocks here.
        std::cerr << "WARNING: " << *argv << ": An error has occurred when running "
                     "the classifier inference tests: " << e.what() << std::endl;
    }
    return retVal;
}
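Note that the lambda above never touches its ModelType argument: judging by the hard-coded preprocessing arguments (scale 1, mean {0, 0, 0}, stddev {1, 1, 1}), the quantized uint8 image bytes are fed to the network essentially unmodified, and the comment about reading the input quantization parameters from the parsed model describes what a more faithful setup would do. The sketch below shows one way those parameters could be queried directly through the standalone armnnTfLiteParser interface. It is illustrative only, not part of this test: it assumes the 20.02 armnnTfLiteParser headers are on the include path and that the .tflite file is resolvable from the working directory; the binding name "input" is taken from the test above.

//
// Minimal sketch (assumption-laden, not from this file): print the quantization
// scale and offset of the "input" tensor of the same .tflite model.
//
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include <iostream>

int main()
{
    using namespace armnnTfLiteParser;

    // Create a parser and load the model used by the benchmark above.
    ITfLiteParserPtr parser = ITfLiteParser::Create();
    armnn::INetworkPtr network =
        parser->CreateNetworkFromBinaryFile("mobilenet_v1_0.25_128_quant.tflite");

    // BindingPointInfo is a (layer binding id, TensorInfo) pair; the TensorInfo
    // carries the quantization parameters of the input tensor.
    BindingPointInfo inputBinding = parser->GetNetworkInputBindingInfo(0, "input");
    const armnn::TensorInfo& inputInfo = inputBinding.second;

    std::cout << "input quantization scale:  " << inputInfo.GetQuantizationScale()  << "\n"
              << "input quantization offset: " << inputInfo.GetQuantizationOffset() << std::endl;

    return 0;
}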
Referenced symbols:

int main(int argc, char *argv[])

DataType
Definition: Types.hpp:32

int ClassifierInferenceTestMain(int argc, char *argv[], const char *modelFilename, bool isModelBinary, const char *inputBindingName, const char *outputBindingName, const std::vector< unsigned int > &defaultTestCaseIds, TConstructDatabaseCallable constructDatabase, const armnn::TensorShape *inputTensorShape=nullptr)