-rw-r--r--  Readme.md                                                                      2
-rw-r--r--  docs/documentation.md                                                          1
-rw-r--r--  docs/use_cases/visual_wake_word.md                                             400
-rw-r--r--  resources/vww/labels/visual_wake_word_labels.txt                               2
-rw-r--r--  resources/vww/samples/adult_blur.png                                           bin 0 -> 28081 bytes
-rw-r--r--  resources/vww/samples/files.md                                                 10
-rw-r--r--  resources/vww/samples/man_in_red_jacket.png                                    bin 0 -> 30024 bytes
-rwxr-xr-x  set_up_default_resources.py                                                    9
-rw-r--r--  source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld     4
-rw-r--r--  source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld  28
-rw-r--r--  source/application/main/UseCaseCommonUtils.cc                                  265
-rw-r--r--  source/application/main/include/UseCaseCommonUtils.hpp                         87
-rw-r--r--  source/use_case/ad/src/UseCaseHandler.cc                                       40
-rw-r--r--  source/use_case/asr/src/UseCaseHandler.cc                                      41
-rw-r--r--  source/use_case/img_class/src/MainLoop.cc                                      37
-rw-r--r--  source/use_case/img_class/src/UseCaseHandler.cc                                116
-rw-r--r--  source/use_case/kws/src/UseCaseHandler.cc                                      43
-rw-r--r--  source/use_case/kws_asr/src/UseCaseHandler.cc                                  40
-rw-r--r--  source/use_case/vww/include/UseCaseHandler.hpp                                 37
-rw-r--r--  source/use_case/vww/include/VisualWakeWordModel.hpp                            48
-rw-r--r--  source/use_case/vww/src/MainLoop.cc                                            91
-rw-r--r--  source/use_case/vww/src/UseCaseHandler.cc                                      182
-rw-r--r--  source/use_case/vww/src/VisualWakeWordModel.cc                                 57
-rw-r--r--  source/use_case/vww/usecase.cmake                                              62
-rw-r--r--  tests/use_case/vww/InferenceVisualWakeWordModelTests.cc                        81
-rw-r--r--  tests/use_case/vww/VisualWakeWordTests.cc                                      18
-rw-r--r--  tests/use_case/vww/VisualWakeWordUCTests.cc                                    135
27 files changed, 1468 insertions(+), 368 deletions(-)
diff --git a/Readme.md b/Readme.md
index 63efa70..b2a20b5 100644
--- a/Readme.md
+++ b/Readme.md
@@ -32,6 +32,7 @@ The example application at your disposal and the utilized models are listed in t
| [Automated Speech Recognition(ASR)](./docs/use_cases/asr.md) | Transcribe words in a recording | [Wav2Letter](https://github.com/ARM-software/ML-zoo/tree/1a92aa08c0de49a7304e0a7f3f59df6f4fd33ac8/models/speech_recognition/wav2letter/tflite_int8) |
| [KWS and ASR](./docs/use_cases/kws_asr.md) | Utilise Cortex-M and Ethos-U to transcribe words in a recording after a keyword was spotted | [DS-CNN-L](https://github.com/ARM-software/ML-zoo/tree/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8) [Wav2Letter](https://github.com/ARM-software/ML-zoo/tree/1a92aa08c0de49a7304e0a7f3f59df6f4fd33ac8/models/speech_recognition/wav2letter/tflite_int8) |
| [Anomaly Detection](./docs/use_cases/ad.md) | Detecting abnormal behavior based on a sound recording of a machine | [Anomaly Detection](https://github.com/ARM-software/ML-zoo/tree/7c32b097f7d94aae2cd0b98a8ed5a3ba81e66b18/models/anomaly_detection/micronet_medium/tflite_int8/)|
+| [Visual Wake Word](./docs/use_cases/visual_wake_word.md) | Recognize if a person is present in a given image | [Visual Wake Word](https://github.com/ARM-software/ML-zoo/tree/7dd3b16bb84007daf88be8648983c07f3eb21140/models/visual_wake_words/micronet_vww4/tflite_int8/vww4_128_128_INT8.tflite)|
| [Generic inference runner](./docs/use_cases/inference_runner.md) | Code block allowing you to develop your own use case for Ethos-U NPU | Your custom model |
The above use cases implement an end-to-end ML flow including data pre-processing and post-processing. They will allow you
@@ -188,3 +189,4 @@ Application input data sample files are provided under their original license:
| [Image Classification Samples](./resources/img_class/samples/files.md) | [Creative Commons Attribution 1.0](./resources/LICENSE_CC_1.0.txt) | <https://www.pexels.com> |
| [Keyword Spotting Samples](./resources/kws/samples/files.md) | [Creative Commons Attribution 4.0 International Public License](./resources/LICENSE_CC_4.0.txt) | <http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz> |
| [Keyword Spotting and Automatic Speech Recognition Samples](./resources/kws_asr/samples/files.md) | [Creative Commons Attribution 4.0 International Public License](./resources/LICENSE_CC_4.0.txt) | <http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz> |
+| [Visual Wake Word Samples](./resources/vww/samples/files.md) | [Creative Commons Attribution 1.0](./resources/LICENSE_CC_1.0.txt) | <https://www.pexels.com> | \ No newline at end of file
diff --git a/docs/documentation.md b/docs/documentation.md
index 59c1650..28b9eda 100644
--- a/docs/documentation.md
+++ b/docs/documentation.md
@@ -210,6 +210,7 @@ The models used in the use-cases implemented in this project can be downloaded f
- [DS-CNN](https://github.com/ARM-software/ML-zoo/tree/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b//models/keyword_spotting/ds_cnn_large/tflite_clustered_int8).
- [Wav2Letter](https://github.com/ARM-software/ML-zoo/tree/1a92aa08c0de49a7304e0a7f3f59df6f4fd33ac8/models/speech_recognition/wav2letter/tflite_pruned_int8).
- [Anomaly Detection](https://github.com/ARM-software/ML-zoo/tree/7c32b097f7d94aae2cd0b98a8ed5a3ba81e66b18/models/anomaly_detection/micronet_medium/tflite_int8).
+- [Visual Wake Word](https://github.com/ARM-software/ML-zoo/raw/7dd3b16bb84007daf88be8648983c07f3eb21140/models/visual_wake_words/micronet_vww4/tflite_int8/vww4_128_128_INT8.tflite).
When using the *Ethos-U* NPU backend, the Vela compiler optimizes the NN model. If the NPU is not used and the model
is supported by TensorFlow Lite Micro, it falls back to the CPU for execution.
diff --git a/docs/use_cases/visual_wake_word.md b/docs/use_cases/visual_wake_word.md
new file mode 100644
index 0000000..cdec185
--- /dev/null
+++ b/docs/use_cases/visual_wake_word.md
@@ -0,0 +1,400 @@
+# Visual Wake Word Code Sample
+
+- [Visual Wake Word Code Sample](#visual-wake-word-code-sample)
+ - [Introduction](#introduction)
+ - [Prerequisites](#prerequisites)
+ - [Building the Code Samples application from sources](#building-the-code-samples-application-from-sources)
+ - [Build options](#build-options)
+ - [Build process](#build-process)
+ - [Add custom input](#add-custom-input)
+ - [Add custom model](#add-custom-model)
+ - [Setting up and running Ethos-U NPU code sample](#setting-up-and-running-ethos_u-npu-code-sample)
+ - [Setting up the Ethos-U NPU Fast Model](#setting-up-the-ethos_u-npu-fast-model)
+ - [Starting Fast Model simulation](#starting-fast-model-simulation)
+ - [Running Visual Wake Word](#running-visual-wake-word)
+
+## Introduction
+
+This document describes the process of setting up and running the Arm® Ethos™-U NPU Visual Wake Word example.
+
+The use-case code can be found in the [source/use_case/vww](../../source/use_case/vww) directory.
+
+### Prerequisites
+
+See [Prerequisites](../documentation.md#prerequisites)
+
+## Building the Code Samples application from sources
+
+### Build options
+
+In addition to the build options already specified in the main documentation, the Visual Wake Word use case specifies:
+
+- `vww_MODEL_TFLITE_PATH` - Path to the NN model file in the `TFLite` format. The model is then processed and included in
+ the application `axf` file. The default value points to one of the delivered set of models.
+ Note that the parameters `vww_LABELS_TXT_FILE`, `TARGET_PLATFORM`, and `ETHOS_U_NPU_ENABLED` must be aligned with the
+ chosen model. In other words:
+  - If `ETHOS_U_NPU_ENABLED` is set to `On` or `1`, then the NN model is assumed to be optimized. If an unoptimized
+    model is supplied, execution naturally falls back to the Arm® *Cortex®-M* CPU.
+  - If `ETHOS_U_NPU_ENABLED` is set to `Off` or `0`, the NN model is assumed to be unoptimized. Supplying an optimized
+    model in this case results in a runtime error.
+
+- `vww_FILE_PATH`: Path to the directory or file to be used as custom image input for the evaluation
+  application. The default value points to the `resources/vww/samples` folder, which contains the delivered set
+  of images. See the [Add custom input](#add-custom-input) section for more details.
+
+- `vww_IMAGE_SIZE`: The NN model requires input images to be of a specific size. This parameter defines the
+  length of the image side in pixels. Images are assumed to be square. The default value is 128, which is what the
+  supplied visual wake word model expects.
+
+- `vww_LABELS_TXT_FILE`: Path to the labels text file to be baked into the application. The file is used
+  to map the classified class indices to text labels. Change this parameter to point to a custom labels file so that
+  a custom NN model's output is mapped correctly.\
+  The default value points to the `visual_wake_word_labels.txt` file delivered with the package.
+
+- `vww_ACTIVATION_BUF_SZ`: The intermediate/activation buffer size reserved for the NN model. By default,
+  it is set to 2MiB, which should suffice for most models. A combined configuration example covering these options
+  is shown below.
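+
+For illustration, several of these options can be set together in a single CMake configuration. The paths below are
+placeholders in the style used elsewhere in this document, not files shipped with the package:
+
+```commandline
+cmake .. \
+    -Dvww_MODEL_TFLITE_PATH=<path/to/model_after_vela.tflite> \
+    -Dvww_LABELS_TXT_FILE=<path/to/labels.txt> \
+    -Dvww_FILE_PATH=<path/to/images/> \
+    -Dvww_IMAGE_SIZE=128 \
+    -Dvww_ACTIVATION_BUF_SZ=0x00200000 \
+    -DUSE_CASE_BUILD=vww
+```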
+
+### Build process
+
+> **Note:** This section describes the process of configuring the build for the `MPS3: SSE-300` platform. For
+> different target platforms, see the [Building](../documentation.md#Building) section.
+
+Create a build directory and navigate inside:
+
+```commandline
+mkdir build_visual_wake_word && cd build_visual_wake_word
+```
+
+On Linux, execute the following command to build **only** the Visual Wake Word application to run on the Ethos-U55
+Fast Model, providing only the mandatory arguments for CMake configuration:
+
+```commandline
+cmake ../ -DUSE_CASE_BUILD=vww
+```
+
+To configure a build that can be debugged using Arm-DS, specify the build type as `Debug` and use the `Arm
+Compiler` toolchain file:
+
+```commandline
+cmake .. \
+ -DCMAKE_TOOLCHAIN_FILE=scripts/cmake/toolchains/bare-metal-armclang.cmake \
+ -DCMAKE_BUILD_TYPE=Debug \
+ -DUSE_CASE_BUILD=vww
+```
+
+Also see:
+
+- [Configuring with custom TPIP dependencies](../sections/building.md#configuring-with-custom-tpip-dependencies)
+- [Using Arm Compiler](../sections/building.md#using-arm-compiler)
+- [Configuring the build for simple-platform](../sections/building.md#configuring-the-build-for-simple_platform)
+- [Working with model debugger from Arm FastModel
+ Tools](../sections/building.md#working-with-model-debugger-from-arm-fastmodel-tools)
+- [Building for different Ethos-U NPU variants](../sections/building.md#building-for-different-ethos-u-npu-variants)
+
+> **Note:** If re-building with changed parameter values, it is highly advised to clean the build directory and re-run
+>the CMake command.
+
+If the CMake command succeeded, build the application as follows:
+
+```commandline
+make -j4
+```
+
+Add `VERBOSE=1` to see compilation and link details.
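+
+For example:
+
+```commandline
+make -j4 VERBOSE=1
+```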
+
+Results of the build will be placed under the `build/bin` folder:
+
+```tree
+bin
+ ├── ethos-u-vww.axf
+ ├── ethos-u-vww.htm
+ ├── ethos-u-vww.map
+ ├── images-vww.txt
+ └── sectors
+ └── vww
+ ├── ddr.bin
+ └── itcm.bin
+```
+
+Where:
+
+- `ethos-u-vww.axf`: The built application binary for the Visual Wake Word use case.
+
+- `ethos-u-vww.map`: Information from building the application (e.g. libraries used, what was optimized,
+  location of objects).
+
+- `ethos-u-vww.htm`: Human readable file containing the call graph of application functions.
+
+- `sectors/`: Folder containing the built application, split into files for loading into different FPGA memory regions.
+
+- `images-vww.txt`: Tells the FPGA which memory regions to use for loading the binaries in the `sectors/`
+  folder.
+
+### Add custom input
+
+The application performs inference on image data found in the folder set by the CMake parameter
+`vww_FILE_PATH`.
+
+To run the application with your own images, first create a folder to hold them and then copy the custom images into
+this folder:
+
+```commandline
+mkdir /tmp/custom_images
+
+cp custom_image1.bmp /tmp/custom_images/
+```
+
+> **Note:** Clean the build directory before re-running the cmake command.
+
+Next, set `vww_FILE_PATH` to the location of this folder when building:
+
+```commandline
+cmake .. \
+ -Dvww_FILE_PATH=/tmp/custom_images/ \
+ -DUSE_CASE_BUILD=vww
+```
+
+The images found in the `vww_FILE_PATH` folder will be picked up and automatically converted to C++ files
+during the CMake configuration stage, and then compiled into the application during the build phase for use in
+inference.
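+
+To give a sense of what the conversion produces, each image becomes a C++ translation unit holding the pixel data as
+a byte array that the application looks up by index. The sketch below is illustrative only: the array and function
+names are assumptions, since the real files are produced by the project's generator scripts:
+
+```C++
+/* custom_image1.cc -- sketch of a generated image file (illustrative names). */
+#include <cstdint>
+
+/* RGB888 pixel data for one 128 x 128 image (3 bytes per pixel). */
+static const uint8_t im0[128 * 128 * 3] = {
+    0x5a, 0x4e, 0x43, /* ...remaining pixel bytes elided for brevity... */
+};
+
+/* Return the pixel array for the image at the given index. */
+const uint8_t* GetImageArray(const uint32_t idx)
+{
+    return (idx == 0) ? im0 : nullptr;
+}
+```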
+
+The log from the configuration stage should tell you what image directory path has been used:
+
+```log
+-- User option vww_FILE_PATH is set to /tmp/custom_images
+-- User option vww_IMAGE_SIZE is set to 128
+...
+-- Generating image files from /tmp/custom_images
+++ Converting custom_image1.bmp to custom_image1.cc
+...
+-- Defined build user options:
+...
+-- vww_FILE_PATH=/tmp/custom_images
+-- vww_IMAGE_SIZE=128
+```
+
+After compiling, your custom images will have replaced the default ones in the application.
+
+> **Note:** The CMake parameter `vww_IMAGE_SIZE` should match the model input size. When building the
+> application, if the size of any image does not match `vww_IMAGE_SIZE`, it will be rescaled and padded so that it does.
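+
+Conceptually, that rescaling preserves the aspect ratio and pads the remainder of the square input. A minimal sketch
+of the geometry, assuming aspect-preserving letterboxing (an illustration of the idea, not the project's actual
+conversion script):
+
+```C++
+#include <algorithm>
+#include <cstdint>
+
+/* Compute the scaled dimensions for fitting a w x h image into a square
+ * side x side model input (e.g. side = 128). The border left over after
+ * scaling is filled with padding. */
+void LetterboxDims(uint32_t w, uint32_t h, uint32_t side,
+                   uint32_t& outW, uint32_t& outH)
+{
+    const float scale = static_cast<float>(side) / static_cast<float>(std::max(w, h));
+    outW = static_cast<uint32_t>(static_cast<float>(w) * scale); /* <= side */
+    outH = static_cast<uint32_t>(static_cast<float>(h) * scale); /* <= side */
+}
+```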
+
+### Add custom model
+
+The application performs inference using the model pointed to by the CMake parameter
+`vww_MODEL_TFLITE_PATH`.
+
+> **Note:** If you want to run the model using Ethos-U, ensure your custom model has been run through the Vela compiler
+> successfully before continuing.
+
+To run the application with a custom model, you will need to provide a `labels_<model_name>.txt` file of labels
+associated with the model. Each line of the file should correspond to one of the outputs in your model. See the
+provided `visual_wake_word_labels.txt` file for an example.
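+
+For the delivered binary person-detection model, the labels file has exactly two lines, one per output class:
+
+```log
+not person
+person
+```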
+
+Then, you must set `vww_MODEL_TFLITE_PATH` to the location of the Vela processed model file and
+`vww_LABELS_TXT_FILE` to the location of the associated labels file.
+
+An example:
+
+```commandline
+cmake \
+ -Dvww_MODEL_TFLITE_PATH=<path/to/custom_model_after_vela.tflite> \
+ -Dvww_LABELS_TXT_FILE=<path/to/labels_custom_model.txt> \
+ -DUSE_CASE_BUILD=vww ..
+```
+
+> **Note:** Clean the build directory before re-running the cmake command.
+
+The TFLite model pointed to by `vww_MODEL_TFLITE_PATH` and the labels text file pointed to by
+`vww_LABELS_TXT_FILE` will be converted to C++ files during the CMake configuration stage and then compiled
+into the application for use in inference.
+
+The log from the configuration stage should tell you what model path and labels file have been used:
+
+```log
+-- User option vww_MODEL_TFLITE_PATH is set to <path/to/custom_model_after_vela.tflite>
+...
+-- User option vww_LABELS_TXT_FILE is set to <path/to/labels_custom_model.txt>
+...
+-- Using <path/to/custom_model_after_vela.tflite>
+++ Converting custom_model_after_vela.tflite to custom_model_after_vela.tflite.cc
+-- Generating labels file from <path/to/labels_custom_model.txt>
+-- writing to <path/to/build/generated/src/Labels.cc>
+...
+```
+
+After compiling, your custom model will have replaced the default one in the application.
+
+## Setting up and running Ethos-U NPU code sample
+
+### Setting up the Ethos-U NPU Fast Model
+
+The FVP is available publicly from [Arm Ecosystem FVP
+downloads](https://developer.arm.com/tools-and-software/open-source-software/arm-platforms-software/arm-ecosystem-fvps).
+
+For the *Ethos-U* evaluation, please download the MPS3-based version of the Arm® *Corstone™-300* model that contains
+*Cortex-M55* and offers a choice of the *Ethos-U55* and *Ethos-U65* processors.
+
+- Unpack the archive
+
+- Run the install script in the extracted package
+
+```commandline
+$ ./FVP_Corstone_SSE-300.sh
+```
+
+- Follow the instructions to install the FVP to your desired location
+
+### Starting Fast Model simulation
+
+The pre-built application binary `ethos-u-vww.axf` can be found in the `bin/mps3-sse-300` folder of the delivery
+package. Assuming the install location of the FVP was set to `~/FVP_install_location`, the simulation can be started by:
+
+```commandline
+$ ~/FVP_install_location/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55
+./bin/mps3-sse-300/ethos-u-vww.axf
+```
+
+A log output should appear on the terminal:
+
+```log
+telnetterminal0: Listening for serial connection on port 5000
+telnetterminal1: Listening for serial connection on port 5001
+telnetterminal2: Listening for serial connection on port 5002
+telnetterminal5: Listening for serial connection on port 5003
+```
+
+This will also launch a telnet window with the sample application's standard output and error log entries containing
+information about the pre-built application version, TensorFlow Lite Micro library version used, data type as well as
+the input and output tensor sizes of the model compiled into the executable binary.
+
+After the application has started, if `vww_FILE_PATH` points to a single file (or a folder containing a
+single image), the inference starts immediately. If multiple inputs are available, the application outputs a menu and
+waits for user input from the telnet terminal:
+
+```log
+User input required
+Enter option number from:
+
+1. Classify next image
+2. Classify image at chosen index
+3. Run classification on all images
+4. Show NN model info
+5. List images
+
+Choice:
+
+```
+
+1. “Classify next image” menu option will run a single inference on the next image in line from the collection of
+   compiled images.
+
+2. “Classify image at chosen index” menu option will run a single inference on the chosen image.
+
+   > **Note:** Please make sure to select an image index in the range of images supplied during the application
+   > build. By default, the pre-built application has two images, so the index should be 0 or 1.
+
+3. “Run classification on all images” menu option triggers sequential inference executions on all built-in images.
+
+4. “Show NN model info” menu option prints information about the model data type and the input and output tensor sizes:
+
+ ```log
+ INFO - uTFL version: 2.5.0
+ INFO - Added ethos-u support to op resolver
+ INFO - Creating allocator using tensor arena in SRAM
+ INFO - Allocating tensors
+ INFO - Model INPUT tensors:
+ INFO - tensor type is INT8
+ INFO - tensor occupies 16384 bytes with dimensions
+ INFO - 0: 1
+ INFO - 1: 128
+ INFO - 2: 128
+ INFO - 3: 1
+ INFO - Quant dimension: 0
+ INFO - Scale[0] = 0.008138
+ INFO - ZeroPoint[0] = -70
+ INFO - Model OUTPUT tensors:
+ INFO - tensor type is INT8
+ INFO - tensor occupies 2 bytes with dimensions
+ INFO - 0: 1
+ INFO - 1: 2
+ INFO - Quant dimension: 0
+ INFO - Scale[0] = 0.022299
+ INFO - ZeroPoint[0] = -17
+ INFO - Activation buffer (a.k.a tensor arena) size used: 133292
+ INFO - Number of operators: 19
+ INFO - Operator 0: ethos-u
+ INFO - Operator 1: PAD
+ INFO - Operator 2: ethos-u
+ INFO - Operator 3: PAD
+ INFO - Operator 4: ethos-u
+ INFO - Operator 5: PAD
+ INFO - Operator 6: ethos-u
+ INFO - Operator 7: PAD
+ INFO - Operator 8: ethos-u
+ INFO - Operator 9: PAD
+ INFO - Operator 10: ethos-u
+ INFO - Operator 11: PAD
+ INFO - Operator 12: ethos-u
+ INFO - Operator 13: PAD
+ INFO - Operator 14: ethos-u
+ INFO - Operator 15: PAD
+ INFO - Operator 16: ethos-u
+ INFO - Operator 17: AVERAGE_POOL_2D
+ INFO - Operator 18: ethos-u
+ ```
+
+5. “List images” menu option prints a list of image index and filename pairs, showing the original filenames embedded in the application:
+
+ ```log
+ INFO - List of images:
+ INFO - 0 => adult_blur.png
+ INFO - 1 => man_in_red_jacket.png
+ ```
+
+### Running Visual Wake Word
+
+Please select the first menu option to execute Visual Wake Word.
+
+The following example illustrates application output for classification:
+
+```log
+INFO - Running inference on image 0 => adult_blur.png
+INFO - Final results:
+INFO - Total number of inferences: 1
+INFO - 0) 0 (0.601562) -> Person detected: No
+INFO - Profile for Inference :
+INFO - NPU AXI0_RD_DATA_BEAT_RECEIVED beats: 95992
+INFO - NPU AXI0_WR_DATA_BEAT_WRITTEN beats: 59735
+INFO - NPU AXI1_RD_DATA_BEAT_RECEIVED beats: 34477
+INFO - NPU ACTIVE cycles: 372782
+INFO - NPU IDLE cycles: 390
+INFO - NPU total cycles: 373172
+```
+
+It could take several minutes to complete one inference run (average time is 2-3 minutes).
+
+The log shows the inference results for “image 0” (0-based index), which corresponds to “adult_blur.png” in the
+sample image resource folder.
+
+The profiling section of the log shows that for this inference:
+
+- Ethos-U's PMU report:
+
+  - 373,172 total cycles: The number of NPU cycles. The active and idle counts below sum to this total
+    (372,782 + 390 = 373,172).
+
+  - 372,782 active cycles: The number of NPU cycles that were used for computation.
+
+  - 390 idle cycles: The number of cycles for which the NPU was idle.
+
+  - 95,992 AXI0 read beats: The number of AXI beats with read transactions from the AXI0 bus. AXI0 is the bus where
+    the Ethos-U NPU reads and writes the computation buffers (activation buffers/tensor arena).
+
+  - 59,735 AXI0 write beats: The number of AXI beats with write transactions to the AXI0 bus.
+
+  - 34,477 AXI1 read beats: The number of AXI beats with read transactions from the AXI1 bus. AXI1 is the bus where
+    the Ethos-U NPU reads the model (read only).
+
+- For FPGA platforms, CPU cycle count can also be enabled. For FVP, however, CPU cycle counters should not be used as
+ the CPU model is not cycle-approximate or cycle-accurate.
+
+The application prints each detection with its label index, confidence score, and the label from the associated
+`visual_wake_word_labels.txt` file.
diff --git a/resources/vww/labels/visual_wake_word_labels.txt b/resources/vww/labels/visual_wake_word_labels.txt
new file mode 100644
index 0000000..ec80465
--- /dev/null
+++ b/resources/vww/labels/visual_wake_word_labels.txt
@@ -0,0 +1,2 @@
+not person
+person \ No newline at end of file
diff --git a/resources/vww/samples/adult_blur.png b/resources/vww/samples/adult_blur.png
new file mode 100644
index 0000000..5f2cc94
--- /dev/null
+++ b/resources/vww/samples/adult_blur.png
Binary files differ
diff --git a/resources/vww/samples/files.md b/resources/vww/samples/files.md
new file mode 100644
index 0000000..13f5de6
--- /dev/null
+++ b/resources/vww/samples/files.md
@@ -0,0 +1,10 @@
+# Visual Wake Word Model
+
+The paper and dataset for the Visual Wake Words model are described here:
+* https://paperswithcode.com/dataset/visual-wake-words
+
+# Sample images
+
+The sample images provided are under the Creative Commons license. The links are documented here for traceability:
+* https://www.pexels.com/photo/man-in-red-jacket-1681010/
+* https://www.pexels.com/photo/adult-blur-camera-casual-598917/
diff --git a/resources/vww/samples/man_in_red_jacket.png b/resources/vww/samples/man_in_red_jacket.png
new file mode 100644
index 0000000..0bcc9f4
--- /dev/null
+++ b/resources/vww/samples/man_in_red_jacket.png
Binary files differ
diff --git a/set_up_default_resources.py b/set_up_default_resources.py
index f8d7f8c..219cb3c 100755
--- a/set_up_default_resources.py
+++ b/set_up_default_resources.py
@@ -61,6 +61,15 @@ json_uc_res = [{
{"name": "ofm0.npy",
"url": "https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8/testing_output/Identity/0.npy"}]
},
+ {
+ "use_case_name": "vww",
+ "resources": [{"name": "vww4_128_128_INT8.tflite",
+ "url": "https://github.com/ARM-software/ML-zoo/raw/7dd3b16bb84007daf88be8648983c07f3eb21140/models/visual_wake_words/micronet_vww4/tflite_int8/vww4_128_128_INT8.tflite"},
+ {"name": "ifm0.npy",
+ "url": "https://github.com/ARM-software/ML-zoo/raw/7dd3b16bb84007daf88be8648983c07f3eb21140/models/visual_wake_words/micronet_vww4/tflite_int8/testing_input/input/0.npy"},
+ {"name": "ofm0.npy",
+ "url": "https://github.com/ARM-software/ML-zoo/raw/7dd3b16bb84007daf88be8648983c07f3eb21140/models/visual_wake_words/micronet_vww4/tflite_int8/testing_output/Identity/0.npy"}]
+ },
{
"use_case_name": "kws_asr",
"resources": [{"name": "wav2letter_pruned_int8.tflite",
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
index 46fc2e5..ceaff7d 100644
--- a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
@@ -72,7 +72,7 @@ SECTIONS
* instead placed on BRAM. See comment in the
* BRAM section for details.
**/
- *(EXCLUDE_FILE(*all_ops_resolver.o) .text*)
+ *(EXCLUDE_FILE(*all_ops_resolver.o *hal.c.obj) .text*)
KEEP(*(.init))
KEEP(*(.fini))
@@ -221,6 +221,8 @@ SECTIONS
**/
*all_ops_resolver.o (*.text*)
. = ALIGN(4);
+ *hal.c.obj (*.text*)
+ . = ALIGN(4);
__data_end__ = .;
} > BRAM
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld b/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld
index 8bb99cd..ceaff7d 100644
--- a/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/simple_platform.ld
@@ -65,7 +65,14 @@ SECTIONS
.text.at_itcm :
{
KEEP(*(.vectors))
- *(.text*)
+
+ /**
+ * All code goes here, with one exception of
+ * all_ops_resolver object file. This code
+ * instead placed on BRAM. See comment in the
+ * BRAM section for details.
+ **/
+ *(EXCLUDE_FILE(*all_ops_resolver.o *hal.c.obj) .text*)
KEEP(*(.init))
KEEP(*(.fini))
@@ -87,11 +94,6 @@ SECTIONS
KEEP(*(.eh_frame*))
} > ITCM
- .ARM.extab.at_itcm :
- {
- *(.ARM.extab* .gnu.linkonce.armextab.*)
- } > ITCM
-
__exidx_start = .;
.ARM.exidx.at_itcm :
{
@@ -208,6 +210,20 @@ SECTIONS
KEEP(*(.jcr*))
. = ALIGN(4);
+ *(.ARM.extab* .gnu.linkonce.armextab.*)
+ . = ALIGN(4);
+
+ /**
+ * Place the all ops resolver code data here. This accounts
+ * for ~4k worth of saving on the ITCM load region. It is
+ * only designed to be included (by default) for the inference
+ * runner use case.
+ **/
+ *all_ops_resolver.o (*.text*)
+ . = ALIGN(4);
+ *hal.c.obj (*.text*)
+ . = ALIGN(4);
+
__data_end__ = .;
} > BRAM
diff --git a/source/application/main/UseCaseCommonUtils.cc b/source/application/main/UseCaseCommonUtils.cc
index 615f684..9834475 100644
--- a/source/application/main/UseCaseCommonUtils.cc
+++ b/source/application/main/UseCaseCommonUtils.cc
@@ -15,91 +15,230 @@
* limitations under the License.
*/
#include "UseCaseCommonUtils.hpp"
-
#include "InputFiles.hpp"
-
#include <inttypes.h>
-namespace arm {
-namespace app {
- bool RunInference(arm::app::Model& model, Profiler& profiler)
- {
- profiler.StartProfiling("Inference");
- bool runInf = model.RunInference();
- profiler.StopProfiling();
+void DisplayCommonMenu()
+{
+ printf("\n\n");
+ printf("User input required\n");
+ printf("Enter option number from:\n\n");
+ printf(" %u. Classify next ifm\n", common::MENU_OPT_RUN_INF_NEXT);
+ printf(" %u. Classify ifm at chosen index\n", common::MENU_OPT_RUN_INF_CHOSEN);
+ printf(" %u. Run classification on all ifm\n", common::MENU_OPT_RUN_INF_ALL);
+ printf(" %u. Show NN model info\n", common::MENU_OPT_SHOW_MODEL_INFO);
+ printf(" %u. List ifm\n\n", common::MENU_OPT_LIST_IFM);
+ printf(" Choice: ");
+ fflush(stdout);
+}
+
+void image::ConvertImgToInt8(void* data, const size_t kMaxImageSize)
+{
+ auto* tmp_req_data = (uint8_t*) data;
+ auto* tmp_signed_req_data = (int8_t*) data;
- return runInf;
+ for (size_t i = 0; i < kMaxImageSize; i++) {
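+        /* Shift the unsigned range [0, 255] to the signed range [-128, 127] in place. */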
+ tmp_signed_req_data[i] = (int8_t) (
+ (int32_t) (tmp_req_data[i]) - 128);
}
+}
- int ReadUserInputAsInt(hal_platform& platform)
- {
- char chInput[128];
- memset(chInput, 0, sizeof(chInput));
+bool image::PresentInferenceResult(hal_platform& platform,
+ const std::vector<arm::app::ClassificationResult>& results)
+{
+ return PresentInferenceResult(platform, results, false);
+}
- platform.data_acq->get_input(chInput, sizeof(chInput));
- return atoi(chInput);
- }
+bool image::PresentInferenceResult(hal_platform &platform,
+ const std::vector<arm::app::ClassificationResult> &results,
+ const time_t infTimeMs)
+{
+ return PresentInferenceResult(platform, results, true, infTimeMs);
+}
+
+
+bool image::PresentInferenceResult(hal_platform &platform,
+ const std::vector<arm::app::ClassificationResult> &results,
+ bool profilingEnabled,
+ const time_t infTimeMs)
+{
+ constexpr uint32_t dataPsnTxtStartX1 = 150;
+ constexpr uint32_t dataPsnTxtStartY1 = 30;
- void DumpTensorData(const uint8_t* tensorData,
- size_t size,
- size_t lineBreakForNumElements)
+ constexpr uint32_t dataPsnTxtStartX2 = 10;
+ constexpr uint32_t dataPsnTxtStartY2 = 150;
+
+ constexpr uint32_t dataPsnTxtYIncr = 16; /* Row index increment. */
+
+ if(profilingEnabled)
+ {
+ platform.data_psn->set_text_color(COLOR_YELLOW);
+
+ /* If profiling is enabled, and the time is valid. */
+ info("Final results:\n");
+ info("Total number of inferences: 1\n");
+ if (infTimeMs)
{
- char strhex[8];
- std::string strdump;
-
- for (size_t i = 0; i < size; ++i) {
- if (0 == i % lineBreakForNumElements) {
- printf("%s\n\t", strdump.c_str());
- strdump.clear();
- }
- snprintf(strhex, sizeof(strhex) - 1,
- "0x%02x, ", tensorData[i]);
- strdump += std::string(strhex);
- }
-
- if (!strdump.empty()) {
- printf("%s\n", strdump.c_str());
- }
+ std::string strInf =
+ std::string{"Inference: "} +
+ std::to_string(infTimeMs) +
+ std::string{"ms"};
+ platform.data_psn->present_data_text(
+ strInf.c_str(), strInf.size(),
+ dataPsnTxtStartX1, dataPsnTxtStartY1, 0);
}
+ }
+ platform.data_psn->set_text_color(COLOR_GREEN);
+
+ /* Display each result. */
+ uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;
+ uint32_t rowIdx2 = dataPsnTxtStartY2;
- void DumpTensor(const TfLiteTensor* tensor, const size_t lineBreakForNumElements)
+ if(!profilingEnabled)
{
- if (!tensor) {
- printf_err("invalid tensor\n");
- return;
+ info("Final results:\n");
+ info("Total number of inferences: 1\n");
+ }
+
+ for (uint32_t i = 0; i < results.size(); ++i) {
+ std::string resultStr =
+ std::to_string(i + 1) + ") " +
+ std::to_string(results[i].m_labelIdx) +
+ " (" + std::to_string(results[i].m_normalisedVal) + ")";
+
+ platform.data_psn->present_data_text(
+ resultStr.c_str(), resultStr.size(),
+ dataPsnTxtStartX1, rowIdx1, 0);
+ rowIdx1 += dataPsnTxtYIncr;
+
+ resultStr = std::to_string(i + 1) + ") " + results[i].m_label;
+ platform.data_psn->present_data_text(
+ resultStr.c_str(), resultStr.size(),
+ dataPsnTxtStartX2, rowIdx2, 0);
+ rowIdx2 += dataPsnTxtYIncr;
+
+ if(profilingEnabled)
+ {
+ info("%" PRIu32 ") %" PRIu32 " (%f) -> %s\n", i, results[i].m_labelIdx,
+ results[i].m_normalisedVal, results[i].m_label.c_str());
}
+ else
+ {
+ info("%" PRIu32 ") %" PRIu32 " (%f) -> %s\n", i,
+ results[i].m_labelIdx, results[i].m_normalisedVal,
+ results[i].m_label.c_str());
+ }
+ }
- const uint32_t tensorSz = tensor->bytes;
- const uint8_t* tensorData = tflite::GetTensorData<uint8_t>(tensor);
+ return true;
+}
- DumpTensorData(tensorData, tensorSz, lineBreakForNumElements);
+void IncrementAppCtxIfmIdx(arm::app::ApplicationContext& ctx, std::string useCase)
+{
+ auto curImIdx = ctx.Get<uint32_t>(useCase);
+
+ if (curImIdx + 1 >= NUMBER_OF_FILES) {
+ ctx.Set<uint32_t>(useCase, 0);
+ return;
}
+ ++curImIdx;
+ ctx.Set<uint32_t>(useCase, curImIdx);
+}
- bool ListFilesHandler(ApplicationContext& ctx)
- {
- auto& model = ctx.Get<Model&>("model");
- auto& platform = ctx.Get<hal_platform&>("platform");
+bool SetAppCtxIfmIdx(arm::app::ApplicationContext& ctx, uint32_t idx, std::string ctxIfmName)
+{
+ if (idx >= NUMBER_OF_FILES) {
+ printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
+ idx, NUMBER_OF_FILES);
+ return false;
+ }
+ ctx.Set<uint32_t>(ctxIfmName, idx);
+ return true;
+}
+
+
+namespace arm {
+namespace app {
+
+
+bool RunInference(arm::app::Model& model, Profiler& profiler)
+{
+ profiler.StartProfiling("Inference");
+ bool runInf = model.RunInference();
+ profiler.StopProfiling();
+
+ return runInf;
+}
+
+int ReadUserInputAsInt(hal_platform& platform)
+{
+ char chInput[128];
+ memset(chInput, 0, sizeof(chInput));
+
+ platform.data_acq->get_input(chInput, sizeof(chInput));
+ return atoi(chInput);
+}
- constexpr uint32_t dataPsnTxtStartX = 20;
- constexpr uint32_t dataPsnTxtStartY = 40;
+void DumpTensorData(const uint8_t* tensorData,
+ size_t size,
+ size_t lineBreakForNumElements)
+{
+ char strhex[8];
+ std::string strdump;
- if (!model.IsInited()) {
- printf_err("Model is not initialised! Terminating processing.\n");
- return false;
+ for (size_t i = 0; i < size; ++i) {
+ if (0 == i % lineBreakForNumElements) {
+ printf("%s\n\t", strdump.c_str());
+ strdump.clear();
}
+ snprintf(strhex, sizeof(strhex) - 1,
+ "0x%02x, ", tensorData[i]);
+ strdump += std::string(strhex);
+ }
+
+ if (!strdump.empty()) {
+ printf("%s\n", strdump.c_str());
+ }
+}
+
+void DumpTensor(const TfLiteTensor* tensor, const size_t lineBreakForNumElements)
+{
+ if (!tensor) {
+ printf_err("invalid tensor\n");
+ return;
+ }
+
+ const uint32_t tensorSz = tensor->bytes;
+ const uint8_t* tensorData = tflite::GetTensorData<uint8_t>(tensor);
- /* Clear the LCD */
- platform.data_psn->clear(COLOR_BLACK);
+ DumpTensorData(tensorData, tensorSz, lineBreakForNumElements);
+}
- /* Show the total number of embedded files. */
- std::string strNumFiles = std::string{"Total Number of Files: "} +
- std::to_string(NUMBER_OF_FILES);
- platform.data_psn->present_data_text(strNumFiles.c_str(),
- strNumFiles.size(),
- dataPsnTxtStartX,
- dataPsnTxtStartY,
- false);
+bool ListFilesHandler(ApplicationContext& ctx)
+{
+ auto& model = ctx.Get<Model&>("model");
+ auto& platform = ctx.Get<hal_platform&>("platform");
+
+ constexpr uint32_t dataPsnTxtStartX = 20;
+ constexpr uint32_t dataPsnTxtStartY = 40;
+
+ if (!model.IsInited()) {
+ printf_err("Model is not initialised! Terminating processing.\n");
+ return false;
+ }
+
+ /* Clear the LCD */
+ platform.data_psn->clear(COLOR_BLACK);
+
+ /* Show the total number of embedded files. */
+ std::string strNumFiles = std::string{"Total Number of Files: "} +
+ std::to_string(NUMBER_OF_FILES);
+ platform.data_psn->present_data_text(strNumFiles.c_str(),
+ strNumFiles.size(),
+ dataPsnTxtStartX,
+ dataPsnTxtStartY,
+ false);
#if NUMBER_OF_FILES > 0
constexpr uint32_t dataPsnTxtYIncr = 16;
@@ -117,7 +256,7 @@ namespace app {
#endif /* NUMBER_OF_FILES > 0 */
return true;
- }
+}
} /* namespace app */
} /* namespace arm */ \ No newline at end of file
diff --git a/source/application/main/include/UseCaseCommonUtils.hpp b/source/application/main/include/UseCaseCommonUtils.hpp
index 0af22f3..a3b606d 100644
--- a/source/application/main/include/UseCaseCommonUtils.hpp
+++ b/source/application/main/include/UseCaseCommonUtils.hpp
@@ -21,6 +21,11 @@
#include "Model.hpp"
#include "AppContext.hpp"
#include "Profiler.hpp"
+#include "UseCaseHandler.hpp" /* Handlers for different user options. */
+#include "Classifier.hpp" /* Classifier. */
+#include "InputFiles.hpp"
+#include <inttypes.h>
+
/* Helper macro to convert RGB888 to RGB565 format. */
#define RGB888_TO_RGB565(R8,G8,B8) ((((R8>>3) & 0x1F) << 11) | \
@@ -31,9 +36,86 @@ constexpr uint16_t COLOR_BLACK = 0;
constexpr uint16_t COLOR_GREEN = RGB888_TO_RGB565( 0, 255, 0); // 2016;
constexpr uint16_t COLOR_YELLOW = RGB888_TO_RGB565(255, 255, 0); // 65504;
+
+void DisplayCommonMenu();
+
+namespace image{
+
+ /**
+ * @brief Helper function to convert a UINT8 image to INT8 format.
+ * @param[in,out] data Pointer to the data start.
+ * @param[in] kMaxImageSize Total number of pixels in the image.
+ **/
+ void ConvertImgToInt8(void* data, size_t kMaxImageSize);
+
+ /**
+ * @brief Presents inference results using the data presentation
+ * object.
+ * @param[in] platform Reference to the hal platform object.
+ * @param[in] results Vector of classification results to be displayed.
+ * @return true if successful, false otherwise.
+ **/
+ bool PresentInferenceResult(hal_platform& platform,
+ const std::vector<arm::app::ClassificationResult>& results);
+
+
+ /**
+ * @brief Presents inference results along with the inference time using the data presentation
+ * object.
+ * @param[in] platform Reference to the hal platform object.
+ * @param[in] results Vector of classification results to be displayed.
+ * @param[in] infTimeMs Inference time in ms.
+ * @return true if successful, false otherwise.
+ **/
+ bool PresentInferenceResult(hal_platform& platform,
+ const std::vector<arm::app::ClassificationResult>& results,
+ const time_t infTimeMs);
+
+ /**
+ * @brief Presents inference results, optionally along with the inference time, using the data presentation
+ * object.
+ * @param[in] platform Reference to the hal platform object.
+ * @param[in] results Vector of classification results to be displayed.
+ * @param[in] profilingEnabled Whether profiling information should be displayed.
+ * @param[in] infTimeMs Inference time in ms (used when profiling is enabled).
+ * @return true if successful, false otherwise.
+ **/
+ bool PresentInferenceResult(hal_platform& platform,
+ const std::vector<arm::app::ClassificationResult>& results,
+ bool profilingEnabled,
+ const time_t infTimeMs = 0);
+ }
+
+/**
+ * @brief Helper function to increment current input feature vector index.
+ * @param[in,out] ctx Reference to the application context object.
+ * @param[in] useCase Use case name.
+ **/
+void IncrementAppCtxIfmIdx(arm::app::ApplicationContext& ctx, std::string useCase);
+
+/**
+ * @brief Helper function to set the input feature map index.
+ * @param[in,out] ctx Reference to the application context object.
+ * @param[in] idx Value to be set.
+ * @param[in] ctxIfmName Input feature map name.
+ * @return true if index is set, false otherwise.
+ **/
+bool SetAppCtxIfmIdx(arm::app::ApplicationContext& ctx, uint32_t idx, std::string ctxIfmName);
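+
+/* Example usage, following the pattern in the use-case handlers updated by
+ * this change:
+ *
+ *     if (!SetAppCtxIfmIdx(ctx, idx, "imgIndex")) {
+ *         return false;
+ *     }
+ *     ... run inference ...
+ *     IncrementAppCtxIfmIdx(ctx, "imgIndex");
+ */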
+
+
+namespace common {
+
+ enum OPCODES {
+ MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector. */
+ MENU_OPT_RUN_INF_CHOSEN, /* Run on a user provided vector index. */
+ MENU_OPT_RUN_INF_ALL, /* Run inference on all. */
+ MENU_OPT_SHOW_MODEL_INFO, /* Show model info. */
+ MENU_OPT_LIST_IFM /* List the current IFM. */
+ };
+
+}
+
namespace arm {
namespace app {
-
/**
* @brief Run inference using given model
* object. If profiling is enabled, it will log the
@@ -77,4 +159,5 @@ namespace app {
} /* namespace app */
} /* namespace arm */
-#endif /* USECASE_COMMON_UTILS_HPP */ \ No newline at end of file
+
+#endif /* USECASE_COMMON_UTILS_HPP */
diff --git a/source/use_case/ad/src/UseCaseHandler.cc b/source/use_case/ad/src/UseCaseHandler.cc
index 0c78179..b20b63e 100644
--- a/source/use_case/ad/src/UseCaseHandler.cc
+++ b/source/use_case/ad/src/UseCaseHandler.cc
@@ -29,20 +29,6 @@ namespace arm {
namespace app {
/**
- * @brief Helper function to increment current audio clip index
- * @param[in,out] ctx pointer to the application context object
- **/
- static void IncrementAppCtxClipIdx(ApplicationContext& ctx);
-
- /**
- * @brief Helper function to set the audio clip index
- * @param[in,out] ctx pointer to the application context object
- * @param[in] idx value to be set
- * @return true if index is set, false otherwise
- **/
- static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
-
- /**
* @brief Presents inference results using the data presentation
* object.
* @param[in] platform reference to the hal platform object
@@ -88,7 +74,7 @@ namespace app {
/* If the request has a valid size, set the audio index */
if (clipIndex < NUMBER_OF_FILES) {
- if (!SetAppCtxClipIdx(ctx, clipIndex)) {
+ if (!SetAppCtxIfmIdx(ctx, clipIndex,"clipIndex")) {
return false;
}
}
@@ -225,35 +211,13 @@ namespace app {
profiler.PrintProfilingResult();
- IncrementAppCtxClipIdx(ctx);
+ IncrementAppCtxIfmIdx(ctx,"clipIndex");
} while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
return true;
}
- static void IncrementAppCtxClipIdx(ApplicationContext& ctx)
- {
- auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
-
- if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
- ctx.Set<uint32_t>("clipIndex", 0);
- return;
- }
- ++curAudioIdx;
- ctx.Set<uint32_t>("clipIndex", curAudioIdx);
- }
-
- static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx)
- {
- if (idx >= NUMBER_OF_FILES) {
- printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
- idx, NUMBER_OF_FILES);
- return false;
- }
- ctx.Set<uint32_t>("clipIndex", idx);
- return true;
- }
static bool PresentInferenceResult(hal_platform& platform, float result, float threshold)
{
diff --git a/source/use_case/asr/src/UseCaseHandler.cc b/source/use_case/asr/src/UseCaseHandler.cc
index 8ef318f..d469255 100644
--- a/source/use_case/asr/src/UseCaseHandler.cc
+++ b/source/use_case/asr/src/UseCaseHandler.cc
@@ -32,20 +32,6 @@ namespace arm {
namespace app {
/**
- * @brief Helper function to increment current audio clip index.
- * @param[in,out] ctx Pointer to the application context object.
- **/
- static void IncrementAppCtxClipIdx(ApplicationContext& ctx);
-
- /**
- * @brief Helper function to set the audio clip index.
- * @param[in,out] ctx Pointer to the application context object.
- * @param[in] idx Value to be set.
- * @return true if index is set, false otherwise.
- **/
- static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
-
- /**
* @brief Presents inference results using the data presentation
* object.
* @param[in] platform Reference to the hal platform object.
@@ -69,7 +55,7 @@ namespace app {
/* If the request has a valid size, set the audio index. */
if (clipIndex < NUMBER_OF_FILES) {
- if (!SetAppCtxClipIdx(ctx, clipIndex)) {
+ if (!SetAppCtxIfmIdx(ctx, clipIndex,"clipIndex")) {
return false;
}
}
@@ -214,36 +200,13 @@ namespace app {
profiler.PrintProfilingResult();
- IncrementAppCtxClipIdx(ctx);
+ IncrementAppCtxIfmIdx(ctx,"clipIndex");
} while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
return true;
}
- static void IncrementAppCtxClipIdx(ApplicationContext& ctx)
- {
- auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
-
- if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
- ctx.Set<uint32_t>("clipIndex", 0);
- return;
- }
- ++curAudioIdx;
- ctx.Set<uint32_t>("clipIndex", curAudioIdx);
- }
-
- static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx)
- {
- if (idx >= NUMBER_OF_FILES) {
- printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
- idx, NUMBER_OF_FILES);
- return false;
- }
-
- ctx.Set<uint32_t>("clipIndex", idx);
- return true;
- }
static bool PresentInferenceResult(hal_platform& platform,
const std::vector<arm::app::asr::AsrResult>& results)
diff --git a/source/use_case/img_class/src/MainLoop.cc b/source/use_case/img_class/src/MainLoop.cc
index 61a09dd..79f6018 100644
--- a/source/use_case/img_class/src/MainLoop.cc
+++ b/source/use_case/img_class/src/MainLoop.cc
@@ -24,29 +24,6 @@
using ImgClassClassifier = arm::app::Classifier;
-enum opcodes
-{
- MENU_OPT_RUN_INF_NEXT = 1, /* Run on next vector. */
- MENU_OPT_RUN_INF_CHOSEN, /* Run on a user provided vector index. */
- MENU_OPT_RUN_INF_ALL, /* Run inference on all. */
- MENU_OPT_SHOW_MODEL_INFO, /* Show model info. */
- MENU_OPT_LIST_IMAGES /* List the current baked images. */
-};
-
-static void DisplayMenu()
-{
- printf("\n\n");
- printf("User input required\n");
- printf("Enter option number from:\n\n");
- printf(" %u. Classify next image\n", MENU_OPT_RUN_INF_NEXT);
- printf(" %u. Classify image at chosen index\n", MENU_OPT_RUN_INF_CHOSEN);
- printf(" %u. Run classification on all images\n", MENU_OPT_RUN_INF_ALL);
- printf(" %u. Show NN model info\n", MENU_OPT_SHOW_MODEL_INFO);
- printf(" %u. List images\n\n", MENU_OPT_LIST_IMAGES);
- printf(" Choice: ");
- fflush(stdout);
-}
-
void main_loop(hal_platform& platform)
{
arm::app::MobileNetModel model; /* Model wrapper object. */
@@ -79,29 +56,29 @@ void main_loop(hal_platform& platform)
/* Loop. */
do {
- int menuOption = MENU_OPT_RUN_INF_NEXT;
+ int menuOption = common::MENU_OPT_RUN_INF_NEXT;
if (bUseMenu) {
- DisplayMenu();
+ DisplayCommonMenu();
menuOption = arm::app::ReadUserInputAsInt(platform);
printf("\n");
}
switch (menuOption) {
- case MENU_OPT_RUN_INF_NEXT:
+ case common::MENU_OPT_RUN_INF_NEXT:
executionSuccessful = ClassifyImageHandler(caseContext, caseContext.Get<uint32_t>("imgIndex"), false);
break;
- case MENU_OPT_RUN_INF_CHOSEN: {
+ case common::MENU_OPT_RUN_INF_CHOSEN: {
printf(" Enter the image index [0, %d]: ", NUMBER_OF_FILES-1);
auto imgIndex = static_cast<uint32_t>(arm::app::ReadUserInputAsInt(platform));
executionSuccessful = ClassifyImageHandler(caseContext, imgIndex, false);
break;
}
- case MENU_OPT_RUN_INF_ALL:
+ case common::MENU_OPT_RUN_INF_ALL:
executionSuccessful = ClassifyImageHandler(caseContext, caseContext.Get<uint32_t>("imgIndex"), true);
break;
- case MENU_OPT_SHOW_MODEL_INFO:
+ case common::MENU_OPT_SHOW_MODEL_INFO:
executionSuccessful = model.ShowModelInfoHandler();
break;
- case MENU_OPT_LIST_IMAGES:
+ case common::MENU_OPT_LIST_IFM:
executionSuccessful = ListFilesHandler(caseContext);
break;
default:
diff --git a/source/use_case/img_class/src/UseCaseHandler.cc b/source/use_case/img_class/src/UseCaseHandler.cc
index 337cb29..66df1da 100644
--- a/source/use_case/img_class/src/UseCaseHandler.cc
+++ b/source/use_case/img_class/src/UseCaseHandler.cc
@@ -39,37 +39,6 @@ namespace app {
**/
static bool LoadImageIntoTensor(uint32_t imIdx, TfLiteTensor* inputTensor);
- /**
- * @brief Helper function to increment current image index.
- * @param[in,out] ctx Pointer to the application context object.
- **/
- static void IncrementAppCtxImageIdx(ApplicationContext& ctx);
-
- /**
- * @brief Helper function to set the image index.
- * @param[in,out] ctx Pointer to the application context object.
- * @param[in] idx Value to be set.
- * @return true if index is set, false otherwise.
- **/
- static bool SetAppCtxImageIdx(ApplicationContext& ctx, uint32_t idx);
-
- /**
- * @brief Presents inference results using the data presentation
- * object.
- * @param[in] platform Reference to the hal platform object.
- * @param[in] results Vector of classification results to be displayed.
- * @return true if successful, false otherwise.
- **/
- static bool PresentInferenceResult(hal_platform& platform,
- const std::vector<ClassificationResult>& results);
-
- /**
- * @brief Helper function to convert a UINT8 image to INT8 format.
- * @param[in,out] data Pointer to the data start.
- * @param[in] kMaxImageSize Total number of pixels in the image.
- **/
- static void ConvertImgToInt8(void* data, size_t kMaxImageSize);
-
/* Image inference classification handler. */
bool ClassifyImageHandler(ApplicationContext& ctx, uint32_t imgIndex, bool runAll)
{
@@ -89,7 +58,7 @@ namespace app {
/* If the request has a valid size, set the image index. */
if (imgIndex < NUMBER_OF_FILES) {
- if (!SetAppCtxImageIdx(ctx, imgIndex)) {
+ if (!SetAppCtxIfmIdx(ctx, imgIndex, "imgIndex")) {
return false;
}
}
@@ -134,7 +103,7 @@ namespace app {
/* If the data is signed. */
if (model.IsDataSigned()) {
- ConvertImgToInt8(inputTensor->data.data, inputTensor->bytes);
+ image::ConvertImgToInt8(inputTensor->data.data, inputTensor->bytes);
}
/* Display message on the LCD - inference running. */
@@ -166,13 +135,13 @@ namespace app {
arm::app::DumpTensor(outputTensor);
#endif /* VERIFY_TEST_OUTPUT */
- if (!PresentInferenceResult(platform, results)) {
+ if (!image::PresentInferenceResult(platform, results)) {
return false;
}
profiler.PrintProfilingResult();
- IncrementAppCtxImageIdx(ctx);
+ IncrementAppCtxIfmIdx(ctx,"imgIndex");
} while (runAll && ctx.Get<uint32_t>("imgIndex") != curImIdx);
@@ -195,83 +164,6 @@ namespace app {
return true;
}
- static void IncrementAppCtxImageIdx(ApplicationContext& ctx)
- {
- auto curImIdx = ctx.Get<uint32_t>("imgIndex");
-
- if (curImIdx + 1 >= NUMBER_OF_FILES) {
- ctx.Set<uint32_t>("imgIndex", 0);
- return;
- }
- ++curImIdx;
- ctx.Set<uint32_t>("imgIndex", curImIdx);
- }
-
- static bool SetAppCtxImageIdx(ApplicationContext& ctx, uint32_t idx)
- {
- if (idx >= NUMBER_OF_FILES) {
- printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
- idx, NUMBER_OF_FILES);
- return false;
- }
- ctx.Set<uint32_t>("imgIndex", idx);
- return true;
- }
-
- static bool PresentInferenceResult(hal_platform& platform,
- const std::vector<ClassificationResult>& results)
- {
- constexpr uint32_t dataPsnTxtStartX1 = 150;
- constexpr uint32_t dataPsnTxtStartY1 = 30;
-
- constexpr uint32_t dataPsnTxtStartX2 = 10;
- constexpr uint32_t dataPsnTxtStartY2 = 150;
-
- constexpr uint32_t dataPsnTxtYIncr = 16; /* Row index increment. */
-
- platform.data_psn->set_text_color(COLOR_GREEN);
-
- /* Display each result. */
- uint32_t rowIdx1 = dataPsnTxtStartY1 + 2 * dataPsnTxtYIncr;
- uint32_t rowIdx2 = dataPsnTxtStartY2;
-
- info("Final results:\n");
- info("Total number of inferences: 1\n");
- for (uint32_t i = 0; i < results.size(); ++i) {
- std::string resultStr =
- std::to_string(i + 1) + ") " +
- std::to_string(results[i].m_labelIdx) +
- " (" + std::to_string(results[i].m_normalisedVal) + ")";
-
- platform.data_psn->present_data_text(
- resultStr.c_str(), resultStr.size(),
- dataPsnTxtStartX1, rowIdx1, 0);
- rowIdx1 += dataPsnTxtYIncr;
-
- resultStr = std::to_string(i + 1) + ") " + results[i].m_label;
- platform.data_psn->present_data_text(
- resultStr.c_str(), resultStr.size(),
- dataPsnTxtStartX2, rowIdx2, 0);
- rowIdx2 += dataPsnTxtYIncr;
-
- info("%" PRIu32 ") %" PRIu32 " (%f) -> %s\n", i,
- results[i].m_labelIdx, results[i].m_normalisedVal,
- results[i].m_label.c_str());
- }
-
- return true;
- }
-
- static void ConvertImgToInt8(void* data, const size_t kMaxImageSize)
- {
- auto* tmp_req_data = (uint8_t*) data;
- auto* tmp_signed_req_data = (int8_t*) data;
-
- for (size_t i = 0; i < kMaxImageSize; i++) {
- tmp_signed_req_data[i] = (int8_t) (
- (int32_t) (tmp_req_data[i]) - 128);
- }
- }
} /* namespace app */
} /* namespace arm */
diff --git a/source/use_case/kws/src/UseCaseHandler.cc b/source/use_case/kws/src/UseCaseHandler.cc
index 2144c03..a951e55 100644
--- a/source/use_case/kws/src/UseCaseHandler.cc
+++ b/source/use_case/kws/src/UseCaseHandler.cc
@@ -33,20 +33,7 @@ using KwsClassifier = arm::app::Classifier;
namespace arm {
namespace app {
- /**
- * @brief Helper function to increment current audio clip index.
- * @param[in,out] ctx Pointer to the application context object.
- **/
- static void IncrementAppCtxClipIdx(ApplicationContext& ctx);
-
- /**
- * @brief Helper function to set the audio clip index.
- * @param[in,out] ctx Pointer to the application context object.
- * @param[in] idx Value to be set.
- * @return true if index is set, false otherwise.
- **/
- static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
-
+
/**
* @brief Presents inference results using the data presentation
* object.
@@ -94,7 +81,7 @@ namespace app {
/* If the request has a valid size, set the audio index. */
if (clipIndex < NUMBER_OF_FILES) {
- if (!SetAppCtxClipIdx(ctx, clipIndex)) {
+ if (!SetAppCtxIfmIdx(ctx, clipIndex,"clipIndex")) {
return false;
}
}
@@ -246,36 +233,14 @@ namespace app {
profiler.PrintProfilingResult();
- IncrementAppCtxClipIdx(ctx);
+ IncrementAppCtxIfmIdx(ctx,"clipIndex");
} while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
return true;
}
- static void IncrementAppCtxClipIdx(ApplicationContext& ctx)
- {
- auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
-
- if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
- ctx.Set<uint32_t>("clipIndex", 0);
- return;
- }
- ++curAudioIdx;
- ctx.Set<uint32_t>("clipIndex", curAudioIdx);
- }
-
- static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx)
- {
- if (idx >= NUMBER_OF_FILES) {
- printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
- idx, NUMBER_OF_FILES);
- return false;
- }
- ctx.Set<uint32_t>("clipIndex", idx);
- return true;
- }
-
+
static bool PresentInferenceResult(hal_platform& platform,
const std::vector<arm::app::kws::KwsResult>& results)
{
diff --git a/source/use_case/kws_asr/src/UseCaseHandler.cc b/source/use_case/kws_asr/src/UseCaseHandler.cc
index 9080348..1d88ba1 100644
--- a/source/use_case/kws_asr/src/UseCaseHandler.cc
+++ b/source/use_case/kws_asr/src/UseCaseHandler.cc
@@ -49,20 +49,6 @@ namespace app {
};
/**
- * @brief Helper function to increment current audio clip index
- * @param[in,out] ctx pointer to the application context object
- **/
- static void IncrementAppCtxClipIdx(ApplicationContext& ctx);
-
- /**
- * @brief Helper function to set the audio clip index
- * @param[in,out] ctx pointer to the application context object
- * @param[in] idx value to be set
- * @return true if index is set, false otherwise
- **/
- static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx);
-
- /**
* @brief Presents kws inference results using the data presentation
* object.
* @param[in] platform reference to the hal platform object
@@ -440,7 +426,7 @@ namespace app {
/* If the request has a valid size, set the audio index. */
if (clipIndex < NUMBER_OF_FILES) {
- if (!SetAppCtxClipIdx(ctx, clipIndex)) {
+ if (!SetAppCtxIfmIdx(ctx, clipIndex,"kws_asr")) {
return false;
}
}
@@ -461,35 +447,13 @@ namespace app {
}
}
- IncrementAppCtxClipIdx(ctx);
+ IncrementAppCtxIfmIdx(ctx,"kws_asr");
} while (runAll && ctx.Get<uint32_t>("clipIndex") != startClipIdx);
return true;
}
- static void IncrementAppCtxClipIdx(ApplicationContext& ctx)
- {
- auto curAudioIdx = ctx.Get<uint32_t>("clipIndex");
-
- if (curAudioIdx + 1 >= NUMBER_OF_FILES) {
- ctx.Set<uint32_t>("clipIndex", 0);
- return;
- }
- ++curAudioIdx;
- ctx.Set<uint32_t>("clipIndex", curAudioIdx);
- }
-
- static bool SetAppCtxClipIdx(ApplicationContext& ctx, uint32_t idx)
- {
- if (idx >= NUMBER_OF_FILES) {
- printf_err("Invalid idx %" PRIu32 " (expected less than %u)\n",
- idx, NUMBER_OF_FILES);
- return false;
- }
- ctx.Set<uint32_t>("clipIndex", idx);
- return true;
- }
static bool PresentInferenceResult(hal_platform& platform,
std::vector<arm::app::kws::KwsResult>& results)
diff --git a/source/use_case/vww/include/UseCaseHandler.hpp b/source/use_case/vww/include/UseCaseHandler.hpp
new file mode 100644
index 0000000..7476ed8
--- /dev/null
+++ b/source/use_case/vww/include/UseCaseHandler.hpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef VISUAL_WAKE_WORD_HANDLER_HPP
+#define VISUAL_WAKE_WORD_HANDLER_HPP
+
+#include "AppContext.hpp"
+
+namespace arm {
+namespace app {
+
+ /**
+ * @brief Handles the inference event.
+ * @param[in] ctx Pointer to the application context.
+ * @param[in] imgIndex Index to the image to classify.
+ * @param[in] runAll Flag to request classification of the available images.
+ * @return true or false based on execution success.
+ **/
+ bool ClassifyImageHandler(ApplicationContext &ctx, uint32_t imgIndex, bool runAll);
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* VISUAL_WAKE_WORD_HANDLER_HPP */
diff --git a/source/use_case/vww/include/VisualWakeWordModel.hpp b/source/use_case/vww/include/VisualWakeWordModel.hpp
new file mode 100644
index 0000000..ee3a7bf
--- /dev/null
+++ b/source/use_case/vww/include/VisualWakeWordModel.hpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef VISUAL_WAKE_WORD_MODEL_HPP
+#define VISUAL_WAKE_WORD_MODEL_HPP
+
+#include "Model.hpp"
+
+namespace arm {
+namespace app {
+
+ class VisualWakeWordModel : public Model {
+
+ protected:
+ /** @brief Gets the reference to op resolver interface class. */
+ const tflite::MicroOpResolver& GetOpResolver() override;
+
+ /** @brief Adds operations to the op resolver instance. */
+ bool EnlistOperations() override;
+
+ const uint8_t* ModelPointer() override;
+
+ size_t ModelSize() override;
+ private:
+ /* Maximum number of individual operations that can be enlisted. */
+ static constexpr int ms_maxOpCnt = 7;
+
+ /* A mutable op resolver instance. */
+ tflite::MicroMutableOpResolver<ms_maxOpCnt> m_opResolver;
+ };
+
+} /* namespace app */
+} /* namespace arm */
+
+#endif /* VISUAL_WAKE_WORD_MODEL_HPP */
diff --git a/source/use_case/vww/src/MainLoop.cc b/source/use_case/vww/src/MainLoop.cc
new file mode 100644
index 0000000..f026cc2
--- /dev/null
+++ b/source/use_case/vww/src/MainLoop.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "hal.h" /* Brings in platform definitions. */
+#include "Classifier.hpp" /* Classifier. */
+#include "InputFiles.hpp" /* For input images. */
+#include "Labels.hpp" /* For label strings. */
+#include "VisualWakeWordModel.hpp" /* Model class for running inference. */
+#include "UseCaseHandler.hpp" /* Handlers for different user options. */
+#include "UseCaseCommonUtils.hpp" /* Utils functions. */
+
+using VisualWakeWordClassifier = arm::app::Classifier;
+
+void main_loop(hal_platform &platform)
+{
+ arm::app::VisualWakeWordModel model; /* Model wrapper object. */
+
+ /* Load the model. */
+ if (!model.Init()) {
+ printf_err("Failed to initialise model\n");
+ return;
+ }
+
+ /* Instantiate application context. */
+ arm::app::ApplicationContext caseContext;
+
+ arm::app::Profiler profiler{&platform, "vww"};
+ caseContext.Set<arm::app::Profiler&>("profiler", profiler);
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<arm::app::Model&>("model", model);
+ caseContext.Set<uint32_t>("imgIndex", 0);
+
+    VisualWakeWordClassifier classifier;  /* Classifier wrapper object. */
+ caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+ std::vector <std::string> labels;
+ GetLabelsVector(labels);
+ caseContext.Set<const std::vector <std::string>&>("labels", labels);
+
+ /* Loop. */
+ bool executionSuccessful = true;
+    constexpr bool bUseMenu = NUMBER_OF_FILES > 1;
+ do {
+ int menuOption = common::MENU_OPT_RUN_INF_NEXT;
+ if (bUseMenu) {
+ DisplayCommonMenu();
+ menuOption = arm::app::ReadUserInputAsInt(platform);
+ printf("\n");
+ }
+
+ switch (menuOption) {
+ case common::MENU_OPT_RUN_INF_NEXT:
+ executionSuccessful = ClassifyImageHandler(caseContext, caseContext.Get<uint32_t>("imgIndex"), false);
+ break;
+ case common::MENU_OPT_RUN_INF_CHOSEN: {
+ printf(" Enter the image index [0, %d]: ", NUMBER_OF_FILES-1);
+ auto imgIndex = static_cast<uint32_t>(arm::app::ReadUserInputAsInt(platform));
+ executionSuccessful = ClassifyImageHandler(caseContext, imgIndex, false);
+ break;
+ }
+ case common::MENU_OPT_RUN_INF_ALL:
+ executionSuccessful = ClassifyImageHandler(caseContext, caseContext.Get<uint32_t>("imgIndex"), true);
+ break;
+ case common::MENU_OPT_SHOW_MODEL_INFO: {
+ executionSuccessful = model.ShowModelInfoHandler();
+ break;
+ }
+ case common::MENU_OPT_LIST_IFM:
+ executionSuccessful = ListFilesHandler(caseContext);
+ break;
+ default:
+ printf("Incorrect choice, try again.");
+ break;
+ }
+ } while (executionSuccessful && bUseMenu);
+ info("Main loop terminated.\n");
+
+}
diff --git a/source/use_case/vww/src/UseCaseHandler.cc b/source/use_case/vww/src/UseCaseHandler.cc
new file mode 100644
index 0000000..fb2e837
--- /dev/null
+++ b/source/use_case/vww/src/UseCaseHandler.cc
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UseCaseHandler.hpp"
+#include "VisualWakeWordModel.hpp"
+#include "Classifier.hpp"
+#include "InputFiles.hpp"
+#include "UseCaseCommonUtils.hpp"
+#include "hal.h"
+
+namespace arm {
+namespace app {
+
+ /**
+ * @brief Helper function to load the current image into the input
+ * tensor.
+ * @param[in] imIdx Image index (from the pool of images available
+ * to the application).
+ * @param[out] inputTensor Pointer to the input tensor to be populated.
+ * @return true if tensor is loaded, false otherwise.
+ **/
+ static bool LoadImageIntoTensor(uint32_t imIdx,
+ TfLiteTensor *inputTensor);
+
+ /* Image inference classification handler. */
+ bool ClassifyImageHandler(ApplicationContext &ctx, uint32_t imgIndex, bool runAll)
+ {
+ auto& platform = ctx.Get<hal_platform &>("platform");
+ auto& profiler = ctx.Get<Profiler&>("profiler");
+
+ constexpr uint32_t dataPsnImgDownscaleFactor = 1;
+ constexpr uint32_t dataPsnImgStartX = 10;
+ constexpr uint32_t dataPsnImgStartY = 35;
+
+ constexpr uint32_t dataPsnTxtInfStartX = 150;
+ constexpr uint32_t dataPsnTxtInfStartY = 70;
+
+ platform.data_psn->clear(COLOR_BLACK);
+ time_t infTimeMs = 0;
+
+ auto& model = ctx.Get<Model&>("model");
+
+ /* If the request has a valid size, set the image index. */
+ if (imgIndex < NUMBER_OF_FILES) {
+            if (!SetAppCtxIfmIdx(ctx, imgIndex, "imgIndex")) {
+ return false;
+ }
+ }
+ if (!model.IsInited()) {
+ printf_err("Model is not initialised! Terminating processing.\n");
+ return false;
+ }
+
+ auto curImIdx = ctx.Get<uint32_t>("imgIndex");
+
+ TfLiteTensor *outputTensor = model.GetOutputTensor(0);
+ TfLiteTensor *inputTensor = model.GetInputTensor(0);
+
+ if (!inputTensor->dims) {
+ printf_err("Invalid input tensor dims\n");
+ return false;
+ } else if (inputTensor->dims->size < 3) {
+ printf_err("Input tensor dimension should be >= 3\n");
+ return false;
+ }
+ TfLiteIntArray* inputShape = model.GetInputShape(0);
+ const uint32_t nCols = inputShape->data[2];
+ const uint32_t nRows = inputShape->data[1];
+ const uint32_t nChannels = (inputShape->size == 4) ? inputShape->data[3] : 1;
+
+ std::vector<ClassificationResult> results;
+
+ do {
+
+ /* Strings for presentation/logging. */
+ std::string str_inf{"Running inference... "};
+
+            /* Copy over the data; bail out if the image cannot be loaded. */
+            if (!LoadImageIntoTensor(ctx.Get<uint32_t>("imgIndex"), inputTensor)) {
+                return false;
+            }
+
+ /* Display this image on the LCD. */
+ platform.data_psn->present_data_image(
+ (uint8_t *) inputTensor->data.data,
+ nCols, nRows, nChannels,
+ dataPsnImgStartX, dataPsnImgStartY, dataPsnImgDownscaleFactor);
+
+ /* If the data is signed. */
+ if (model.IsDataSigned()) {
+ image::ConvertImgToInt8(inputTensor->data.data, inputTensor->bytes);
+ }
+
+ /* Display message on the LCD - inference running. */
+ platform.data_psn->present_data_text(
+ str_inf.c_str(), str_inf.size(),
+ dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+ /* Run inference over this image. */
+ info("Running inference on image %" PRIu32 " => %s\n", ctx.Get<uint32_t>("imgIndex"),
+ get_filename(ctx.Get<uint32_t>("imgIndex")));
+
+ if (!RunInference(model, profiler)) {
+ return false;
+ }
+
+ /* Erase. */
+ str_inf = std::string(str_inf.size(), ' ');
+ platform.data_psn->present_data_text(
+ str_inf.c_str(), str_inf.size(),
+ dataPsnTxtInfStartX, dataPsnTxtInfStartY, 0);
+
+ auto& classifier = ctx.Get<Classifier&>("classifier");
+ classifier.GetClassificationResults(outputTensor, results,
+ ctx.Get<std::vector <std::string>&>("labels"), 1);
+
+ /* Add results to context for access outside handler. */
+ ctx.Set<std::vector<ClassificationResult>>("results", results);
+
+#if VERIFY_TEST_OUTPUT
+ arm::app::DumpTensor(outputTensor);
+#endif /* VERIFY_TEST_OUTPUT */
+
+ if (!image::PresentInferenceResult(platform, results, infTimeMs)) {
+ return false;
+ }
+
+ profiler.PrintProfilingResult();
+            IncrementAppCtxIfmIdx(ctx, "imgIndex");
+
+ } while (runAll && ctx.Get<uint32_t>("imgIndex") != curImIdx);
+
+ return true;
+ }
+
+ static bool LoadImageIntoTensor(const uint32_t imIdx,
+ TfLiteTensor *inputTensor)
+ {
+ const size_t copySz = inputTensor->bytes < IMAGE_DATA_SIZE ?
+ inputTensor->bytes : IMAGE_DATA_SIZE;
+ if (imIdx >= NUMBER_OF_FILES) {
+ printf_err("invalid image index %" PRIu32 " (max: %u)\n", imIdx,
+ NUMBER_OF_FILES - 1);
+ return false;
+ }
+
+ const uint32_t nChannels = (inputTensor->dims->size == 4) ? inputTensor->dims->data[3] : 1;
+
+ const uint8_t* srcPtr = get_img_array(imIdx);
+ auto* dstPtr = (uint8_t*)inputTensor->data.data;
+ if (1 == nChannels) {
+            /**
+             * The model expects a single-channel input => convert the
+             * RGB image to grayscale (ITU-R BT.601 luma weights).
+             **/
+ for (size_t i = 0; i < copySz; ++i, srcPtr += 3) {
+ *dstPtr++ = 0.2989*(*srcPtr) +
+ 0.587*(*(srcPtr+1)) +
+ 0.114*(*(srcPtr+2));
+ }
+ } else {
+ memcpy(inputTensor->data.data, srcPtr, copySz);
+ }
+
+ debug("Image %" PRIu32 " loaded\n", imIdx);
+ return true;
+ }
+
+} /* namespace app */
+} /* namespace arm */
\ No newline at end of file
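Note: the grayscale branch above applies the ITU-R BT.601 luma weights (0.2989, 0.587, 0.114) in floating point. An equivalent integer-only form is common on FPU-less Cortex-M targets; a minimal sketch, not part of this patch:

    /* Fixed-point BT.601 luma: weights scaled by 256 (77 + 150 + 29 == 256). */
    static inline uint8_t RgbToGray(uint8_t r, uint8_t g, uint8_t b)
    {
        return static_cast<uint8_t>((77u * r + 150u * g + 29u * b) >> 8);
    }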
diff --git a/source/use_case/vww/src/VisualWakeWordModel.cc b/source/use_case/vww/src/VisualWakeWordModel.cc
new file mode 100644
index 0000000..3067c7a
--- /dev/null
+++ b/source/use_case/vww/src/VisualWakeWordModel.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "VisualWakeWordModel.hpp"
+
+#include "hal.h"
+
+const tflite::MicroOpResolver& arm::app::VisualWakeWordModel::GetOpResolver()
+{
+ return this->m_opResolver;
+}
+
+bool arm::app::VisualWakeWordModel::EnlistOperations()
+{
+ this->m_opResolver.AddDepthwiseConv2D();
+ this->m_opResolver.AddConv2D();
+ this->m_opResolver.AddAveragePool2D();
+ this->m_opResolver.AddReshape();
+ this->m_opResolver.AddPad();
+ this->m_opResolver.AddAdd();
+
+#if defined(ARM_NPU)
+ if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
+ info("Added %s support to op resolver\n",
+ tflite::GetString_ETHOSU());
+ } else {
+ printf_err("Failed to add Arm NPU support to op resolver.");
+ return false;
+ }
+#endif /* ARM_NPU */
+ return true;
+}
+
+extern uint8_t* GetModelPointer();
+const uint8_t* arm::app::VisualWakeWordModel::ModelPointer()
+{
+ return GetModelPointer();
+}
+
+extern size_t GetModelLen();
+size_t arm::app::VisualWakeWordModel::ModelSize()
+{
+ return GetModelLen();
+}
\ No newline at end of file
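Note: GetModelPointer() and GetModelLen() are declared extern above and are expected to be satisfied by the source that generate_tflite_code() (see usecase.cmake below) emits from the chosen .tflite file. For orientation only, a hypothetical shape of that generated unit:

    /* Hypothetical sketch of the generated model translation unit. */
    static uint8_t nn_model[] = { /* ...tflite flatbuffer bytes... */ };

    uint8_t* GetModelPointer() { return nn_model; }
    size_t GetModelLen() { return sizeof(nn_model); }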
diff --git a/source/use_case/vww/usecase.cmake b/source/use_case/vww/usecase.cmake
new file mode 100644
index 0000000..9a732b7
--- /dev/null
+++ b/source/use_case/vww/usecase.cmake
@@ -0,0 +1,62 @@
+# Copyright (c) 2021 Arm Limited. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+USER_OPTION(${use_case}_FILE_PATH "Directory with custom image files, or path to a single image file, to use in the evaluation application"
+ ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/samples/
+ PATH_OR_FILE)
+
+USER_OPTION(${use_case}_IMAGE_SIZE "Square image size in pixels. Images will be resized to this size."
+ 128
+ STRING)
+
+USER_OPTION(${use_case}_LABELS_TXT_FILE "Labels' txt file for the chosen model"
+ ${CMAKE_CURRENT_SOURCE_DIR}/resources/${use_case}/labels/visual_wake_word_labels.txt
+ FILEPATH)
+
+USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen model"
+ 0x00200000
+ STRING)
+
+if (ETHOS_U55_ENABLED)
+ set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/vww4_128_128_INT8_vela_H128.tflite)
+else()
+ set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/vww4_128_128_INT8.tflite)
+endif()
+
+USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the evaluation application. Model files must be in tflite format."
+ ${DEFAULT_MODEL_PATH}
+ FILEPATH)
+
+# Generate model file
+generate_tflite_code(
+ MODEL_PATH ${${use_case}_MODEL_TFLITE_PATH}
+ DESTINATION ${SRC_GEN_DIR}
+)
+
+# Generate labels file
+set(${use_case}_LABELS_CPP_FILE Labels)
+generate_labels_code(
+ INPUT "${${use_case}_LABELS_TXT_FILE}"
+ DESTINATION_SRC ${SRC_GEN_DIR}
+ DESTINATION_HDR ${INC_GEN_DIR}
+ OUTPUT_FILENAME "${${use_case}_LABELS_CPP_FILE}"
+)
+
+# Generate input files
+generate_images_code("${${use_case}_FILE_PATH}"
+ ${SRC_GEN_DIR}
+ ${INC_GEN_DIR}
+ "${${use_case}_IMAGE_SIZE}")
diff --git a/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc b/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc
new file mode 100644
index 0000000..c109a62
--- /dev/null
+++ b/tests/use_case/vww/InferenceVisualWakeWordModelTests.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <catch.hpp>
+#include <cstring>
+#include "hal.h"
+#include "InputFiles.hpp"
+#include "ImageUtils.hpp"
+#include "TestData_vww.hpp"
+#include "VisualWakeWordModel.hpp"
+#include "TensorFlowLiteMicro.hpp"
+
+
+bool RunInference(arm::app::Model& model, const int8_t* imageData)
+{
+    TfLiteTensor* inputTensor = model.GetInputTensor(0);
+    REQUIRE(inputTensor);
+
+    /* Copy the test image into the input tensor before running inference. */
+    std::memcpy(inputTensor->data.data, imageData, inputTensor->bytes);
+
+    return model.RunInference();
+}
+
+template<typename T>
+void TestInference(int imageIdx, arm::app::Model& model) {
+
+ auto image = test::get_ifm_data_array(imageIdx);
+ auto goldenFV = test::get_ofm_data_array(imageIdx);
+
+ REQUIRE(RunInference(model, image));
+
+ TfLiteTensor* outputTensor = model.GetOutputTensor(0);
+
+ REQUIRE(outputTensor);
+ REQUIRE(outputTensor->bytes == OFM_DATA_SIZE);
+ auto tensorData = tflite::GetTensorData<T>(outputTensor);
+ REQUIRE(tensorData);
+
+ for (size_t i = 0; i < outputTensor->bytes; i++) {
+ auto testVal = static_cast<int>(tensorData[i]);
+ auto goldenVal = static_cast<int>(goldenFV[i]);
+ CHECK(testVal == goldenVal);
+ }
+}
+
+
+/**
+ * @brief Given an image name, get its index
+ * @param[in] imageName Name of the image to look up
+ * @return index of the image if valid and (-1) if not found
+ */
+static int _GetImageIdx(const std::string& imageName)
+{
+ int imgIdx = -1;
+ for (uint32_t i = 0 ; i < NUMBER_OF_FILES; ++i) {
+ if (imageName == std::string(get_filename(i))) {
+ info("Image %s exists at index %u\n", get_filename(i), i);
+ imgIdx = static_cast<int>(i);
+ break;
+ }
+ }
+
+ if (-1 == imgIdx) {
+ warn("Image %s not found!\n", imageName.c_str());
+ }
+
+ return imgIdx;
+}
+
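Note: this translation unit defines the TestInference and _GetImageIdx helpers but contains no TEST_CASE that calls them. A hypothetical caller, assuming an int8 output tensor, might look like:

    TEST_CASE("Run VWW inference against golden output", "[VWW]")
    {
        arm::app::VisualWakeWordModel model;
        REQUIRE(model.Init());
        TestInference<int8_t>(0, model);  /* image index 0; illustrative only */
    }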
diff --git a/tests/use_case/vww/VisualWakeWordTests.cc b/tests/use_case/vww/VisualWakeWordTests.cc
new file mode 100644
index 0000000..09f82da
--- /dev/null
+++ b/tests/use_case/vww/VisualWakeWordTests.cc
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define CATCH_CONFIG_MAIN
+#include <catch.hpp>
diff --git a/tests/use_case/vww/VisualWakeWordUCTests.cc b/tests/use_case/vww/VisualWakeWordUCTests.cc
new file mode 100644
index 0000000..891423b
--- /dev/null
+++ b/tests/use_case/vww/VisualWakeWordUCTests.cc
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <catch.hpp>
+#include "VisualWakeWordModel.hpp"
+#include "hal.h"
+
+#include "ClassificationResult.hpp"
+#include "Labels.hpp"
+#include "UseCaseHandler.hpp"
+#include "Classifier.hpp"
+#include "UseCaseCommonUtils.hpp"
+
+TEST_CASE("Model info")
+{
+ arm::app::VisualWakeWordModel model; /* model wrapper object */
+
+ /* Load the model */
+ REQUIRE(model.Init());
+
+ /* Instantiate application context */
+ arm::app::ApplicationContext caseContext;
+
+ caseContext.Set<arm::app::Model&>("model", model);
+
+ REQUIRE(model.ShowModelInfoHandler());
+}
+
+TEST_CASE("Inference by index")
+{
+ hal_platform platform;
+ data_acq_module data_acq;
+ data_psn_module data_psn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform */
+ hal_init(&platform, &data_acq, &data_psn, &timer);
+ hal_platform_init(&platform);
+
+ arm::app::VisualWakeWordModel model; /* model wrapper object */
+
+ /* Load the model */
+ REQUIRE(model.Init());
+
+ /* Instantiate application context */
+ arm::app::ApplicationContext caseContext;
+ arm::app::Profiler profiler{&platform, "pd"};
+ caseContext.Set<arm::app::Profiler&>("profiler", profiler);
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<arm::app::Model&>("model", model);
+ caseContext.Set<uint32_t>("imgIndex", 0);
+ arm::app::Classifier classifier; /* classifier wrapper object */
+ caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+ std::vector <std::string> labels;
+ GetLabelsVector(labels);
+ caseContext.Set<const std::vector <std::string>&>("labels", labels);
+
+ REQUIRE(arm::app::ClassifyImageHandler(caseContext, 0, false));
+
+ auto results = caseContext.Get<std::vector<arm::app::ClassificationResult>>("results");
+
+ REQUIRE(results[0].m_labelIdx == 0);
+}
+
+TEST_CASE("Inference run all images")
+{
+ hal_platform platform;
+ data_acq_module data_acq;
+ data_psn_module data_psn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform */
+ hal_init(&platform, &data_acq, &data_psn, &timer);
+ hal_platform_init(&platform);
+
+ arm::app::VisualWakeWordModel model; /* model wrapper object */
+
+ /* Load the model */
+ REQUIRE(model.Init());
+
+ /* Instantiate application context */
+ arm::app::ApplicationContext caseContext;
+ arm::app::Profiler profiler{&platform, "pd"};
+ caseContext.Set<arm::app::Profiler&>("profiler", profiler);
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<arm::app::Model&>("model", model);
+ caseContext.Set<uint32_t>("imgIndex", 0);
+ arm::app::Classifier classifier; /* classifier wrapper object */
+ caseContext.Set<arm::app::Classifier&>("classifier", classifier);
+
+ std::vector <std::string> labels;
+ GetLabelsVector(labels);
+ caseContext.Set<const std::vector <std::string>&>("labels", labels);
+
+ REQUIRE(arm::app::ClassifyImageHandler(caseContext, 0, true));
+}
+
+TEST_CASE("List all images")
+{
+ hal_platform platform;
+ data_acq_module data_acq;
+ data_psn_module data_psn;
+ platform_timer timer;
+
+ /* Initialise the HAL and platform */
+ hal_init(&platform, &data_acq, &data_psn, &timer);
+ hal_platform_init(&platform);
+
+ arm::app::VisualWakeWordModel model; /* model wrapper object */
+
+ /* Load the model */
+ REQUIRE(model.Init());
+
+ /* Instantiate application context */
+ arm::app::ApplicationContext caseContext;
+
+ caseContext.Set<hal_platform&>("platform", platform);
+ caseContext.Set<arm::app::Model&>("model", model);
+
+ REQUIRE(arm::app::ListFilesHandler(caseContext));
+}
\ No newline at end of file