author      Richard Burton <richard.burton@arm.com>    2021-08-12 17:26:30 +0100
committer   Richard Burton <richard.burton@arm.com>    2021-08-12 17:26:30 +0100
commit      0d110594b8a50ce3311be5187f01de2e3b8fe995 (patch)
tree        1e56414f491f1bbd29df4912e2354ac5e1682133
parent      d2b9853ca848f11dee55beedbb9d650763b3ed53 (diff)
download    ml-embedded-evaluation-kit-0d110594b8a50ce3311be5187f01de2e3b8fe995.tar.gz
MLECO-1904: Update to use latest TFLu
* Now uses separate TFLu github repo
* Fixes to align with API changes
* Update ASR model ops and re-enable ASR inference tests
* Set default release level to release_with_logs

Signed-off-by: Richard Burton <richard.burton@arm.com>
Change-Id: I57612088985dece1413c5c00a6e442381e07dd91
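A note on the ASR op change: the Wav2LetterModel.cc hunks below swap `AddMul()`/`AddMaximum()` for `AddLeakyRelu()`/`AddSoftmax()`. As a minimal sketch of the resulting registration pattern (the function name and the resolver capacity `<6>` are illustrative assumptions, not from the commit):

```cpp
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

/* Sketch of the updated Wav2Letter op set; capacity <6> is an assumption. */
static void EnlistWav2LetterOps(tflite::MicroMutableOpResolver<6>& resolver)
{
    resolver.AddConv2D();
    resolver.AddReshape();
    resolver.AddLeakyRelu();  /* replaces AddMul()     */
    resolver.AddSoftmax();    /* replaces AddMaximum() */
#if defined(ARM_NPU)
    resolver.AddEthosU();     /* custom operator for NPU-offloaded subgraphs */
#endif
}
```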
-rw-r--r--  .gitmodules                                                              |  2
-rw-r--r--  Readme.md                                                                |  2
m---------  dependencies/tensorflow                                                  |  0
-rw-r--r--  docs/quick_start.md                                                      | 12
-rw-r--r--  docs/use_cases/img_class.md                                              | 48
-rw-r--r--  scripts/cmake/tensorflow.cmake                                           | 13
-rwxr-xr-x  set_up_default_resources.py                                              |  8
-rw-r--r--  source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld | 26
-rw-r--r--  source/application/tensorflow-lite-micro/Model.cc                        | 20
-rw-r--r--  source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc          |  5
-rw-r--r--  source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp |  1
-rw-r--r--  source/use_case/asr/src/Wav2LetterModel.cc                               |  4
-rw-r--r--  source/use_case/img_class/usecase.cmake                                  |  4
-rw-r--r--  source/use_case/kws_asr/src/Wav2LetterModel.cc                           |  4
-rw-r--r--  tests/common/ClassifierTests.cc                                          |  2
-rw-r--r--  tests/use_case/asr/AsrClassifierTests.cc                                 |  4
-rw-r--r--  tests/use_case/asr/InferenceTestWav2Letter.cc                            |  5
-rw-r--r--  tests/use_case/asr/Wav2LetterPreprocessingTest.cc                        |  2
-rw-r--r--  tests/use_case/img_class/InferenceTestMobilenetV2.cc                     |  2
-rw-r--r--  tests/use_case/kws_asr/InferenceTestWav2Letter.cc                        |  5
-rw-r--r--  tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc                    |  2
21 files changed, 93 insertions, 78 deletions
diff --git a/.gitmodules b/.gitmodules
index 12a4df7..66bff3c 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,6 @@
[submodule "dependencies/tensorflow"]
path = dependencies/tensorflow
- url = https://github.com/tensorflow/tensorflow
+ url = https://github.com/tensorflow/tflite-micro.git
[submodule "dependencies/cmsis"]
path = dependencies/cmsis
url = https://github.com/ARM-software/CMSIS_5.git
diff --git a/Readme.md b/Readme.md
index 57ae858..332a51c 100644
--- a/Readme.md
+++ b/Readme.md
@@ -27,7 +27,7 @@ The example application at your disposal and the utilized models are listed in t
| ML application | Description | Neural Network Model |
| :----------------------------------: | :-----------------------------------------------------: | :----: |
-| [Image classification](./docs/use_cases/img_class.md) | Recognize the presence of objects in a given image | [Mobilenet V2](https://github.com/ARM-software/ML-zoo/blob/master/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8) |
+| [Image classification](./docs/use_cases/img_class.md) | Recognize the presence of objects in a given image | [Mobilenet V2](https://github.com/ARM-software/ML-zoo/tree/master/models/image_classification/mobilenet_v2_1.0_224/tflite_int8) |
| [Keyword spotting(KWS)](./docs/use_cases/kws.md) | Recognize the presence of a key word in a recording | [DS-CNN-L](https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8) |
| [Automated Speech Recognition(ASR)](./docs/use_cases/asr.md) | Transcribe words in a recording | [Wav2Letter](https://github.com/ARM-software/ML-zoo/blob/master/models/speech_recognition/wav2letter/tflite_int8) |
| [KWS and ASR](./docs/use_cases/kws_asr.md) | Utilise Cortex-M and Ethos-U to transcribe words in a recording after a keyword was spotted | [DS-CNN-L](https://github.com/ARM-software/ML-zoo/blob/master/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8) [Wav2Letter](https://github.com/ARM-software/ML-zoo/blob/master/models/speech_recognition/wav2letter/tflite_int8) |
diff --git a/dependencies/tensorflow b/dependencies/tensorflow
-Subproject 6cff09aee1f832d495b3cae40cab0de58155a0a
+Subproject f510d38d0eaa3195ce3af66e3f32648740f08af
diff --git a/docs/quick_start.md b/docs/quick_start.md
index d1039fe..878bdcf 100644
--- a/docs/quick_start.md
+++ b/docs/quick_start.md
@@ -78,11 +78,11 @@ curl -L https://github.com/ARM-software/ML-zoo/raw/1a92aa08c0de49a7304e0a7f3f59d
--output ./resources_downloaded/asr/ifm0.npy
curl -L https://github.com/ARM-software/ML-zoo/raw/1a92aa08c0de49a7304e0a7f3f59df6f4fd33ac8/models/speech_recognition/wav2letter/tflite_pruned_int8/testing_output/Identity_int8/0.npy \
--output ./resources_downloaded/asr/ofm0.npy
-curl -L https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8/mobilenet_v2_1.0_224_quantized_1_default_1.tflite \
- --output ./resources_downloaded/img_class/mobilenet_v2_1.0_224_quantized_1_default_1.tflite
-curl -L https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8/testing_input/input/0.npy \
+curl -L https://github.com/ARM-software/ML-zoo/raw/e0aa361b03c738047b9147d1a50e3f2dcb13dbcb/models/image_classification/mobilenet_v2_1.0_224/tflite_int8/mobilenet_v2_1.0_224_INT8.tflite \
+ --output ./resources_downloaded/img_class/mobilenet_v2_1.0_224_INT8.tflite
+curl -L https://github.com/ARM-software/ML-zoo/raw/e0aa361b03c738047b9147d1a50e3f2dcb13dbcb/models/image_classification/mobilenet_v2_1.0_224/tflite_int8/testing_input/tfl.quantize/0.npy \
--output ./resources_downloaded/img_class/ifm0.npy
-curl -L https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8/testing_output/output/0.npy \
+curl -L https://github.com/ARM-software/ML-zoo/raw/e0aa361b03c738047b9147d1a50e3f2dcb13dbcb/models/image_classification/mobilenet_v2_1.0_224/tflite_int8/testing_output/MobilenetV2/Predictions/Reshape_11/0.npy \
--output ./resources_downloaded/img_class/ofm0.npy
curl -L https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/keyword_spotting/ds_cnn_large/tflite_clustered_int8/ds_cnn_clustered_int8.tflite \
--output ./resources_downloaded/kws/ds_cnn_clustered_int8.tflite
@@ -137,13 +137,13 @@ mv resources_downloaded/kws_asr/ds_cnn_clustered_int8_vela.tflite resources_down
--output-dir=resources_downloaded/inference_runner
mv resources_downloaded/inference_runner/dnn_s_quantized_vela.tflite resources_downloaded/inference_runner/dnn_s_quantized_vela_H128.tflite
-. resources_downloaded/env/bin/activate && vela resources_downloaded/img_class/mobilenet_v2_1.0_224_quantized_1_default_1.tflite \
+. resources_downloaded/env/bin/activate && vela resources_downloaded/img_class/mobilenet_v2_1.0_224_INT8.tflite \
--accelerator-config=ethos-u55-128 \
--optimise Performance --config scripts/vela/default_vela.ini \
--memory-mode=Shared_Sram \
--system-config=Ethos_U55_High_End_Embedded \
--output-dir=resources_downloaded/img_class
-mv resources_downloaded/img_class/mobilenet_v2_1.0_224_quantized_1_default_1_vela.tflite resources_downloaded/img_class/mobilenet_v2_1.0_224_quantized_1_default_1_vela_H128.tflite
+mv resources_downloaded/img_class/mobilenet_v2_1.0_224_INT8_vela.tflite resources_downloaded/img_class/mobilenet_v2_1.0_224_INT8_vela_H128.tflite
. resources_downloaded/env/bin/activate && vela resources_downloaded/asr/wav2letter_int8.tflite \
--accelerator-config=ethos-u55-128 \
diff --git a/docs/use_cases/img_class.md b/docs/use_cases/img_class.md
index b3544de..ae74d8a 100644
--- a/docs/use_cases/img_class.md
+++ b/docs/use_cases/img_class.md
@@ -319,7 +319,6 @@ What the preceding choices do:
4. Show NN model info: Prints information about the model data type and the input and output tensor sizes:
```log
- INFO - uTFL version: 2.5.0
INFO - Model info:
INFO - Model INPUT tensors:
INFO - tensor type is UINT8
@@ -329,19 +328,20 @@ What the preceding choices do:
INFO - 2: 224
INFO - 3: 3
INFO - Quant dimension: 0
- INFO - Scale[0] = 0.007812
- INFO - ZeroPoint[0] = 128
+ INFO - Scale[0] = 0.007843
+ INFO - ZeroPoint[0] = -1
INFO - Model OUTPUT tensors:
- INFO - tensor type is UINT8
+ INFO - tensor type is INT8
INFO - tensor occupies 1001 bytes with dimensions
INFO - 0: 1
INFO - 1: 1001
INFO - Quant dimension: 0
- INFO - Scale[0] = 0.098893
- INFO - ZeroPoint[0] = 58
- INFO - Activation buffer (a.k.a tensor arena) size used: 521760
+ INFO - Scale[0] = 0.03906
+ INFO - ZeroPoint[0] = -128
+ INFO - Activation buffer (a.k.a tensor arena) size used: 1510012
INFO - Number of operators: 1
INFO - Operator 0: ethos-u
+
```
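The `Scale` and `ZeroPoint` fields above are the usual affine quantization parameters, with real = scale * (q - zero_point). A small self-contained sketch using the input-tensor values from this log:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    /* Input tensor parameters from the log above. */
    const float scale     = 0.007843f;
    const int   zeroPoint = -1;

    const int8_t q = 127;  /* an example quantized INT8 value */
    const float real = scale * (static_cast<int>(q) - zeroPoint);

    printf("dequantized value: %f\n", real);  /* ~1.0 */
    return 0;
}
```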
5. List Images: Prints a list of image indexes and the corresponding filenames embedded in the application, like so:
@@ -364,18 +364,18 @@ The following example illustrates an application output for classification:
INFO - Running inference on image 0 => cat.bmp
INFO - Final results:
INFO - Total number of inferences: 1
-INFO - 0) 282 (14.636096) -> tabby, tabby cat
-INFO - 1) 286 (14.537203) -> Egyptian cat
-INFO - 2) 283 (12.757138) -> tiger cat
-INFO - 3) 458 (7.021370) -> bow tie, bow-tie, bowtie
-INFO - 4) 288 (7.021370) -> lynx, catamount
+INFO - 0) 282 (0.753906) -> tabby, tabby cat
+INFO - 1) 286 (0.148438) -> Egyptian cat
+INFO - 2) 283 (0.062500) -> tiger cat
+INFO - 3) 458 (0.003906) -> bow tie, bow-tie, bowtie
+INFO - 4) 288 (0.003906) -> lynx, catamount
INFO - Profile for Inference:
-INFO - NPU AXI0_RD_DATA_BEAT_RECEIVED beats: 2489726
-INFO - NPU AXI0_WR_DATA_BEAT_WRITTEN beats: 1098726
-INFO - NPU AXI1_RD_DATA_BEAT_RECEIVED beats: 471129
-INFO - NPU ACTIVE cycles: 7489258
-INFO - NPU IDLE cycles: 914
-INFO - NPU TOTAL cycles: 7490172
+INFO - NPU AXI0_RD_DATA_BEAT_RECEIVED beats: 2468259
+INFO - NPU AXI0_WR_DATA_BEAT_WRITTEN beats: 1151319
+INFO - NPU AXI1_RD_DATA_BEAT_RECEIVED beats: 432351
+INFO - NPU ACTIVE cycles: 7345741
+INFO - NPU IDLE cycles: 431
+INFO - NPU TOTAL cycles: 7346172
```
It can take several minutes to complete one inference run. The average time is around 2-3 minutes.
@@ -387,18 +387,18 @@ The profiling section of the log shows that for this inference:
- *Ethos-U* PMU report:
- - 7,490,172 total cycle: The number of NPU cycles.
+ - 7,346,172 total cycles: The number of NPU cycles.
- - 7,489,258 active cycles: The number of NPU cycles that were used for computation.
+ - 7,345,741 active cycles: The number of NPU cycles that were used for computation.
- - 914 idle cycles: The number of cycles for which the NPU was idle.
+ - 431 idle cycles: The number of cycles for which the NPU was idle.
- - 2,489,726 AXI0 read beats: The number of AXI beats with read transactions from AXI0 bus. AXI0 is the bus where the
+ - 2,468,259 AXI0 read beats: The number of AXI beats with read transactions from AXI0 bus. AXI0 is the bus where the
*Ethos-U* NPU reads and writes to the computation buffers, activation buf, or tensor arenas.
- - 1,098,726 AXI0 write beats: The number of AXI beats with write transactions to AXI0 bus.
+ - 1,151,319 AXI0 write beats: The number of AXI beats with write transactions to AXI0 bus.
- - 471,129 AXI1 read beats: The number of AXI beats with read transactions from AXI1 bus. AXI1 is the bus where the
+ - 432,351 AXI1 read beats: The number of AXI beats with read transactions from AXI1 bus. AXI1 is the bus where the
*Ethos-U* NPU reads the model. So, read-only.
- For FPGA platforms, a CPU cycle count can also be enabled. However, do not use cycle counters for FVP, as the CPU
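A quick arithmetic check on the PMU figures above: idle cycles are simply total minus active, which is where the 431 comes from:

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    /* Figures from the profiling log above. */
    const uint64_t totalCycles  = 7346172;
    const uint64_t activeCycles = 7345741;

    assert(totalCycles - activeCycles == 431);  /* NPU IDLE cycles */
    return 0;
}
```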
diff --git a/scripts/cmake/tensorflow.cmake b/scripts/cmake/tensorflow.cmake
index d0654b8..c2906f5 100644
--- a/scripts/cmake/tensorflow.cmake
+++ b/scripts/cmake/tensorflow.cmake
@@ -20,14 +20,12 @@ ProcessorCount(J)
if (CMAKE_BUILD_TYPE STREQUAL Debug)
set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "debug")
- set(TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL "-O0")
+ set(TENSORFLOW_LITE_MICRO_CORE_OPTIMIZATION_LEVEL "-O0")
+ set(TENSORFLOW_LITE_MICRO_KERNEL_OPTIMIZATION_LEVEL "-O0")
elseif (CMAKE_BUILD_TYPE STREQUAL Release)
- set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "release")
- set(TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL "-O3")
-elseif(CMAKE_BUILD_TYPE STREQUAL RelWithDebInfo)
set(TENSORFLOW_LITE_MICRO_DEFAULT_BUILD_TYPE "release_with_logs")
- # No override for optimsiation level; we rely on the default
- # optimisation applied by TensorFlow Lite Micro build here.
+ set(TENSORFLOW_LITE_MICRO_CORE_OPTIMIZATION_LEVEL "-O3")
+ set(TENSORFLOW_LITE_MICRO_KERNEL_OPTIMIZATION_LEVEL "-O3")
elseif (NOT DEFINED TENSORFLOW_LITE_MICRO_BUILD_TYPE)
message(WARNING "TENSORFLOW_LITE_MICRO_BUILD_TYPE is not set.")
message(FATAL_ERROR "Build type ${CMAKE_BUILD_TYPE} does not have a corresponding "
@@ -109,7 +107,8 @@ add_custom_target(tensorflow_build ALL
# Conditional arguments
$<$<BOOL:${ARMCLANG_DEBUG_DWARF_LEVEL}>:ARMCLANG_DEBUG_DWARF_LEVEL=${ARMCLANG_DEBUG_DWARF_LEVEL}>
- $<$<BOOL:${TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL}>:OPTIMIZATION_LEVEL=${TENSORFLOW_LITE_MICRO_OPTIMIZATION_LEVEL}>
+ $<$<BOOL:${TENSORFLOW_LITE_MICRO_CORE_OPTIMIZATION_LEVEL}>:CORE_OPTIMIZATION_LEVEL=${TENSORFLOW_LITE_MICRO_CORE_OPTIMIZATION_LEVEL}>
+ $<$<BOOL:${TENSORFLOW_LITE_MICRO_KERNEL_OPTIMIZATION_LEVEL}>:KERNEL_OPTIMIZATION_LEVEL=${TENSORFLOW_LITE_MICRO_KERNEL_OPTIMIZATION_LEVEL}>
$<$<BOOL:${TENSORFLOW_LITE_MICRO_OPTIMIZED_KERNEL}>:OPTIMIZED_KERNEL_DIR=${TENSORFLOW_LITE_MICRO_OPTIMIZED_KERNEL}>
$<$<BOOL:${TENSORFLOW_LITE_MICRO_CO_PROCESSOR}>:CO_PROCESSOR=${TENSORFLOW_LITE_MICRO_CO_PROCESSOR}>
diff --git a/set_up_default_resources.py b/set_up_default_resources.py
index 6cf5a88..418af4f 100755
--- a/set_up_default_resources.py
+++ b/set_up_default_resources.py
@@ -45,12 +45,12 @@ json_uc_res = [{
},
{
"use_case_name": "img_class",
- "resources": [{"name": "mobilenet_v2_1.0_224_quantized_1_default_1.tflite",
- "url": "https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8/mobilenet_v2_1.0_224_quantized_1_default_1.tflite"},
+ "resources": [{"name": "mobilenet_v2_1.0_224_INT8.tflite",
+ "url": "https://github.com/ARM-software/ML-zoo/raw/e0aa361b03c738047b9147d1a50e3f2dcb13dbcb/models/image_classification/mobilenet_v2_1.0_224/tflite_int8/mobilenet_v2_1.0_224_INT8.tflite"},
{"name": "ifm0.npy",
- "url": "https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8/testing_input/input/0.npy"},
+ "url": "https://github.com/ARM-software/ML-zoo/raw/e0aa361b03c738047b9147d1a50e3f2dcb13dbcb/models/image_classification/mobilenet_v2_1.0_224/tflite_int8/testing_input/tfl.quantize/0.npy"},
{"name": "ofm0.npy",
- "url": "https://github.com/ARM-software/ML-zoo/raw/68b5fbc77ed28e67b2efc915997ea4477c1d9d5b/models/image_classification/mobilenet_v2_1.0_224/tflite_uint8/testing_output/output/0.npy"}]
+ "url": "https://github.com/ARM-software/ML-zoo/raw/e0aa361b03c738047b9147d1a50e3f2dcb13dbcb/models/image_classification/mobilenet_v2_1.0_224/tflite_int8/testing_output/MobilenetV2/Predictions/Reshape_11/0.npy"}]
},
{
"use_case_name": "kws",
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
index 8bb99cd..46fc2e5 100644
--- a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
@@ -65,7 +65,14 @@ SECTIONS
.text.at_itcm :
{
KEEP(*(.vectors))
- *(.text*)
+
+ /**
+ * All code goes here, with the exception of the
+ * all_ops_resolver object file, which is instead
+ * placed in BRAM. See the comment in the BRAM
+ * section for details.
+ **/
+ *(EXCLUDE_FILE(*all_ops_resolver.o) .text*)
KEEP(*(.init))
KEEP(*(.fini))
@@ -87,11 +94,6 @@ SECTIONS
KEEP(*(.eh_frame*))
} > ITCM
- .ARM.extab.at_itcm :
- {
- *(.ARM.extab* .gnu.linkonce.armextab.*)
- } > ITCM
-
__exidx_start = .;
.ARM.exidx.at_itcm :
{
@@ -208,6 +210,18 @@ SECTIONS
KEEP(*(.jcr*))
. = ALIGN(4);
+ *(.ARM.extab* .gnu.linkonce.armextab.*)
+ . = ALIGN(4);
+
+ /**
+ * Place the all ops resolver code here. This saves
+ * roughly 4k in the ITCM load region. By default, it is
+ * only included for the inference runner use case.
+ **/
+ *all_ops_resolver.o (*.text*)
+ . = ALIGN(4);
+
__data_end__ = .;
} > BRAM
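The `__data_end__` symbol above is defined by the linker, not by any C++ code. To confirm at runtime where the BRAM data region (now including the relocated all_ops_resolver code) ends, a hedged sketch, meaningful only when linked against this script, could be:

```cpp
#include <cstdio>

/* Declaration only: the address comes from the linker script above. */
extern "C" char __data_end__;

void PrintBramDataEnd()
{
    printf("BRAM data region ends at: %p\n",
           static_cast<void*>(&__data_end__));
}
```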
diff --git a/source/application/tensorflow-lite-micro/Model.cc b/source/application/tensorflow-lite-micro/Model.cc
index e9c6cd3..80ef3c3 100644
--- a/source/application/tensorflow-lite-micro/Model.cc
+++ b/source/application/tensorflow-lite-micro/Model.cc
@@ -196,14 +196,22 @@ void arm::app::Model::LogInterpreterInfo()
info("Activation buffer (a.k.a tensor arena) size used: %zu\n",
this->m_pInterpreter->arena_used_bytes());
- const size_t nOperators = this->m_pInterpreter->operators_size();
- info("Number of operators: %zu\n", nOperators);
+ /* We expect there to be only one subgraph. */
+ const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0);
+ info("Number of operators: %" PRIu32 "\n", nOperators);
- /* For each operator, display registration information */
+ const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0);
+
+ auto* opcodes = this->m_pModel->operator_codes();
+
+ /* For each operator, display registration information. */
for (size_t i = 0 ; i < nOperators; ++i) {
- const tflite::NodeAndRegistration nodeReg =
- this->m_pInterpreter->node_and_registration(i);
- const TfLiteRegistration* reg = nodeReg.registration;
+ const tflite::Operator* op = subgraph->operators()->Get(i);
+ const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
+ const TfLiteRegistration* reg = nullptr;
+
+ tflite::GetRegistrationFromOpCode(opcode, this->GetOpResolver(),
+ this->m_pErrorReporter, &reg);
std::string opName{""};
if (reg) {
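The rewritten loop reads operator metadata from the flatbuffer schema (the old `operators_size()`/`node_and_registration()` interpreter calls are gone in current TFLu) and, as its comment notes, assumes a single subgraph before calling `Get(0)`. A hypothetical guard for that assumption, not part of the commit:

```cpp
#include "tensorflow/lite/schema/schema_generated.h"

/* Returns true only when the model has exactly one subgraph, as the
 * enumeration code above assumes. */
static bool HasSingleSubgraph(const tflite::Model* model)
{
    return model != nullptr
        && model->subgraphs() != nullptr
        && model->subgraphs()->size() == 1;
}
```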
diff --git a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
index ce36a8f..0b08513 100644
--- a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
+++ b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
@@ -19,10 +19,7 @@
#include "hal.h"
void PrintTensorFlowVersion()
-{
- info("uTFL version: %u.%u.%u\n", TF_MAJOR_VERSION, TF_MINOR_VERSION,
- TF_PATCH_VERSION);
-}
+{}
arm::app::QuantParams arm::app::GetTensorQuantParams(TfLiteTensor* tensor)
{
diff --git a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
index 677b4ba..1333f6c 100644
--- a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
+++ b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
@@ -51,7 +51,6 @@
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/schema/schema_generated.h"
-#include "tensorflow/lite/version.h"
#if defined (TESTS)
#include "tensorflow/lite/micro/test_helpers.h"
diff --git a/source/use_case/asr/src/Wav2LetterModel.cc b/source/use_case/asr/src/Wav2LetterModel.cc
index 6f87be8..a22dc55 100644
--- a/source/use_case/asr/src/Wav2LetterModel.cc
+++ b/source/use_case/asr/src/Wav2LetterModel.cc
@@ -26,9 +26,9 @@ const tflite::MicroOpResolver& arm::app::Wav2LetterModel::GetOpResolver()
bool arm::app::Wav2LetterModel::EnlistOperations()
{
this->m_opResolver.AddConv2D();
- this->m_opResolver.AddMul();
- this->m_opResolver.AddMaximum();
this->m_opResolver.AddReshape();
+ this->m_opResolver.AddLeakyRelu();
+ this->m_opResolver.AddSoftmax();
#if defined(ARM_NPU)
if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
diff --git a/source/use_case/img_class/usecase.cmake b/source/use_case/img_class/usecase.cmake
index 63a4c2a..e46de00 100644
--- a/source/use_case/img_class/usecase.cmake
+++ b/source/use_case/img_class/usecase.cmake
@@ -47,9 +47,9 @@ USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen
STRING)
if (ETHOS_U_NPU_ENABLED)
- set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/mobilenet_v2_1.0_224_quantized_1_default_1_vela_H128.tflite)
+ set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/mobilenet_v2_1.0_224_INT8_vela_H128.tflite)
else()
- set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/mobilenet_v2_1.0_224_quantized_1_default_1.tflite)
+ set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/mobilenet_v2_1.0_224_INT8.tflite)
endif()
USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the evaluation application. Model files must be in tflite format."
diff --git a/source/use_case/kws_asr/src/Wav2LetterModel.cc b/source/use_case/kws_asr/src/Wav2LetterModel.cc
index 62245b9..affa1a6 100644
--- a/source/use_case/kws_asr/src/Wav2LetterModel.cc
+++ b/source/use_case/kws_asr/src/Wav2LetterModel.cc
@@ -35,8 +35,8 @@ const tflite::MicroOpResolver& arm::app::Wav2LetterModel::GetOpResolver()
bool arm::app::Wav2LetterModel::EnlistOperations()
{
this->m_opResolver.AddConv2D();
- this->m_opResolver.AddMul();
- this->m_opResolver.AddMaximum();
+ this->m_opResolver.AddLeakyRelu();
+ this->m_opResolver.AddSoftmax();
this->m_opResolver.AddReshape();
#if defined(ARM_NPU)
diff --git a/tests/common/ClassifierTests.cc b/tests/common/ClassifierTests.cc
index a04e4c2..d950304 100644
--- a/tests/common/ClassifierTests.cc
+++ b/tests/common/ClassifierTests.cc
@@ -21,7 +21,7 @@
template<typename T>
void test_classifier_result(std::vector<std::pair<uint32_t, T>>& selectedResults, T defaultTensorValue) {
- const int dimArray[] = {1, 1001};
+ int dimArray[] = {1, 1001};
std::vector <std::string> labels(1001);
std::vector<T> outputVec(1001, defaultTensorValue);
TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
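The `const` is dropped here because TFLu's test helper now takes a mutable `int*` and reinterprets the buffer in place: element 0 holds the rank, the remaining entries the shape. A short sketch of the convention:

```cpp
#include "tensorflow/lite/micro/test_helpers.h"

void BuildDims()
{
    /* {rank, dim0, dim1}: rank 2, shape [1, 1001]. The helper casts this
     * buffer to TfLiteIntArray*, which is why it must be mutable. */
    int dimArray[] = {2, 1, 1001};
    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dimArray);
    (void)dims;
}
```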
diff --git a/tests/use_case/asr/AsrClassifierTests.cc b/tests/use_case/asr/AsrClassifierTests.cc
index 12523aa..e2bfb18 100644
--- a/tests/use_case/asr/AsrClassifierTests.cc
+++ b/tests/use_case/asr/AsrClassifierTests.cc
@@ -30,7 +30,7 @@ TEST_CASE("Test invalid classifier")
TEST_CASE("Test valid classifier UINT8") {
- const int dimArray[] = {4, 1, 1, 246, 29};
+ int dimArray[] = {4, 1, 1, 246, 29};
std::vector <std::string> labels(29);
std::vector <uint8_t> outputVec(7134);
TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
@@ -46,7 +46,7 @@ TEST_CASE("Test valid classifier UINT8") {
TEST_CASE("Get classification results") {
- const int dimArray[] = {4, 1, 1, 10, 15};
+ int dimArray[] = {4, 1, 1, 10, 15};
std::vector <std::string> labels(15);
std::vector<uint8_t> outputVec(150, static_cast<uint8_t>(1));
TfLiteIntArray* dims= tflite::testing::IntArrayFromInts(dimArray);
diff --git a/tests/use_case/asr/InferenceTestWav2Letter.cc b/tests/use_case/asr/InferenceTestWav2Letter.cc
index 0943db8..d5e6c35 100644
--- a/tests/use_case/asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/asr/InferenceTestWav2Letter.cc
@@ -54,8 +54,7 @@ bool RunInferenceRandom(arm::app::Model& model)
return true;
}
-/* Skip this test, Wav2LetterModel if not Vela optimized but only from ML-zoo will fail. */
-TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8", "[Wav2Letter][.]")
+TEST_CASE("Running random inference with TensorFlow Lite Micro and Wav2LetterModel Int8", "[Wav2Letter]")
{
arm::app::Wav2LetterModel model{};
@@ -86,7 +85,7 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
}
}
-TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
{
for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
auto input_goldenFV = get_ifm_data_array(i);;
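Removing the `[.]` tag above is what re-enables these ASR inference tests: in Catch2, a case tagged `[.]` is hidden from the default run and executes only when selected explicitly. A minimal illustration (assumes a Catch2 main is linked elsewhere):

```cpp
#include <catch.hpp>

/* Runs on a plain invocation of the test binary. */
TEST_CASE("Visible by default", "[Wav2Letter]")
{
    REQUIRE(true);
}

/* Hidden: runs only when named or selected via the "[.]" tag expression. */
TEST_CASE("Hidden by default", "[Wav2Letter][.]")
{
    REQUIRE(true);
}
```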
diff --git a/tests/use_case/asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
index 1391011..8af9014 100644
--- a/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
+++ b/tests/use_case/asr/Wav2LetterPreprocessingTest.cc
@@ -108,7 +108,7 @@ TEST_CASE("Preprocessing calculation INT8")
/* Constants. */
const uint32_t windowLen = 512;
const uint32_t windowStride = 160;
- const int dimArray[] = {3, 1, numMfccFeatures * 3, numMfccVectors};
+ int dimArray[] = {3, 1, numMfccFeatures * 3, numMfccVectors};
const float quantScale = 0.1410219967365265;
const int quantOffset = -11;
diff --git a/tests/use_case/img_class/InferenceTestMobilenetV2.cc b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
index b2720a8..6fbf374 100644
--- a/tests/use_case/img_class/InferenceTestMobilenetV2.cc
+++ b/tests/use_case/img_class/InferenceTestMobilenetV2.cc
@@ -24,7 +24,7 @@
using namespace test;
-bool RunInference(arm::app::Model& model, const uint8_t imageData[])
+bool RunInference(arm::app::Model& model, const int8_t imageData[])
{
TfLiteTensor* inputTensor = model.GetInputTensor(0);
REQUIRE(inputTensor);
diff --git a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
index 897ad0a..5f5ad98 100644
--- a/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
+++ b/tests/use_case/kws_asr/InferenceTestWav2Letter.cc
@@ -55,8 +55,7 @@ bool RunInferenceRandom(arm::app::Model& model)
return true;
}
-/* Skip this test, Wav2LetterModel if not Vela optimized but only from ML-zoo will fail. */
-TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+TEST_CASE("Running random inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
{
arm::app::Wav2LetterModel model{};
@@ -88,7 +87,7 @@ void TestInference(const T* input_goldenFV, const T* output_goldenFV, arm::app::
}
}
-TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter][.]")
+TEST_CASE("Running inference with Tflu and Wav2LetterModel Int8", "[Wav2Letter]")
{
for (uint32_t i = 0 ; i < NUMBER_OF_FM_FILES; ++i) {
auto input_goldenFV = get_ifm_data_array(i);;
diff --git a/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
index e71366a..16dbea2 100644
--- a/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
+++ b/tests/use_case/kws_asr/Wav2LetterPreprocessingTest.cc
@@ -108,7 +108,7 @@ TEST_CASE("Preprocessing calculation INT8")
/* Constants. */
const uint32_t windowLen = 512;
const uint32_t windowStride = 160;
- const int dimArray[] = {3, 1, numMfccFeatures * 3, numMfccVectors};
+ int dimArray[] = {3, 1, numMfccFeatures * 3, numMfccVectors};
const float quantScale = 0.1410219967365265;
const int quantOffset = -11;