author      Richard Burton <richard.burton@arm.com>    2021-08-12 17:26:30 +0100
committer   Richard Burton <richard.burton@arm.com>    2021-08-12 17:26:30 +0100
commit      0d110594b8a50ce3311be5187f01de2e3b8fe995 (patch)
tree        1e56414f491f1bbd29df4912e2354ac5e1682133 /source
parent      d2b9853ca848f11dee55beedbb9d650763b3ed53 (diff)
download    ml-embedded-evaluation-kit-0d110594b8a50ce3311be5187f01de2e3b8fe995.tar.gz
MLECO-1904: Update to use latest TFLu
* Now uses separate TFLu GitHub repo
* Fixes to align with API changes
* Update ASR model ops and re-enable ASR inference tests
* Set default release level to release_with_logs

Signed-off-by: Richard Burton <richard.burton@arm.com>
Change-Id: I57612088985dece1413c5c00a6e442381e07dd91
Diffstat (limited to 'source')
-rw-r--r--  source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld  | 26
-rw-r--r--  source/application/tensorflow-lite-micro/Model.cc                           | 20
-rw-r--r--  source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc             |  5
-rw-r--r--  source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp    |  1
-rw-r--r--  source/use_case/asr/src/Wav2LetterModel.cc                                  |  4
-rw-r--r--  source/use_case/img_class/usecase.cmake                                     |  4
-rw-r--r--  source/use_case/kws_asr/src/Wav2LetterModel.cc                              |  4
7 files changed, 41 insertions, 23 deletions
diff --git a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
index 8bb99cd..46fc2e5 100644
--- a/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
+++ b/source/application/hal/platforms/bare-metal/bsp/mem_layout/mps3-sse-300.ld
@@ -65,7 +65,14 @@ SECTIONS
.text.at_itcm :
{
KEEP(*(.vectors))
- *(.text*)
+
+ /**
+ * All code goes here, with the exception of the
+ * all_ops_resolver object file, which is instead
+ * placed in BRAM. See the comment in the BRAM
+ * section for details.
+ **/
+ *(EXCLUDE_FILE(*all_ops_resolver.o) .text*)
KEEP(*(.init))
KEEP(*(.fini))
@@ -87,11 +94,6 @@ SECTIONS
KEEP(*(.eh_frame*))
} > ITCM
- .ARM.extab.at_itcm :
- {
- *(.ARM.extab* .gnu.linkonce.armextab.*)
- } > ITCM
-
__exidx_start = .;
.ARM.exidx.at_itcm :
{
@@ -208,6 +210,18 @@ SECTIONS
KEEP(*(.jcr*))
. = ALIGN(4);
+ *(.ARM.extab* .gnu.linkonce.armextab.*)
+ . = ALIGN(4);
+
+ /**
+ * Place the all ops resolver code and data here. This saves
+ * roughly 4k in the ITCM load region. The all ops resolver is
+ * only included (by default) by the inference runner use
+ * case.
+ **/
+ *all_ops_resolver.o (*.text*)
+ . = ALIGN(4);
+
__data_end__ = .;
} > BRAM
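
For context, only the generic inference runner use case links in TFLu's catch-all resolver, which is why that object file is singled out for BRAM placement above. A minimal sketch, assuming the standard tflite::AllOpsResolver header from the TFLu repo; the helper name GetInferenceRunnerOpResolver is illustrative, not taken from the patch:

    /*
     * Sketch only: an op resolver as the inference runner use case would use it.
     * AllOpsResolver registers every built-in operator, which is why its object
     * file is large enough to be worth moving out of ITCM and into BRAM.
     */
    #include "tensorflow/lite/micro/all_ops_resolver.h"

    static const tflite::MicroOpResolver& GetInferenceRunnerOpResolver()
    {
        static tflite::AllOpsResolver resolver;  /* all built-in ops registered */
        return resolver;
    }

The other use cases register only the handful of operators their model needs, so their code stays in ITCM.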
diff --git a/source/application/tensorflow-lite-micro/Model.cc b/source/application/tensorflow-lite-micro/Model.cc
index e9c6cd3..80ef3c3 100644
--- a/source/application/tensorflow-lite-micro/Model.cc
+++ b/source/application/tensorflow-lite-micro/Model.cc
@@ -196,14 +196,22 @@ void arm::app::Model::LogInterpreterInfo()
info("Activation buffer (a.k.a tensor arena) size used: %zu\n",
this->m_pInterpreter->arena_used_bytes());
- const size_t nOperators = this->m_pInterpreter->operators_size();
- info("Number of operators: %zu\n", nOperators);
+ /* We expect there to be only one subgraph. */
+ const uint32_t nOperators = tflite::NumSubgraphOperators(this->m_pModel, 0);
+ info("Number of operators: %" PRIu32 "\n", nOperators);
- /* For each operator, display registration information */
+ const tflite::SubGraph* subgraph = this->m_pModel->subgraphs()->Get(0);
+
+ auto* opcodes = this->m_pModel->operator_codes();
+
+ /* For each operator, display registration information. */
for (size_t i = 0 ; i < nOperators; ++i) {
- const tflite::NodeAndRegistration nodeReg =
- this->m_pInterpreter->node_and_registration(i);
- const TfLiteRegistration* reg = nodeReg.registration;
+ const tflite::Operator* op = subgraph->operators()->Get(i);
+ const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
+ const TfLiteRegistration* reg = nullptr;
+
+ tflite::GetRegistrationFromOpCode(opcode, this->GetOpResolver(),
+ this->m_pErrorReporter, &reg);
std::string opName{""};
if (reg) {
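
The pattern above replaces the old interpreter-side operators_size()/node_and_registration() calls with the flatbuffer schema plus GetRegistrationFromOpCode(). A standalone sketch of the same loop, assuming the project's umbrella header pulls in the needed TFLu declarations and that info() is the application's logging macro; the helper name LogSubgraphOperators is illustrative:

    #include <cinttypes>
    #include "TensorFlowLiteMicro.hpp"  /* assumed to pull in the TFLu schema and
                                         * op-resolver declarations used below */

    /* Sketch only: enumerate and log the operators of the model's first subgraph
     * with the newer TFLu API, mirroring the updated Model::LogInterpreterInfo(). */
    static void LogSubgraphOperators(const tflite::Model* model,
                                     const tflite::MicroOpResolver& opResolver,
                                     tflite::ErrorReporter* errorReporter)
    {
        /* Only one subgraph is expected, as in the patch above. */
        const uint32_t nOperators = tflite::NumSubgraphOperators(model, 0);
        const tflite::SubGraph* subgraph = model->subgraphs()->Get(0);
        const auto* opcodes = model->operator_codes();

        for (uint32_t i = 0; i < nOperators; ++i) {
            const tflite::Operator* op = subgraph->operators()->Get(i);
            const tflite::OperatorCode* opcode = opcodes->Get(op->opcode_index());
            const TfLiteRegistration* reg = nullptr;

            if (kTfLiteOk == tflite::GetRegistrationFromOpCode(opcode, opResolver,
                                                               errorReporter, &reg)
                && reg != nullptr) {
                /* For custom operators, reg->custom_name holds the name instead. */
                info("\tOperator %" PRIu32 ": builtin code %d\n",
                     i, static_cast<int>(reg->builtin_code));
            }
        }
    }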
diff --git a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
index ce36a8f..0b08513 100644
--- a/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
+++ b/source/application/tensorflow-lite-micro/TensorFlowLiteMicro.cc
@@ -19,10 +19,7 @@
#include "hal.h"
void PrintTensorFlowVersion()
-{
- info("uTFL version: %u.%u.%u\n", TF_MAJOR_VERSION, TF_MINOR_VERSION,
- TF_PATCH_VERSION);
-}
+{}
arm::app::QuantParams arm::app::GetTensorQuantParams(TfLiteTensor* tensor)
{
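
GetTensorQuantParams() itself is unchanged by this patch; as a usage note, the values it returns follow TFLite's affine quantisation scheme, real_value = scale * (quantised_value - zero_point). A minimal sketch, assuming QuantParams exposes scale and offset members as in this code base; the helper name DequantiseElement is illustrative:

    /* Sketch only: dequantise one int8 element of a tensor using the helper above. */
    static float DequantiseElement(TfLiteTensor* tensor, size_t index)
    {
        const arm::app::QuantParams quant = arm::app::GetTensorQuantParams(tensor);
        const int8_t quantised = tensor->data.int8[index];
        return quant.scale * (static_cast<float>(quantised) -
                              static_cast<float>(quant.offset));
    }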
diff --git a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
index 677b4ba..1333f6c 100644
--- a/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
+++ b/source/application/tensorflow-lite-micro/include/TensorFlowLiteMicro.hpp
@@ -51,7 +51,6 @@
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/schema/schema_generated.h"
-#include "tensorflow/lite/version.h"
#if defined (TESTS)
#include "tensorflow/lite/micro/test_helpers.h"
diff --git a/source/use_case/asr/src/Wav2LetterModel.cc b/source/use_case/asr/src/Wav2LetterModel.cc
index 6f87be8..a22dc55 100644
--- a/source/use_case/asr/src/Wav2LetterModel.cc
+++ b/source/use_case/asr/src/Wav2LetterModel.cc
@@ -26,9 +26,9 @@ const tflite::MicroOpResolver& arm::app::Wav2LetterModel::GetOpResolver()
bool arm::app::Wav2LetterModel::EnlistOperations()
{
this->m_opResolver.AddConv2D();
- this->m_opResolver.AddMul();
- this->m_opResolver.AddMaximum();
this->m_opResolver.AddReshape();
+ this->m_opResolver.AddLeakyRelu();
+ this->m_opResolver.AddSoftmax();
#if defined(ARM_NPU)
if (kTfLiteOk == this->m_opResolver.AddEthosU()) {
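
For context, the op set above is registered on a tflite::MicroMutableOpResolver, whose template parameter must be at least the number of operators added. A standalone sketch with an illustrative capacity (the use-case models reserve their own counts); the helper name EnlistWav2LetterOps is illustrative:

    #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

    /* Sketch only: register the op set the updated Wav2Letter model needs.
     * The <5> capacity is illustrative; it just has to cover the ops added. */
    static bool EnlistWav2LetterOps(tflite::MicroMutableOpResolver<5>& resolver)
    {
        bool ok = (kTfLiteOk == resolver.AddConv2D())
               && (kTfLiteOk == resolver.AddReshape())
               && (kTfLiteOk == resolver.AddLeakyRelu())
               && (kTfLiteOk == resolver.AddSoftmax());
    #if defined(ARM_NPU)
        /* The Ethos-U custom operator is only needed for NPU-accelerated builds. */
        ok = ok && (kTfLiteOk == resolver.AddEthosU());
    #endif
        return ok;
    }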
diff --git a/source/use_case/img_class/usecase.cmake b/source/use_case/img_class/usecase.cmake
index 63a4c2a..e46de00 100644
--- a/source/use_case/img_class/usecase.cmake
+++ b/source/use_case/img_class/usecase.cmake
@@ -47,9 +47,9 @@ USER_OPTION(${use_case}_ACTIVATION_BUF_SZ "Activation buffer size for the chosen
STRING)
if (ETHOS_U_NPU_ENABLED)
- set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/mobilenet_v2_1.0_224_quantized_1_default_1_vela_H128.tflite)
+ set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/mobilenet_v2_1.0_224_INT8_vela_H128.tflite)
else()
- set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/mobilenet_v2_1.0_224_quantized_1_default_1.tflite)
+ set(DEFAULT_MODEL_PATH ${DEFAULT_MODEL_DIR}/mobilenet_v2_1.0_224_INT8.tflite)
endif()
USER_OPTION(${use_case}_MODEL_TFLITE_PATH "NN models file to be used in the evaluation application. Model files must be in tflite format."
diff --git a/source/use_case/kws_asr/src/Wav2LetterModel.cc b/source/use_case/kws_asr/src/Wav2LetterModel.cc
index 62245b9..affa1a6 100644
--- a/source/use_case/kws_asr/src/Wav2LetterModel.cc
+++ b/source/use_case/kws_asr/src/Wav2LetterModel.cc
@@ -35,8 +35,8 @@ const tflite::MicroOpResolver& arm::app::Wav2LetterModel::GetOpResolver()
bool arm::app::Wav2LetterModel::EnlistOperations()
{
this->m_opResolver.AddConv2D();
- this->m_opResolver.AddMul();
- this->m_opResolver.AddMaximum();
+ this->m_opResolver.AddLeakyRelu();
+ this->m_opResolver.AddSoftmax();
this->m_opResolver.AddReshape();
#if defined(ARM_NPU)