diff options
author | Ledion Daja <ledion.daja@arm.com> | 2022-11-21 09:23:50 +0100 |
---|---|---|
committer | Ledion Daja <ledion.daja@arm.com> | 2022-11-30 14:07:22 +0100 |
commit | a7d025a89dff1d532add998392e2dba6ec7129a4 (patch) | |
tree | 06b76e6500e8e46d33b73356593a4491219b8107 /applications/inference_process/src | |
parent | 3227fbddb304683e74af7a485455067bdea40fb8 (diff) | |
download | ethos-u-core-software-a7d025a89dff1d532add998392e2dba6ec7129a4.tar.gz |
Extend inference_process library to use MicroMutableOpResolver
Added a compilation flag that allows the inference_process library to
use a MicroMutableOpResolver as an alternative to AllOpsResolver. This
makes it possible to register only the operators that are actually
needed, instead of the complete list of operators, thus reducing the
memory footprint of the application.
Change-Id: If1d6751b12e8aa301bb466e3ffae92406200eab4
Diffstat (limited to 'applications/inference_process/src')
-rw-r--r-- | applications/inference_process/src/inference_process.cpp | 10 |
1 files changed, 10 insertions, 0 deletions
diff --git a/applications/inference_process/src/inference_process.cpp b/applications/inference_process/src/inference_process.cpp index 71a3128..88bc8f4 100644 --- a/applications/inference_process/src/inference_process.cpp +++ b/applications/inference_process/src/inference_process.cpp @@ -16,7 +16,13 @@ * limitations under the License. */ +#ifndef INFERENCE_PROCESS_OPS_RESOLVER #include "tensorflow/lite/micro/all_ops_resolver.h" +#else +#define _STRINGIFY(a) #a +#define STRINGIFY(a) _STRINGIFY(a) +#include STRINGIFY(INFERENCE_PROCESS_OPS_RESOLVER) +#endif #include "tensorflow/lite/micro/cortex_m_generic/debug_log_callback.h" #include "tensorflow/lite/micro/micro_interpreter.h" #include "tensorflow/lite/micro/micro_time.h" @@ -144,7 +150,11 @@ bool InferenceProcess::runJob(InferenceJob &job) { } // Create the TFL micro interpreter +#ifndef INFERENCE_PROCESS_OPS_RESOLVER tflite::AllOpsResolver resolver; +#else + tflite::MicroMutableOpResolver<kNumberOperators> resolver = get_resolver(); +#endif tflite::ArmProfiler profiler; tflite::MicroInterpreter interpreter(model, resolver, tensorArena, tensorArenaSize, nullptr, &profiler); |