From 66ed182cd7520537e73ec37f17f8bf549d8297a2 Mon Sep 17 00:00:00 2001
From: Anton Moberg
Date: Wed, 10 Feb 2021 08:49:28 +0100
Subject: core_software - Move TensorArena

Moved the TensorArena in inference_process.cpp to the application level.
The InferenceProcess class now takes a TensorArena pointer and a
TensorArenaSize as constructor parameters. Both must be set by the
application before runJob() is called.

Change-Id: I530b96039868305fa903ae7f93419d9d00f9c16f
---
 applications/inference_process/CMakeLists.txt            |  7 ++-----
 .../inference_process/include/inference_process.hpp      |  5 ++++-
 applications/inference_process/src/inference_process.cpp | 13 +------------
 3 files changed, 7 insertions(+), 18 deletions(-)

diff --git a/applications/inference_process/CMakeLists.txt b/applications/inference_process/CMakeLists.txt
index 3348d28..97d4d8f 100644
--- a/applications/inference_process/CMakeLists.txt
+++ b/applications/inference_process/CMakeLists.txt
@@ -16,14 +16,11 @@
 # limitations under the License.
 #
 
-set(TR_ARENA_SIZE "200000" CACHE STRING "Arena size.")
 set(TR_PRINT_OUTPUT_BYTES "" CACHE STRING "Print output data.")
 
 add_library(inference_process STATIC)
+
 target_include_directories(inference_process PUBLIC include PRIVATE ${TENSORFLOW_PATH}
                            ${TENSORFLOW_PATH}/tensorflow/lite/micro/tools/make/downloads/flatbuffers/include)
 target_link_libraries(inference_process PUBLIC tflu cmsis_core cmsis_device)
-target_sources(inference_process PRIVATE src/inference_process.cpp)
-
-# Set arena size
-target_compile_definitions(inference_process PRIVATE TENSOR_ARENA_SIZE=${TR_ARENA_SIZE})
+target_sources(inference_process PRIVATE src/inference_process.cpp)
\ No newline at end of file
diff --git a/applications/inference_process/include/inference_process.hpp b/applications/inference_process/include/inference_process.hpp
index 67b30c5..880e28d 100644
--- a/applications/inference_process/include/inference_process.hpp
+++ b/applications/inference_process/include/inference_process.hpp
@@ -63,7 +63,8 @@ struct InferenceJob {
 
 class InferenceProcess {
 public:
-    InferenceProcess();
+    InferenceProcess(uint8_t *_tensorArena, size_t _tensorArenaSize) :
+        lock(0), tensorArena(_tensorArena), tensorArenaSize(_tensorArenaSize) {}
 
     bool push(const InferenceJob &job);
     bool runJob(InferenceJob &job);
@@ -71,6 +72,8 @@ public:
 
 private:
     volatile uint32_t lock;
+    uint8_t *tensorArena;
+    const size_t tensorArenaSize;
     std::queue<InferenceJob> inferenceJobQueue;
 
     void getLock();
diff --git a/applications/inference_process/src/inference_process.cpp b/applications/inference_process/src/inference_process.cpp
index b5ed5c4..cc2b378 100644
--- a/applications/inference_process/src/inference_process.cpp
+++ b/applications/inference_process/src/inference_process.cpp
@@ -30,14 +30,8 @@
 
 #include <inttypes.h>
 
-#ifndef TENSOR_ARENA_SIZE
-#define TENSOR_ARENA_SIZE (1024)
-#endif
-
 using namespace std;
 
-__attribute__((section(".bss.NoInit"), aligned(16))) uint8_t inferenceProcessTensorArena[TENSOR_ARENA_SIZE];
-
 namespace {
 
 void tflu_debug_log(const char *s) {
@@ -151,8 +145,6 @@ void InferenceJob::clean() {
     }
 }
 
-InferenceProcess::InferenceProcess() : lock(0) {}
-
 // NOTE: Adding code for get_lock & free_lock with some corrections from
 // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHEJCHB.html
 // TODO: check correctness?
@@ -216,9 +208,7 @@ bool InferenceProcess::runJob(InferenceJob &job) {
                       ethosu_pmu_event_type(job.pmuEventConfig[2]),
                       ethosu_pmu_event_type(job.pmuEventConfig[3]));
 #endif
-
-    tflite::MicroInterpreter interpreter(
-        model, resolver, inferenceProcessTensorArena, TENSOR_ARENA_SIZE, reporter, &profiler);
+    tflite::MicroInterpreter interpreter(model, resolver, tensorArena, tensorArenaSize, reporter, &profiler);
 
     // Allocate tensors
     TfLiteStatus allocate_status = interpreter.AllocateTensors();
@@ -236,7 +226,6 @@ bool InferenceProcess::runJob(InferenceJob &job) {
             inputTensors.push_back(tensor);
         }
     }
-
     if (job.input.size() != inputTensors.size()) {
         printf("Number of input buffers does not match number of non empty network tensors. input=%zu, network=%zu\n",
               job.input.size(),
--
cgit v1.2.1
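
A minimal sketch of the new calling convention, based only on the constructor
shown above: the application now owns the arena and hands pointer and size to
InferenceProcess before calling runJob(). The section attribute and alignment
mirror the declaration this patch removes from inference_process.cpp; the
arena size, the main() wrapper and the job setup are illustrative assumptions,
not part of the patch.

    #include <cstddef>
    #include <cstdint>

    #include "inference_process.hpp"

    // Sizing is now an application decision; 200000 matches the old
    // TR_ARENA_SIZE default removed from CMakeLists.txt above.
    constexpr size_t arenaSize = 200000;

    // Same placement and alignment the library used before the move.
    __attribute__((section(".bss.NoInit"), aligned(16))) uint8_t tensorArena[arenaSize];

    int main() {
        // Arena pointer and size go in through the constructor, so they are
        // in place before any job is run.
        InferenceProcess inferenceProcess(tensorArena, arenaSize);

        InferenceJob job;
        // ... populate job (network model, input/output buffers) ...
        inferenceProcess.runJob(job);

        return 0;
    }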