author    Anton Moberg <anton.moberg@arm.com>  2021-02-10 08:49:28 +0100
committer Anton Moberg <anton.moberg@arm.com>  2021-02-12 14:37:30 +0100
commit    66ed182cd7520537e73ec37f17f8bf549d8297a2 (patch)
tree      7ce2ce2f2c10f24d3b6fd5b4cb3e3bd9024ad2a3 /applications/inference_process
parent    83e49967d1c4eeff21025ea0bd449c938c91c5f8 (diff)
core_software: Move TensorArena  (tag: 21.02-rc2)
Moved the TensorArena in inference_process.cpp to application level.
The InferenceProcess class now takes a TensorArena pointer and TensorArenaSize as constructor parameters. These must be provided by the application before runJob() is called (see the usage sketch below).
Change-Id: I530b96039868305fa903ae7f93419d9d00f9c16f
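
For illustration, a minimal application-side sketch of the new interface (not part of this patch): the arena size, linker section, alignment, and job setup are assumptions that simply mirror the defaults removed from the library (TR_ARENA_SIZE=200000, .bss.NoInit, 16-byte alignment); namespace qualification, if any, is omitted.

#include "inference_process.hpp"

#include <cstddef>
#include <cstdint>

// The application now owns the tensor arena. Size, section and alignment are
// application choices; the values here only mirror the removed library defaults.
constexpr size_t arenaSize = 200000;
__attribute__((section(".bss.NoInit"), aligned(16))) uint8_t tensorArena[arenaSize];

int main() {
    // Arena pointer and size are passed at construction, before any runJob() call.
    InferenceProcess inferenceProcess(tensorArena, arenaSize);

    InferenceJob job; // hypothetical job; network model and input/output buffers are set up elsewhere
    bool failed = inferenceProcess.runJob(job);
    return failed ? 1 : 0;
}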
Diffstat (limited to 'applications/inference_process')
-rw-r--r--  applications/inference_process/CMakeLists.txt                  |  7
-rw-r--r--  applications/inference_process/include/inference_process.hpp   |  5
-rw-r--r--  applications/inference_process/src/inference_process.cpp       | 13
3 files changed, 7 insertions, 18 deletions
diff --git a/applications/inference_process/CMakeLists.txt b/applications/inference_process/CMakeLists.txt
index 3348d28..97d4d8f 100644
--- a/applications/inference_process/CMakeLists.txt
+++ b/applications/inference_process/CMakeLists.txt
@@ -16,14 +16,11 @@
 # limitations under the License.
 #
 
-set(TR_ARENA_SIZE "200000" CACHE STRING "Arena size.")
 set(TR_PRINT_OUTPUT_BYTES "" CACHE STRING "Print output data.")
 add_library(inference_process STATIC)
+
 target_include_directories(inference_process PUBLIC include PRIVATE
     ${TENSORFLOW_PATH}
     ${TENSORFLOW_PATH}/tensorflow/lite/micro/tools/make/downloads/flatbuffers/include)
 target_link_libraries(inference_process PUBLIC tflu cmsis_core cmsis_device)
-target_sources(inference_process PRIVATE src/inference_process.cpp)
-
-# Set arena size
-target_compile_definitions(inference_process PRIVATE TENSOR_ARENA_SIZE=${TR_ARENA_SIZE})
+target_sources(inference_process PRIVATE src/inference_process.cpp)
\ No newline at end of file
diff --git a/applications/inference_process/include/inference_process.hpp b/applications/inference_process/include/inference_process.hpp
index 67b30c5..880e28d 100644
--- a/applications/inference_process/include/inference_process.hpp
+++ b/applications/inference_process/include/inference_process.hpp
@@ -63,7 +63,8 @@ struct InferenceJob {
 
 class InferenceProcess {
 public:
-    InferenceProcess();
+    InferenceProcess(uint8_t *_tensorArena, size_t _tensorArenaSize) :
+        lock(0), tensorArena(_tensorArena), tensorArenaSize(_tensorArenaSize) {}
 
     bool push(const InferenceJob &job);
     bool runJob(InferenceJob &job);
@@ -71,6 +72,8 @@ public:
 
 private:
     volatile uint32_t lock;
+    uint8_t *tensorArena;
+    const size_t tensorArenaSize;
     std::queue<InferenceJob> inferenceJobQueue;
 
     void getLock();
diff --git a/applications/inference_process/src/inference_process.cpp b/applications/inference_process/src/inference_process.cpp
index b5ed5c4..cc2b378 100644
--- a/applications/inference_process/src/inference_process.cpp
+++ b/applications/inference_process/src/inference_process.cpp
@@ -30,14 +30,8 @@
 
 #include <inttypes.h>
 
-#ifndef TENSOR_ARENA_SIZE
-#define TENSOR_ARENA_SIZE (1024)
-#endif
-
 using namespace std;
 
-__attribute__((section(".bss.NoInit"), aligned(16))) uint8_t inferenceProcessTensorArena[TENSOR_ARENA_SIZE];
-
 namespace {
 
 void tflu_debug_log(const char *s) {
@@ -151,8 +145,6 @@ void InferenceJob::clean() {
     }
 }
 
-InferenceProcess::InferenceProcess() : lock(0) {}
-
 // NOTE: Adding code for get_lock & free_lock with some corrections from
 // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHEJCHB.html
 // TODO: check correctness?
@@ -216,9 +208,7 @@ bool InferenceProcess::runJob(InferenceJob &job) {
                                   ethosu_pmu_event_type(job.pmuEventConfig[2]),
                                   ethosu_pmu_event_type(job.pmuEventConfig[3]));
 #endif
-
-    tflite::MicroInterpreter interpreter(
-        model, resolver, inferenceProcessTensorArena, TENSOR_ARENA_SIZE, reporter, &profiler);
+    tflite::MicroInterpreter interpreter(model, resolver, tensorArena, tensorArenaSize, reporter, &profiler);
 
     // Allocate tensors
     TfLiteStatus allocate_status = interpreter.AllocateTensors();
@@ -236,7 +226,6 @@ bool InferenceProcess::runJob(InferenceJob &job) {
                 inputTensors.push_back(tensor);
             }
         }
-
         if (job.input.size() != inputTensors.size()) {
             printf("Number of input buffers does not match number of non empty network tensors. input=%zu, network=%zu\n",
                    job.input.size(),