author    Anton Moberg <anton.moberg@arm.com>  2021-02-10 08:49:28 +0100
committer Anton Moberg <anton.moberg@arm.com>  2021-02-12 14:37:30 +0100
commit    66ed182cd7520537e73ec37f17f8bf549d8297a2 (patch)
tree      7ce2ce2f2c10f24d3b6fd5b4cb3e3bd9024ad2a3 /applications/inference_process/src/inference_process.cpp
parent    83e49967d1c4eeff21025ea0bd449c938c91c5f8 (diff)
download  ethos-u-core-software-66ed182cd7520537e73ec37f17f8bf549d8297a2.tar.gz
core_software - Move TensorArena (tag: 21.02-rc2)
Moved the TensorArena in inference_process.cpp to the application level. The
InferenceProcess class now takes a TensorArena pointer and a TensorArenaSize as
parameters; these need to be set by the application before runJob() is called.

Change-Id: I530b96039868305fa903ae7f93419d9d00f9c16f
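A minimal sketch of what the application side could look like after this change. The
header name, the InferenceProcess constructor shape (pointer + size) and the 2 MiB
arena size are assumptions for illustration, not taken from this diff:

#include <cstddef>
#include <cstdint>

#include "inference_process.hpp" // assumed header name

namespace {
// The application now owns the arena; the ".bss.NoInit" placement and 16-byte
// alignment mirror what inference_process.cpp previously declared internally.
constexpr std::size_t tensorArenaSize = 2 * 1024 * 1024; // placeholder size
__attribute__((section(".bss.NoInit"), aligned(16))) std::uint8_t tensorArena[tensorArenaSize];
} // namespace

int main() {
    // Hand the arena to the inference process before any job is run
    // (assumed constructor, as described in the commit message).
    InferenceProcess::InferenceProcess inference(tensorArena, tensorArenaSize);

    InferenceProcess::InferenceJob job;
    // ... populate the job's network model and I/O buffers here ...

    // runJob() now allocates the TFLite Micro tensors out of the
    // application-provided arena instead of a compile-time global.
    bool failed = inference.runJob(job);
    return failed ? 1 : 0;
}

Moving ownership out of the library means each application can size and place the
arena for its own models instead of rebuilding with a different TENSOR_ARENA_SIZE.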
Diffstat (limited to 'applications/inference_process/src/inference_process.cpp')
-rw-r--r--  applications/inference_process/src/inference_process.cpp  13
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/applications/inference_process/src/inference_process.cpp b/applications/inference_process/src/inference_process.cpp
index b5ed5c4..cc2b378 100644
--- a/applications/inference_process/src/inference_process.cpp
+++ b/applications/inference_process/src/inference_process.cpp
@@ -30,14 +30,8 @@
#include <inttypes.h>
-#ifndef TENSOR_ARENA_SIZE
-#define TENSOR_ARENA_SIZE (1024)
-#endif
-
using namespace std;
-__attribute__((section(".bss.NoInit"), aligned(16))) uint8_t inferenceProcessTensorArena[TENSOR_ARENA_SIZE];
-
namespace {
void tflu_debug_log(const char *s) {
@@ -151,8 +145,6 @@ void InferenceJob::clean() {
}
}
-InferenceProcess::InferenceProcess() : lock(0) {}
-
// NOTE: Adding code for get_lock & free_lock with some corrections from
// http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dai0321a/BIHEJCHB.html
// TODO: check correctness?
@@ -216,9 +208,7 @@ bool InferenceProcess::runJob(InferenceJob &job) {
ethosu_pmu_event_type(job.pmuEventConfig[2]),
ethosu_pmu_event_type(job.pmuEventConfig[3]));
#endif
-
- tflite::MicroInterpreter interpreter(
- model, resolver, inferenceProcessTensorArena, TENSOR_ARENA_SIZE, reporter, &profiler);
+ tflite::MicroInterpreter interpreter(model, resolver, tensorArena, tensorArenaSize, reporter, &profiler);
// Allocate tensors
TfLiteStatus allocate_status = interpreter.AllocateTensors();
@@ -236,7 +226,6 @@ bool InferenceProcess::runJob(InferenceJob &job) {
inputTensors.push_back(tensor);
}
}
-
if (job.input.size() != inputTensors.size()) {
printf("Number of input buffers does not match number of non empty network tensors. input=%zu, network=%zu\n",
job.input.size(),