about summary refs log tree commit diff
diff options
context:
space:
mode:
authorDavide Grohmann <davide.grohmann@arm.com>2022-02-15 17:19:56 +0100
committerDavide Grohmann <davide.grohmann@arm.com>2022-02-17 12:31:05 +0100
commit41dc341e91cb0d406bcac26294305dbb0879af61 (patch)
tree588a91d62be120d50fbae49bf95709eede90a7c3
parentcd8754ec0fbedbe79288d6132f3c3e33318665e8 (diff)
downloadethos-u-core-platform-41dc341e91cb0d406bcac26294305dbb0879af61.tar.gz
Fixing compilation warnings in threadx demo app
Change-Id: Id765ba9adf9a2d134cc6cc0c04f9e6d7dada3657
-rw-r--r--applications/threadx_demo/main.cpp29
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/applications/threadx_demo/main.cpp b/applications/threadx_demo/main.cpp
index e2e41b3..dc9055b 100644
--- a/applications/threadx_demo/main.cpp
+++ b/applications/threadx_demo/main.cpp
@@ -46,6 +46,8 @@ using namespace InferenceProcess;
// Nr. of jobs to create per job thread
#define NUM_JOBS_PER_THREAD 1
+#define MAX_THREAD_NAME_SIZE 128
+
#define PROCESS_THREAD_STACK_SIZE (20 * 1024)
#define SENDER_THREAD_STACK_SIZE (2 * 1024)
#define PROCESS_THREAD_CONTEXT_SIZE (sizeof(TX_THREAD))
@@ -112,10 +114,13 @@ int totalCompletedJobs = 0;
const size_t arenaSize = TENSOR_ARENA_SIZE_PER_INFERENCE;
TX_QUEUE inferenceProcessQueue;
+char inferenceProcessQueueName[] = "inferenceProcessQueue";
ProcessThreadParams threadParams[NUM_INFERENCE_THREADS];
TX_BYTE_POOL bytePool;
+char bytePoolName[] = "byte pool";
+
ULONG memoryArea[BYTE_POOL_SIZE / sizeof(ULONG)];
} // namespace
@@ -132,7 +137,7 @@ void *ethosu_mutex_create(void) {
TX_MUTEX *mutex;
mutex = new TX_MUTEX;
- status = tx_mutex_create(mutex, "mutex 0", TX_NO_INHERIT);
+ status = tx_mutex_create(mutex, nullptr, TX_NO_INHERIT);
if (status != TX_SUCCESS) {
printf("mutex create failed, error - %d\n", status);
}
@@ -162,7 +167,7 @@ void *ethosu_semaphore_create(void) {
TX_SEMAPHORE *semaphore;
semaphore = new TX_SEMAPHORE;
- status = tx_semaphore_create(semaphore, "semaphore", 1);
+ status = tx_semaphore_create(semaphore, nullptr, 1);
if (status != TX_SUCCESS) {
printf("Semaphore create failed, error - %d\n", status);
@@ -242,7 +247,8 @@ void inferenceSenderThread(ULONG pvParameters) {
UINT status = TX_QUEUE_ERROR;
TX_QUEUE *inferenceProcessQueueLocal = reinterpret_cast<TX_QUEUE *>(pvParameters);
xInferenceJob jobs[NUM_JOBS_PER_THREAD];
- CHAR *senderQueuePtr = nullptr;
+ CHAR *senderQueuePtr = nullptr;
+ char senderQueueName[] = "senderQueue";
/* Allocate memory for this inference sender thread responses queue */
status = tx_byte_allocate(&bytePool, reinterpret_cast<VOID **>(&senderQueuePtr), SENDER_QUEUE_SIZE, TX_NO_WAIT);
@@ -253,7 +259,7 @@ void inferenceSenderThread(ULONG pvParameters) {
/* Create responses queue for this inference sender thread */
status = tx_queue_create(
- &senderQueue, "senderQueue", sizeof(xInferenceJob *) / sizeof(uint32_t), senderQueuePtr, SENDER_QUEUE_SIZE);
+ &senderQueue, senderQueueName, sizeof(xInferenceJob *) / sizeof(uint32_t), senderQueuePtr, SENDER_QUEUE_SIZE);
if (status != TX_SUCCESS) {
printf("Sender thread failed to create Queue, error - %d\n", status);
@@ -321,10 +327,12 @@ void tx_application_define(void *first_unused_memory) {
CHAR *processThreadStackPtr[NUM_INFERENCE_THREADS] = {nullptr};
CHAR *processQueuePtr = nullptr;
CHAR *senderThreadPtr[NUM_JOB_THREADS] = {nullptr};
- CHAR *processThreadPtr[NUM_INFERENCE_THREADS] = {nullptr};
+ CHAR senderThreadNames[NUM_JOB_THREADS][MAX_THREAD_NAME_SIZE];
+ CHAR *processThreadPtr[NUM_INFERENCE_THREADS] = {nullptr};
+ CHAR processThreadNames[NUM_JOB_THREADS][MAX_THREAD_NAME_SIZE];
/* Create a byte memory pool from which to allocate the threads stacks and queues. */
- status = tx_byte_pool_create(&bytePool, "byte pool", memoryArea, BYTE_POOL_SIZE);
+ status = tx_byte_pool_create(&bytePool, bytePoolName, memoryArea, BYTE_POOL_SIZE);
if (status != TX_SUCCESS) {
printf("Main failed to allocate pool of bytes, error - %d\n", status);
exit(1);
@@ -338,7 +346,7 @@ void tx_application_define(void *first_unused_memory) {
}
status = tx_queue_create(&inferenceProcessQueue,
- "inferenceProcessQueue",
+ inferenceProcessQueueName,
sizeof(xInferenceJob *) / sizeof(uint32_t),
processQueuePtr,
PROCESS_QUEUE_SIZE);
@@ -366,9 +374,11 @@ void tx_application_define(void *first_unused_memory) {
exit(1);
}
+ snprintf(senderThreadNames[n], MAX_THREAD_NAME_SIZE, "senderThread-%d", n);
+
/* Create the inference sender thread. */
status = tx_thread_create(reinterpret_cast<TX_THREAD *>(senderThreadPtr[n]),
- "senderThread",
+ senderThreadNames[n],
inferenceSenderThread,
reinterpret_cast<ULONG>(&inferenceProcessQueue),
senderThreadStackPtr[n],
@@ -404,10 +414,11 @@ void tx_application_define(void *first_unused_memory) {
threadParams[n] = ProcessThreadParams(
&inferenceProcessQueue, inferenceProcessTensorArena[n], reinterpret_cast<size_t>(arenaSize));
+ snprintf(processThreadNames[n], MAX_THREAD_NAME_SIZE, "processThread-%d", n);
/* Create the inference process thread. */
status = tx_thread_create(reinterpret_cast<TX_THREAD *>(processThreadPtr[n]),
- "processThread",
+ processThreadNames[n],
inferenceProcessThread,
reinterpret_cast<ULONG>(&threadParams[n]),
processThreadStackPtr[n],