diff options
author | Anton Moberg <anton.moberg@arm.com> | 2021-07-07 11:08:17 +0200 |
---|---|---|
committer | Fredrik Knutsson <fredrik.knutsson@arm.com> | 2021-07-15 09:46:02 +0000 |
commit | 07cf70b46cd2bf7db94a8d0e0d845eb44be24d1c (patch) | |
tree | 8c54c8d1785d58d2c336f695bd986350af3c0960 /applications | |
parent | 67536759dce9d6a41d42c6403edc4482623eea0b (diff) | |
download | ethos-u-core-software-07cf70b46cd2bf7db94a8d0e0d845eb44be24d1c.tar.gz |
Improved Logging - Core Software
Logging macros:
Added ethosu_log.h as a library containing logging macros, controlled by
the parameter ETHOSU_LOG_SEVERITY set in the core_software CMakeLists.txt.
Updated inference_process:
Updated inference_process to include ethosu_log.h and use the logging
macros rather than printf().
Updated message_process:
Updated message_process to include ethosu_log.h and use the
logging macros rather than printf().
Updated ethosu_monitor:
Updated ethosu_monitor to include ethosu_log.h and use the
logging macros rather than printf().
Updated layer_by_layer_profiler:
Updated layer_by_layer_profiler to include ethosu_log.h and use the
logging macros rather than printf().
Updated mhu_v2:
Updated mhu_v2 to include ethosu_log.h and use the
logging macros rather than printf().
Change-Id: I5d6fd80b7645b3e0af5b494eea6dbb7755f02122
Diffstat (limited to 'applications')
-rw-r--r-- | applications/inference_process/CMakeLists.txt | 5 | ||||
-rw-r--r-- | applications/inference_process/src/inference_process.cpp | 115 | ||||
-rw-r--r-- | applications/message_process/CMakeLists.txt | 4 | ||||
-rw-r--r-- | applications/message_process/src/message_process.cpp | 58 |
4 files changed, 92 insertions, 90 deletions
diff --git a/applications/inference_process/CMakeLists.txt b/applications/inference_process/CMakeLists.txt index 1378181..19777fd 100644 --- a/applications/inference_process/CMakeLists.txt +++ b/applications/inference_process/CMakeLists.txt @@ -30,5 +30,8 @@ endif() if (TARGET arm_profiler) target_link_libraries(inference_process PRIVATE arm_profiler) endif() +if (TARGET ethosu_log) + target_link_libraries(inference_process PRIVATE ethosu_log) +endif() -target_sources(inference_process PRIVATE src/inference_process.cpp) +target_sources(inference_process PRIVATE src/inference_process.cpp)
\ No newline at end of file diff --git a/applications/inference_process/src/inference_process.cpp b/applications/inference_process/src/inference_process.cpp index 13cabb2..7058f9c 100644 --- a/applications/inference_process/src/inference_process.cpp +++ b/applications/inference_process/src/inference_process.cpp @@ -27,6 +27,7 @@ #ifdef ETHOSU #include "layer_by_layer_profiler.hpp" #endif +#include "ethosu_log.h" #include "inference_process.hpp" @@ -39,30 +40,28 @@ using namespace std; namespace { void tflu_debug_log(const char *s) { - fprintf(stderr, "%s", s); + LOG_DEBUG("%s", s); } void print_output_data(TfLiteTensor *output, size_t bytesToPrint) { const int numBytesToPrint = min(output->bytes, bytesToPrint); - - int dims_size = output->dims->size; - printf("{\n"); - printf("\"dims\": [%d,", dims_size); + int dims_size = output->dims->size; + LOG("{\n"); + LOG("\"dims\": [%d,", dims_size); for (int i = 0; i < output->dims->size - 1; ++i) { - printf("%d,", output->dims->data[i]); + LOG("%d,", output->dims->data[i]); } - printf("%d],\n", output->dims->data[dims_size - 1]); - - printf("\"data_address\": \"%08" PRIx32 "\",\n", (uint32_t)output->data.data); - printf("\"data\":\""); + LOG("%d],\n", output->dims->data[dims_size - 1]); + LOG("\"data_address\": \"%08" PRIx32 "\",\n", (uint32_t)output->data.data); + LOG("\"data\":\""); for (int i = 0; i < numBytesToPrint - 1; ++i) { if (i % 16 == 0 && i != 0) { - printf("\n"); + LOG("\n"); } - printf("0x%02x,", output->data.uint8[i]); + LOG("0x%02x,", output->data.uint8[i]); } - printf("0x%02x\"\n", output->data.uint8[numBytesToPrint - 1]); - printf("}"); + LOG("0x%02x\"\n", output->data.uint8[numBytesToPrint - 1]); + LOG("}"); } bool copyOutput(const TfLiteTensor &src, InferenceProcess::DataPtr &dst) { @@ -71,7 +70,7 @@ bool copyOutput(const TfLiteTensor &src, InferenceProcess::DataPtr &dst) { } if (src.bytes > dst.size) { - printf("Tensor size %d does not match output size %d.\n", src.bytes, dst.size); + LOG_ERR("Tensor 
size mismatch (bytes): actual=%d, expected%d.\n", src.bytes, dst.size); return true; } @@ -181,7 +180,7 @@ bool InferenceProcess::push(const InferenceJob &job) { } bool InferenceProcess::runJob(InferenceJob &job) { - printf("Running inference job: %s\n", job.name.c_str()); + LOG_INFO("Running inference job: %s\n", job.name.c_str()); // Register debug log callback for profiling RegisterDebugLogCallback(tflu_debug_log); @@ -192,9 +191,9 @@ bool InferenceProcess::runJob(InferenceJob &job) { // Get model handle and verify that the version is correct const tflite::Model *model = ::tflite::GetModel(job.networkModel.data); if (model->version() != TFLITE_SCHEMA_VERSION) { - printf("Model provided is schema version %" PRIu32 " not equal to supported version %d.\n", - model->version(), - TFLITE_SCHEMA_VERSION); + LOG_ERR("Model schema version unsupported: version=%" PRIu32 ", supported=%d.\n", + model->version(), + TFLITE_SCHEMA_VERSION); return true; } @@ -211,7 +210,7 @@ bool InferenceProcess::runJob(InferenceJob &job) { // Allocate tensors TfLiteStatus allocate_status = interpreter.AllocateTensors(); if (allocate_status != kTfLiteOk) { - printf("AllocateTensors failed for inference job: %s\n", job.name.c_str()); + LOG_ERR("Failed to allocate tensors for inference: job=%s\n", job.name.c_str()); return true; } @@ -225,9 +224,9 @@ bool InferenceProcess::runJob(InferenceJob &job) { } } if (job.input.size() != inputTensors.size()) { - printf("Number of input buffers does not match number of non empty network tensors. input=%zu, network=%zu\n", - job.input.size(), - inputTensors.size()); + LOG_ERR("Number of input buffers does not match number of non empty network tensors: input=%zu, network=%zu\n", + job.input.size(), + inputTensors.size()); return true; } @@ -237,11 +236,11 @@ bool InferenceProcess::runJob(InferenceJob &job) { const TfLiteTensor *tensor = inputTensors[i]; if (input.size != tensor->bytes) { - printf("Input size does not match network size. 
job=%s, index=%zu, input=%zu, network=%u\n", - job.name.c_str(), - i, - input.size, - tensor->bytes); + LOG_ERR("Job input size does not match network input size: job=%s, index=%zu, input=%zu, network=%u\n", + job.name.c_str(), + i, + input.size, + tensor->bytes); return true; } @@ -251,13 +250,13 @@ bool InferenceProcess::runJob(InferenceJob &job) { // Run the inference TfLiteStatus invoke_status = interpreter.Invoke(); if (invoke_status != kTfLiteOk) { - printf("Invoke failed for inference job: %s\n", job.name.c_str()); + LOG_ERR("Invoke failed for inference: job=%s\n", job.name.c_str()); return true; } - printf("%s : %zu\r\n", "arena_used_bytes", interpreter.arena_used_bytes()); + LOG("arena_used_bytes : %zu\n", interpreter.arena_used_bytes()); - printf("Inference runtime: %u cycles\r\n", (unsigned int)profiler.GetTotalTicks()); + LOG("Inference runtime: %u cycles\n", (unsigned int)profiler.GetTotalTicks()); if (job.pmuCycleCounterEnable != 0) { job.pmuCycleCounterCount = profiler.GetTotalTicks(); @@ -266,7 +265,7 @@ bool InferenceProcess::runJob(InferenceJob &job) { // Copy output data if (job.output.size() > 0) { if (interpreter.outputs_size() != job.output.size()) { - printf("Number of outputs mismatch. job=%zu, network=%u\n", job.output.size(), interpreter.outputs_size()); + LOG_ERR("Output size mismatch: job=%zu, network=%u\n", job.output.size(), interpreter.outputs_size()); return true; } @@ -280,28 +279,26 @@ bool InferenceProcess::runJob(InferenceJob &job) { if (job.numBytesToPrint > 0) { // Print all of the output data, or the first NUM_BYTES_TO_PRINT bytes, // whichever comes first as well as the output shape. 
- printf("num_of_outputs: %d\n", interpreter.outputs_size()); - printf("output_begin\n"); - printf("[\n"); - + LOG("num_of_outputs: %d\n", interpreter.outputs_size()); + LOG("output_begin\n"); + LOG("[\n"); for (unsigned int i = 0; i < interpreter.outputs_size(); i++) { TfLiteTensor *output = interpreter.output(i); print_output_data(output, job.numBytesToPrint); if (i != interpreter.outputs_size() - 1) { - printf(",\n"); + LOG(",\n"); } } - - printf("]\n"); - printf("output_end\n"); + LOG("]\n"); + LOG("output_end\n"); } if (job.expectedOutput.size() > 0) { if (job.expectedOutput.size() != interpreter.outputs_size()) { - printf("Expected number of output tensors does not match network. job=%s, expected=%zu, network=%zu\n", - job.name.c_str(), - job.expectedOutput.size(), - interpreter.outputs_size()); + LOG_ERR("Expected number of output tensors mismatch: job=%s, expected=%zu, network=%zu\n", + job.name.c_str(), + job.expectedOutput.size(), + interpreter.outputs_size()); return true; } @@ -310,33 +307,33 @@ bool InferenceProcess::runJob(InferenceJob &job) { const TfLiteTensor *output = interpreter.output(i); if (expected.size != output->bytes) { - printf("Expected tensor size does not match output size. job=%s, index=%u, expected=%zu, network=%zu\n", - job.name.c_str(), - i, - expected.size, - output->bytes); + LOG_ERR("Expected output tensor size mismatch: job=%s, index=%u, expected=%zu, network=%zu\n", + job.name.c_str(), + i, + expected.size, + output->bytes); return true; } for (unsigned int j = 0; j < output->bytes; ++j) { if (output->data.uint8[j] != static_cast<uint8_t *>(expected.data)[j]) { - printf("Expected data does not match output data. 
job=%s, index=%u, offset=%u, " - "expected=%02x, network=%02x\n", - job.name.c_str(), - i, - j, - static_cast<uint8_t *>(expected.data)[j], - output->data.uint8[j]); + LOG_ERR("Expected output tensor data mismatch: job=%s, index=%u, offset=%u, " + "expected=%02x, network=%02x\n", + job.name.c_str(), + i, + j, + static_cast<uint8_t *>(expected.data)[j], + output->data.uint8[j]); return true; } } } } - printf("Finished running job: %s\n", job.name.c_str()); + LOG_INFO("Finished running job: %s\n", job.name.c_str()); return false; -} +} // namespace InferenceProcess bool InferenceProcess::run(bool exitOnEmpty) { bool anyJobFailed = false; @@ -348,7 +345,7 @@ bool InferenceProcess::run(bool exitOnEmpty) { if (empty) { if (exitOnEmpty) { - printf("Exit from InferenceProcess::run() on empty job queue!\n"); + LOG_INFO("Exit from InferenceProcess::run() due to empty job queue\n"); break; } diff --git a/applications/message_process/CMakeLists.txt b/applications/message_process/CMakeLists.txt index 1e31d68..2122f14 100644 --- a/applications/message_process/CMakeLists.txt +++ b/applications/message_process/CMakeLists.txt @@ -19,7 +19,7 @@ add_library(message_process STATIC src/message_process.cpp) target_include_directories(message_process PUBLIC include ${LINUX_DRIVER_STACK_PATH}/kernel) -target_link_libraries(message_process PRIVATE cmsis_device inference_process ethosu_mailbox) +target_link_libraries(message_process PRIVATE cmsis_device inference_process ethosu_mailbox ethosu_log) if (CORE_SOFTWARE_ACCELERATOR STREQUAL "NPU") target_link_libraries(message_process PRIVATE ethosu_core_driver) -endif() +endif()
\ No newline at end of file diff --git a/applications/message_process/src/message_process.cpp b/applications/message_process/src/message_process.cpp index cdd5c35..1669d3f 100644 --- a/applications/message_process/src/message_process.cpp +++ b/applications/message_process/src/message_process.cpp @@ -24,6 +24,8 @@ #include "cmsis_compiler.h" +#include "ethosu_log.h" + #include <cstddef> #include <cstdio> #include <cstring> @@ -198,7 +200,7 @@ bool MessageProcess::handleMessage() { return false; } - printf("Msg: header magic=%" PRIX32 ", type=%" PRIu32 ", length=%" PRIu32 "\n", msg.magic, msg.type, msg.length); + LOG_INFO("Msg: header magic=%" PRIX32 ", type=%" PRIu32 ", length=%" PRIu32 "\n", msg.magic, msg.type, msg.length); if (msg.magic != ETHOSU_CORE_MSG_MAGIC) { sndErrorRspAndResetQueue(ETHOSU_CORE_MSG_ERR_INVALID_MAGIC, "Invalid magic"); @@ -207,21 +209,21 @@ bool MessageProcess::handleMessage() { switch (msg.type) { case ETHOSU_CORE_MSG_PING: - printf("Msg: Ping\n"); + LOG_INFO("Msg: Ping\n"); sendPong(); break; case ETHOSU_CORE_MSG_ERR: { struct ethosu_core_msg_err error = {0}; if (!queueIn.read(error)) { - printf("ERROR: Msg: Failed to receive error message\n"); + LOG_ERR("Msg: Failed to receive error message\n"); } else { - printf("Msg: Received an error response, type=%" PRIu32 ", msg=\"%s\"\n", error.type, error.msg); + LOG_INFO("Msg: Received an error response, type=%" PRIu32 ", msg=\"%s\"\n", error.type, error.msg); } queueIn.reset(); return false; } case ETHOSU_CORE_MSG_VERSION_REQ: - printf("Msg: Version request\n"); + LOG_INFO("Msg: Version request\n"); sendVersionRsp(); break; case ETHOSU_CORE_MSG_CAPABILITIES_REQ: { @@ -231,7 +233,7 @@ bool MessageProcess::handleMessage() { return false; } - printf("Msg: Capability request.user_arg=0x%" PRIx64 "", req.user_arg); + LOG_INFO("Msg: Capability request.user_arg=0x%" PRIx64 "\n", req.user_arg); sendCapabilityRsp(req.user_arg); break; @@ -244,30 +246,30 @@ bool MessageProcess::handleMessage() { return 
false; } - printf("Msg: InferenceReq. user_arg=0x%" PRIx64 ", network={0x%" PRIx32 ", %" PRIu32 "}", - req.user_arg, - req.network.ptr, - req.network.size); + LOG_INFO("Msg: InferenceReq. user_arg=0x%" PRIx64 ", network={0x%" PRIx32 ", %" PRIu32 "}", + req.user_arg, + req.network.ptr, + req.network.size); - printf(", ifm_count=%" PRIu32 ", ifm=[", req.ifm_count); + LOG_DEBUG_N(", ifm_count=%" PRIu32 ", ifm=[", req.ifm_count); for (uint32_t i = 0; i < req.ifm_count; ++i) { if (i > 0) { - printf(", "); + LOG_DEBUG_N(", "); } - printf("{0x%" PRIx32 ", %" PRIu32 "}", req.ifm[i].ptr, req.ifm[i].size); + LOG_DEBUG_N("{0x%" PRIx32 ", %" PRIu32 "}", req.ifm[i].ptr, req.ifm[i].size); } - printf("]"); + LOG_DEBUG_N("]"); - printf(", ofm_count=%" PRIu32 ", ofm=[", req.ofm_count); + LOG_DEBUG_N(", ofm_count=%" PRIu32 ", ofm=[", req.ofm_count); for (uint32_t i = 0; i < req.ofm_count; ++i) { if (i > 0) { - printf(", "); + LOG_DEBUG_N(", "); } - printf("{0x%" PRIx32 ", %" PRIu32 "}", req.ofm[i].ptr, req.ofm[i].size); + LOG_DEBUG_N("{0x%" PRIx32 ", %" PRIu32 "}", req.ofm[i].ptr, req.ofm[i].size); } - printf("]\n"); + LOG_DEBUG_N("]\n"); DataPtr networkModel(reinterpret_cast<void *>(req.network.ptr), req.network.size); @@ -320,7 +322,7 @@ bool MessageProcess::handleMessage() { void MessageProcess::sendPong() { if (!queueOut.write(ETHOSU_CORE_MSG_PONG)) { - printf("ERROR: Msg: Failed to write pong response. No mailbox message sent\n"); + LOG_ERR("Msg: Failed to write pong response. No mailbox message sent\n"); } else { mailbox.sendMessage(); } @@ -335,7 +337,7 @@ void MessageProcess::sendVersionRsp() { }; if (!queueOut.write(ETHOSU_CORE_MSG_VERSION_RSP, ver)) { - printf("ERROR: Failed to write version response. No mailbox message sent\n"); + LOG_ERR("Msg: Failed to write version response. 
No mailbox message sent\n"); } else { mailbox.sendMessage(); } @@ -389,7 +391,7 @@ void MessageProcess::sendCapabilityRsp(uint64_t userArg) { #endif if (!queueOut.write(ETHOSU_CORE_MSG_CAPABILITIES_RSP, capabilities)) { - printf("ERROR: Failed to write capability response. No mailbox message sent\n"); + LOG_ERR("Failed to write capability response. No mailbox message sent\n"); } else { mailbox.sendMessage(); } @@ -405,9 +407,9 @@ void MessageProcess::sndErrorRspAndResetQueue(ethosu_core_msg_err_type type, con error.msg[i] = message[i]; } } - printf("ERROR: Msg: \"%s\"\n", message); + LOG_ERR("Msg: \"%s\"\n", message); if (!queueOut.write(ETHOSU_CORE_MSG_ERR, &error)) { - printf("ERROR: Msg: Failed to write error response. No mailbox message sent\n"); + LOG_ERR("Msg: Failed to write error response. No mailbox message sent\n"); return; } queueIn.reset(); @@ -445,13 +447,13 @@ void MessageProcess::sendInferenceRsp(uint64_t userArg, } rsp.pmu_cycle_counter_count = pmuCycleCounterCount; - printf("Sending inference response. userArg=0x%" PRIx64 ", ofm_count=%" PRIu32 ", status=%" PRIu32 "\n", - rsp.user_arg, - rsp.ofm_count, - rsp.status); + LOG_INFO("Sending inference response. userArg=0x%" PRIx64 ", ofm_count=%" PRIu32 ", status=%" PRIu32 "\n", + rsp.user_arg, + rsp.ofm_count, + rsp.status); if (!queueOut.write(ETHOSU_CORE_MSG_INFERENCE_RSP, rsp)) { - printf("ERROR: Msg: Failed to write inference response. No mailbox message sent\n"); + LOG_ERR("Msg: Failed to write inference response. No mailbox message sent\n"); } else { mailbox.sendMessage(); } |