diff options
author | Mikael Olsson <mikael.olsson@arm.com> | 2023-10-30 11:05:39 +0100 |
---|---|---|
committer | Mikael Olsson <mikael.olsson@arm.com> | 2023-11-06 09:36:00 +0100 |
commit | 9c999fdd40c0bf2ae420f6f3bfe013dc6baa73c1 (patch) | |
tree | 9306ed881d5e11c467f80ea2b68c17614daaae50 /kernel/ethosu_mailbox.c | |
parent | 075451507cda3e8f543caecacfadf226a69e5a05 (diff) | |
download | ethos-u-linux-driver-stack-9c999fdd40c0bf2ae420f6f3bfe013dc6baa73c1.tar.gz |
Split DMA memory and buffer setup in kernel driver
To allow the NPU kernel driver to allocate and use DMA memory internally
without creating a buffer instance, the DMA memory management has been
split out from the buffer code.
Change-Id: I46fdeee51b5ef786a54b8e7c866d137d91222724
Signed-off-by: Mikael Olsson <mikael.olsson@arm.com>
Diffstat (limited to 'kernel/ethosu_mailbox.c')
-rw-r--r-- | kernel/ethosu_mailbox.c | 22 |
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/kernel/ethosu_mailbox.c b/kernel/ethosu_mailbox.c
index e499860..9b9cd18 100644
--- a/kernel/ethosu_mailbox.c
+++ b/kernel/ethosu_mailbox.c
@@ -26,6 +26,8 @@
 #include "ethosu_buffer.h"
 #include "ethosu_core_rpmsg.h"
 #include "ethosu_device.h"
+#include "ethosu_dma_mem.h"
+#include "ethosu_network.h"

 #include <linux/atomic.h>
 #include <linux/jiffies.h>
@@ -121,11 +123,11 @@ static int ethosu_send_locked(struct ethosu_mailbox *mbox,
 	return ret;
 }

-static void ethosu_core_set_size(struct ethosu_buffer *buf,
-				 struct ethosu_core_buffer *cbuf)
+static void ethosu_core_buffer_dma_mem_set(struct ethosu_dma_mem *dma_mem,
+					   struct ethosu_core_buffer *cbuf)
 {
-	cbuf->ptr = (uint32_t)buf->dma_addr;
-	cbuf->size = (uint32_t)buf->size;
+	cbuf->ptr = (uint32_t)dma_mem->dma_addr;
+	cbuf->size = (uint32_t)dma_mem->size;
 }

 int ethosu_mailbox_register(struct ethosu_mailbox *mbox,
@@ -267,17 +269,20 @@ int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
 	inf_req->pmu_cycle_counter_enable = pmu_cycle_counter_enable;

 	for (i = 0; i < ifm_count; i++)
-		ethosu_core_set_size(ifm[i], &inf_req->ifm[i]);
+		ethosu_core_buffer_dma_mem_set(ifm[i]->dma_mem,
+					       &inf_req->ifm[i]);

 	for (i = 0; i < ofm_count; i++)
-		ethosu_core_set_size(ofm[i], &inf_req->ofm[i]);
+		ethosu_core_buffer_dma_mem_set(ofm[i]->dma_mem,
+					       &inf_req->ofm[i]);

 	for (i = 0; i < ETHOSU_CORE_PMU_MAX; i++)
 		inf_req->pmu_event_config[i] = pmu_event_config[i];

 	if (network != NULL) {
 		inf_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
-		ethosu_core_set_size(network, &inf_req->network.buffer);
+		ethosu_core_buffer_dma_mem_set(network->dma_mem,
+					       &inf_req->network.buffer);
 	} else {
 		inf_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
 		inf_req->network.index = network_index;
@@ -305,7 +310,8 @@ int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,

 	if (network != NULL) {
 		info_req->network.type = ETHOSU_CORE_NETWORK_BUFFER;
-		ethosu_core_set_size(network, &info_req->network.buffer);
+		ethosu_core_buffer_dma_mem_set(network->dma_mem,
+					       &info_req->network.buffer);
 	} else {
 		info_req->network.type = ETHOSU_CORE_NETWORK_INDEX;
 		info_req->network.index = network_index;