Diffstat (limited to 'kernel/ethosu_mailbox.c')
 -rw-r--r--   kernel/ethosu_mailbox.c   101
 1 file changed, 90 insertions(+), 11 deletions(-)
diff --git a/kernel/ethosu_mailbox.c b/kernel/ethosu_mailbox.c
index 4f7f5b7..5cc2465 100644
--- a/kernel/ethosu_mailbox.c
+++ b/kernel/ethosu_mailbox.c
@@ -28,6 +28,7 @@
#include "ethosu_core_rpmsg.h"
#include "ethosu_device.h"
+#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/resource.h>
#include <linux/uio.h>
@@ -46,9 +47,81 @@
#endif
/****************************************************************************
+ * Defines
+ ****************************************************************************/
+
+#define MAILBOX_SEND_TIMEOUT_MS 15000
+
+/****************************************************************************
* Functions
****************************************************************************/
+/**
+ * ethosu_send_locked() - Blocking mailbox message sender
+ *
+ * Context: Can sleep and must be called with the device mutex locked.
+ *
+ * Return: 0 on success, else error code.
+ */
+static int ethosu_send_locked(struct ethosu_mailbox *mbox,
+			      void *data,
+			      size_t length)
+{
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	struct device *dev = mbox->dev;
+	long timeout = msecs_to_jiffies(MAILBOX_SEND_TIMEOUT_MS);
+	bool try_send = !wq_has_sleeper(&mbox->send_queue);
+	int ret;
+
+	might_sleep();
+
+	/* Exclusive wait to only wake up one task at a time */
+	add_wait_queue_exclusive(&mbox->send_queue, &wait);
+	for (;;) {
+		/* Stop if the mailbox is closing down */
+		if (atomic_read(&mbox->done)) {
+			ret = -ENODEV;
+			break;
+		}
+
+		/* Attempt to send if queue is empty or task was woken up */
+		if (try_send) {
+			ret = rpmsg_trysend(mbox->ept, data, length);
+			if (ret != -ENOMEM)
+				break;
+		} else {
+			try_send = true;
+		}
+
+		/* Unlock device mutex while waiting to not block other tasks */
+		device_unlock(dev);
+		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
+		device_lock(dev);
+
+		/* Stop if the wait was interrupted */
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+
+		if (!timeout) {
+			ret = -ETIME;
+			break;
+		}
+	}
+
+	remove_wait_queue(&mbox->send_queue, &wait);
+
+	/*
+	 * If the message was sent successfully, there may be more TX buffers
+	 * available so wake up the next waiting task.
+	 */
+	if (!ret && wq_has_sleeper(&mbox->send_queue))
+		wake_up(&mbox->send_queue);
+
+	return ret;
+}
+
static void ethosu_core_set_size(struct ethosu_buffer *buf,
				 struct ethosu_core_buffer *cbuf)
{
@@ -119,7 +192,7 @@ int ethosu_mailbox_ping(struct ethosu_mailbox *mbox)
		}
	};
-	return rpmsg_send(mbox->ept, &rpmsg, sizeof(rpmsg.header));
+	return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}
int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
@@ -131,7 +204,7 @@ int ethosu_mailbox_pong(struct ethosu_mailbox *mbox)
		}
	};
-	return rpmsg_send(mbox->ept, &rpmsg, sizeof(rpmsg.header));
+	return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}
int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox,
@@ -147,7 +220,7 @@ int ethosu_mailbox_version_request(struct ethosu_mailbox *mbox,
	msg->type = rpmsg.header.type;
-	return rpmsg_send(mbox->ept, &rpmsg, sizeof(rpmsg.header));
+	return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}
int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
@@ -163,7 +236,7 @@ int ethosu_mailbox_capabilities_request(struct ethosu_mailbox *mbox,
	msg->type = rpmsg.header.type;
-	return rpmsg_send(mbox->ept, &rpmsg, sizeof(rpmsg.header));
+	return ethosu_send_locked(mbox, &rpmsg, sizeof(rpmsg.header));
}
int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
@@ -218,8 +291,8 @@ int ethosu_mailbox_inference(struct ethosu_mailbox *mbox,
		inf_req->network.index = network_index;
	}
-	return rpmsg_send(mbox->ept, &rpmsg,
-			  sizeof(rpmsg.header) + sizeof(rpmsg.inf_req));
+	return ethosu_send_locked(mbox, &rpmsg,
+				  sizeof(rpmsg.header) + sizeof(rpmsg.inf_req));
}
int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
@@ -246,8 +319,9 @@ int ethosu_mailbox_network_info_request(struct ethosu_mailbox *mbox,
		info_req->network.index = network_index;
	}
-	return rpmsg_send(mbox->ept, &rpmsg,
-			  sizeof(rpmsg.header) + sizeof(rpmsg.net_info_req));
+	return ethosu_send_locked(mbox, &rpmsg,
+				  sizeof(rpmsg.header) +
+				  sizeof(rpmsg.net_info_req));
}
int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
@@ -268,8 +342,9 @@ int ethosu_mailbox_cancel_inference(struct ethosu_mailbox *mbox,
	msg->type = rpmsg.header.type;
-	return rpmsg_send(mbox->ept, &rpmsg,
-			  sizeof(rpmsg.header) + sizeof(rpmsg.cancel_req));
+	return ethosu_send_locked(mbox, &rpmsg,
+				  sizeof(rpmsg.header) +
+				  sizeof(rpmsg.cancel_req));
}
int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
@@ -279,9 +354,13 @@ int ethosu_mailbox_init(struct ethosu_mailbox *mbox,
	mbox->dev = dev;
	mbox->ept = ept;
	idr_init(&mbox->msg_idr);
+	init_waitqueue_head(&mbox->send_queue);
	return 0;
}
void ethosu_mailbox_deinit(struct ethosu_mailbox *mbox)
-{}
+{
+	atomic_set(&mbox->done, 1);
+	wake_up_all(&mbox->send_queue);
+}
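
For reference, the new send path depends on two fields being added to struct ethosu_mailbox and initialised in ethosu_mailbox_init(): a wait queue for blocked senders and an atomic shutdown flag. The companion header change is not part of this diff (it is limited to kernel/ethosu_mailbox.c), so the sketch below is only an assumption of what the declaration in ethosu_mailbox.h looks like; the field names match their usage above, the rest of the layout is illustrative.

/*
 * Sketch of the assumed struct layout in ethosu_mailbox.h.
 * Only send_queue and done are introduced by this patch; the other
 * fields are inferred from how mbox is used in this file.
 */
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/rpmsg.h>
#include <linux/wait.h>

struct ethosu_mailbox {
	struct device         *dev;        /* device whose mutex guards the mailbox */
	struct rpmsg_endpoint *ept;        /* rpmsg endpoint used by rpmsg_trysend() */
	struct idr             msg_idr;    /* existing message id allocator */
	wait_queue_head_t      send_queue; /* senders waiting for a free TX buffer */
	atomic_t               done;       /* set at deinit to abort pending sends */
};

With this layout, the exclusive wait in ethosu_send_locked() together with the single wake_up() after a successful send means only one blocked sender retries per freed TX buffer, while ethosu_mailbox_deinit() sets done and calls wake_up_all() so every waiter can bail out with -ENODEV. Callers are expected to hold the device mutex, as stated in the kernel-doc; a hypothetical caller could look like the following (example_ping is not part of the driver).

/* Hypothetical caller, assuming the device mutex convention from the kernel-doc. */
static int example_ping(struct ethosu_mailbox *mbox)
{
	int ret;

	device_lock(mbox->dev);
	ret = ethosu_mailbox_ping(mbox);
	device_unlock(mbox->dev);

	return ret;
}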