author    Nathan Benavides-Luu <nate1299@me.com>  2024-03-05 10:20:46 -0500
committer Nathan Benavides-Luu <nate1299@me.com>  2024-03-05 10:20:46 -0500
commit    7a684c5fb743d1e03d59db49fe283cfd4b0439a6 (patch)
tree      439a98e5d607a842ef9eab08cba672a4943b3fcd
parent    62b85026be4f1de475cd96c58200075f6093a28b (diff)
Add kmutex.c
-rw-r--r--  kernel/proc/kmutex.c  119
 1 file changed, 119 insertions, 0 deletions
diff --git a/kernel/proc/kmutex.c b/kernel/proc/kmutex.c
new file mode 100644
index 0000000..7e39d63
--- /dev/null
+++ b/kernel/proc/kmutex.c
@@ -0,0 +1,119 @@
+// SMP.1 + SMP.3
+// spinlock + mask interrupts
+#include "proc/kmutex.h"
+#include "globals.h"
+#include "main/interrupt.h"
+#include <errno.h>
+
+/*
+ * IMPORTANT: Mutexes can _NEVER_ be locked or unlocked from an
+ * interrupt context. Mutexes are _ONLY_ locked or unlocked from a
+ * thread context.
+ */
+
+/*
+ * Checks for the specific deadlock case where:
+ * curthr wants mtx, but the owner of mtx is waiting on a mutex that curthr is
+ * holding
+ */
+#define DEBUG_DEADLOCKS 1
+void detect_deadlocks(kmutex_t *mtx)
+{
+#if DEBUG_DEADLOCKS
+ list_iterate(&curthr->kt_mutexes, held, kmutex_t, km_link)
+ {
+ list_iterate(&held->km_waitq.tq_list, waiter, kthread_t, kt_qlink)
+ {
+ if (waiter == mtx->km_holder)
+ {
+ panic(
+ "detected deadlock between P%d and P%d (mutexes 0x%p, "
+ "0x%p)\n",
+ curproc->p_pid, waiter->kt_proc->p_pid, held, mtx);
+ }
+ }
+ }
+#endif
+}
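+
+/*
+ * Illustrative sketch only (not part of the kmutex API): the cycle that
+ * detect_deadlocks() catches looks like this, with threads A and B and
+ * hypothetical mutexes m1 and m2:
+ *
+ *   thread A:              thread B:
+ *     kmutex_lock(&m1);      kmutex_lock(&m2);
+ *     kmutex_lock(&m2);      kmutex_lock(&m1);   <-- panics here
+ *
+ * When B requests m1, B already holds m2, and m1's holder (A) is sleeping
+ * on m2's wait queue, so the check above fires.
+ */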
+
+/*
+ * Initializes the members of mtx
+ */
+void kmutex_init(kmutex_t *mtx)
+{
+ /* PROCS {{{ */
+ mtx->km_holder = NULL;
+ sched_queue_init(&mtx->km_waitq);
+ list_link_init(&mtx->km_link);
+ /* PROCS }}} */
+}
+
+/*
+ * Obtains a mutex, potentially blocking.
+ *
+ * Hints:
+ * You are strongly advised to maintain the kt_mutexes member of curthr and call
+ * detect_deadlocks() to help with debugging.
+ */
+void kmutex_lock(kmutex_t *mtx)
+{
+ /* PROCS {{{ */
+
+    dbg(DBG_ERROR, "locking mutex: %p\n", mtx);
+ KASSERT(curthr && "need thread context to lock mutex");
+ KASSERT(!kmutex_owns_mutex(mtx) && "already owner");
+
+ if (mtx->km_holder)
+ {
+ detect_deadlocks(mtx);
+ sched_sleep_on(&mtx->km_waitq);
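+        /*
+         * By the time sched_sleep_on() returns, kmutex_unlock() has already
+         * made this thread the holder and moved km_link onto our kt_mutexes,
+         * so the assertion below is all that is left to do.
+         */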
+ KASSERT(kmutex_owns_mutex(mtx));
+ }
+ else
+ {
+ mtx->km_holder = curthr;
+ list_insert_tail(&curthr->kt_mutexes, &mtx->km_link);
+ }
+ /* PROCS }}} */
+}
+
+/*
+ * Releases a mutex.
+ *
+ * Hints:
+ * Again, you are strongly advised to maintain kt_mutexes.
+ * Use sched_wakeup_on() to hand off the mutex. Think carefully about how it
+ * interacts with the sleeping side of kmutex_lock() to ensure that the mutex's
+ * km_holder is properly set before the new owner is runnable.
+ */
+void kmutex_unlock(kmutex_t *mtx)
+{
+ /* PROCS {{{ */
+    dbg(DBG_ERROR, "unlocking mutex: %p\n", mtx);
+ KASSERT(curthr && (curthr == mtx->km_holder) &&
+            "unlocking a mutex we don't own");
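+    /*
+     * sched_wakeup_on() stores the woken thread (or NULL, if no one is
+     * waiting) into km_holder, so ownership is handed off before the new
+     * owner ever becomes runnable.
+     */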
+ sched_wakeup_on(&mtx->km_waitq, &mtx->km_holder);
+ KASSERT(!kmutex_owns_mutex(mtx));
+ list_remove(&mtx->km_link);
+    if (mtx->km_holder)
+    {
+        list_insert_tail(&mtx->km_holder->kt_mutexes, &mtx->km_link);
+    }
+
+ /* PROCS }}} */
+}
+
+/*
+ * Checks whether mtx has any waiters, i.e., whether its wait queue is
+ * non-empty.
+ */
+long kmutex_has_waiters(kmutex_t *mtx)
+{
+    return !sched_queue_empty(&mtx->km_waitq);
+}
+
+/*
+ * Checks if the current thread owns mtx.
+ */
+inline long kmutex_owns_mutex(kmutex_t *mtx)
+{
+ return curthr && mtx->km_holder == curthr;
+}
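+
+/*
+ * Usage sketch (illustrative only; res_mutex and do_work() are hypothetical
+ * and not part of this file):
+ *
+ *   static kmutex_t res_mutex;
+ *
+ *   void res_init(void) { kmutex_init(&res_mutex); }
+ *
+ *   void res_worker(void)
+ *   {
+ *       kmutex_lock(&res_mutex);   // may block; never from interrupt context
+ *       do_work();                 // critical section
+ *       kmutex_unlock(&res_mutex); // hands the mutex to the next waiter, if any
+ *   }
+ */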