// SMP.1 + SMP.3: spinlock + mask interrupts
#include "proc/kmutex.h"
#include "globals.h"
#include "main/interrupt.h"
#include <errno.h>
/*
 * IMPORTANT: Mutexes can _NEVER_ be locked or unlocked from an
 * interrupt context. Mutexes are _ONLY_ locked or unlocked from a
 * thread context.
 */
/*
 * Checks for the specific deadlock case where curthr wants mtx, but the
 * owner of mtx is waiting on a mutex that curthr is holding.
 *
 * Example: thread A holds M1 and requests M2 while thread B, which holds
 * M2, is already sleeping on M1's wait queue; neither thread can make
 * progress.
 */
#define DEBUG_DEADLOCKS 1
void detect_deadlocks(kmutex_t *mtx)
{
#if DEBUG_DEADLOCKS
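    /* For each mutex curthr already holds, check whether the holder of
     * mtx is sleeping on that mutex's wait queue. */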
list_iterate(&curthr->kt_mutexes, held, kmutex_t, km_link)
{
list_iterate(&held->km_waitq.tq_list, waiter, kthread_t, kt_qlink)
{
if (waiter == mtx->km_holder)
{
panic(
"detected deadlock between P%d and P%d (mutexes 0x%p, "
"0x%p)\n",
curproc->p_pid, waiter->kt_proc->p_pid, held, mtx);
}
}
}
#endif
}
/*
* Initializes the members of mtx
*/
void kmutex_init(kmutex_t *mtx)
{
/* PROCS {{{ */
mtx->km_holder = NULL;
sched_queue_init(&mtx->km_waitq);
list_link_init(&mtx->km_link);
/* PROCS }}} */
}
/*
 * Obtains a mutex, potentially blocking.
 *
 * Hints:
 * You are strongly advised to maintain the kt_mutexes member of curthr
 * and to call detect_deadlocks() to aid debugging.
 */
void kmutex_lock(kmutex_t *mtx)
{
/* PROCS {{{ */
    dbg(DBG_ERROR, "locking mutex: %p\n", mtx);
KASSERT(curthr && "need thread context to lock mutex");
KASSERT(!kmutex_owns_mutex(mtx) && "already owner");
if (mtx->km_holder)
{
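        /* Contended: check for deadlock, then sleep until the current
         * holder hands the mutex directly to us in kmutex_unlock(). */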
detect_deadlocks(mtx);
sched_sleep_on(&mtx->km_waitq);
KASSERT(kmutex_owns_mutex(mtx));
}
else
{
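        /* Uncontended: take ownership immediately and track the mutex
         * on our kt_mutexes list. */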
mtx->km_holder = curthr;
list_insert_tail(&curthr->kt_mutexes, &mtx->km_link);
}
/* PROCS }}} */
}
/*
 * Releases a mutex.
 *
 * Hints:
 * Again, you are strongly advised to maintain kt_mutexes.
 * Use sched_wakeup_on() to hand off the mutex; think carefully about how
 * these two functions interact to ensure that the mutex's km_holder is
 * properly set before the new owner is runnable.
 */
void kmutex_unlock(kmutex_t *mtx)
{
/* PROCS {{{ */
    dbg(DBG_ERROR, "unlocking mutex: %p\n", mtx);
    KASSERT(curthr && (curthr == mtx->km_holder) &&
            "unlocking a mutex we don't own");
sched_wakeup_on(&mtx->km_waitq, &mtx->km_holder);
KASSERT(!kmutex_owns_mutex(mtx));
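    /* Move the mutex's bookkeeping link from our kt_mutexes list to the
     * new holder's, if there is one. */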
list_remove(&mtx->km_link);
    if (mtx->km_holder)
    {
        list_insert_tail(&mtx->km_holder->kt_mutexes, &mtx->km_link);
    }
/* PROCS }}} */
}
/*
 * Returns nonzero if any threads are waiting on mtx's wait queue.
 */
long kmutex_has_waiters(kmutex_t *mtx)
{
    return !sched_queue_empty(&mtx->km_waitq);
}
/*
* Checks if the current thread owns mtx.
*/
inline long kmutex_owns_mutex(kmutex_t *mtx)
{
return curthr && mtx->km_holder == curthr;
}
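/*
 * Usage sketch (illustrative only; example_mtx, example_count, and
 * example_increment are hypothetical names, not part of the kmutex API):
 * a shared counter protected by a kmutex. Guarded with #if 0 so it is
 * not compiled into the kernel.
 */
#if 0
static kmutex_t example_mtx; /* call kmutex_init(&example_mtx) once at startup */
static long example_count;

static void example_increment(void)
{
    kmutex_lock(&example_mtx);   /* may block; never from interrupt context */
    example_count++;             /* critical section */
    kmutex_unlock(&example_mtx); /* hands off directly to a waiter, if any */
}
#endif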