author    nthnluu <nate1299@me.com>  2024-01-28 21:20:27 -0500
committer nthnluu <nate1299@me.com>  2024-01-28 21:20:27 -0500
commit    c63f340d90800895f007de64b7d2d14624263331 (patch)
tree      2c0849fa597dd6da831c8707b6f2603403778d7b /kernel/proc/sched.c
Created student weenix repository
Diffstat (limited to 'kernel/proc/sched.c')
-rw-r--r--  kernel/proc/sched.c  368
1 file changed, 368 insertions, 0 deletions
diff --git a/kernel/proc/sched.c b/kernel/proc/sched.c
new file mode 100644
index 0000000..9162875
--- /dev/null
+++ b/kernel/proc/sched.c
@@ -0,0 +1,368 @@
+// SMP.1 + SMP.2 + SMP.3 + SMP.4
+// spinlocks + mask interrupts
+#include "api/syscall.h"
+#include "errno.h"
+#include "fs/vfs.h"
+#include "globals.h"
+#include "main/apic.h"
+#include "main/inits.h"
+#include "types.h"
+#include "util/debug.h"
+#include <util/time.h>
+
+/*==========
+ * Variables
+ *=========*/
+
+/*
+ * The run queue of threads waiting to be run.
+ */
+static ktqueue_t kt_runq CORE_SPECIFIC_DATA;
+
+/*
+ * Tracks the most recent thread context before a context_switch(), for
+ * debugging purposes.
+ */
+static context_t *last_thread_context CORE_SPECIFIC_DATA;
+
+/*===================
+ * Preemption helpers
+ *==================*/
+
+inline void preemption_disable()
+{
+ if (curthr)
+ curthr->kt_preemption_count++;
+}
+
+inline void preemption_enable()
+{
+ if (curthr)
+ {
+ KASSERT(curthr->kt_preemption_count);
+ curthr->kt_preemption_count--;
+ }
+}
+
+inline void preemption_reset()
+{
+ KASSERT(curthr);
+ curthr->kt_preemption_count = 0;
+}
+
+inline long preemption_enabled()
+{
+ return curthr && !curthr->kt_preemption_count;
+}
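+
+/*
+ * Illustrative usage (not part of the stencil): these helpers are meant to be
+ * used as a matched pair around a region that must not be preempted, e.g.
+ *
+ *     preemption_disable();
+ *     // ... touch state that must not be preempted away mid-update ...
+ *     preemption_enable();
+ *
+ * Because the helpers count rather than toggle, such regions nest safely.
+ */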
+
+/*==================
+ * ktqueue functions
+ *=================*/
+
+/*
+ * Initializes queue.
+ */
+void sched_queue_init(ktqueue_t *queue)
+{
+ list_init(&queue->tq_list);
+ queue->tq_size = 0;
+}
+
+/*
+ * Adds thr to the tail of queue.
+ *
+ * queue must be locked
+ */
+static void ktqueue_enqueue(ktqueue_t *queue, kthread_t *thr)
+{
+ KASSERT(!thr->kt_wchan);
+
+ list_assert_sanity(&queue->tq_list);
+ /* Because of the way core-specific data is handled, we add to the front
+ * of the queue (and remove from the back). */
+ list_insert_head(&queue->tq_list, &thr->kt_qlink);
+ list_assert_sanity(&queue->tq_list);
+
+ thr->kt_wchan = queue;
+ queue->tq_size++;
+}
+
+/*
+ * Removes and returns a thread from the head of queue.
+ *
+ * queue must be locked
+ */
+static kthread_t *ktqueue_dequeue(ktqueue_t *queue)
+{
+ if (sched_queue_empty(queue))
+ {
+ return NULL;
+ }
+
+ list_assert_sanity(&queue->tq_list);
+
+ list_link_t *link = queue->tq_list.l_prev;
+ kthread_t *thr = list_item(link, kthread_t, kt_qlink);
+ list_remove(link);
+ thr->kt_wchan = NULL;
+
+ list_assert_sanity(&queue->tq_list);
+
+ queue->tq_size--;
+ return thr;
+}
+
+/*
+ * Removes thr from queue
+ *
+ * queue must be locked
+ */
+static void ktqueue_remove(ktqueue_t *queue, kthread_t *thr)
+{
+ // KASSERT(spinlock_ownslock(&queue->tq_lock));
+ KASSERT(thr->kt_qlink.l_next && thr->kt_qlink.l_prev);
+ list_remove(&thr->kt_qlink);
+ thr->kt_wchan = NULL;
+ queue->tq_size--;
+ list_assert_sanity(&queue->tq_list);
+}
+
+/*
+ * Returns 1 if queue is empty, 0 if it's not.
+ *
+ * If using this for branching / conditional logic on the queue, it should be
+ * locked for this call to avoid a TOCTTOU bug. This is, however, up to the
+ * caller and not enforced at this level.
+ */
+inline long sched_queue_empty(ktqueue_t *queue) { return queue->tq_size == 0; }
+
+/*==========
+ * Functions
+ *=========*/
+
+/*
+ * Initializes the run queue.
+ */
+void sched_init(void)
+{
+ sched_queue_init(GET_CSD(curcore.kc_id, ktqueue_t, kt_runq));
+}
+
+/*
+ * Puts curthr into the cancellable sleep state, and calls sched_switch() with
+ * the passed in arguments. Cancellable sleep means that the thread can be woken
+ * up from sleep for two reasons:
+ * 1. The event it is waiting for has occurred.
+ * 2. It was cancelled.
+ *
+ * Returns 0, or:
+ * - EINTR: If curthr is cancelled before or after the call to sched_switch()
+ *
+ * Hints:
+ * Do not enqueue the thread directly, let sched_switch handle this.
+ */
+long sched_cancellable_sleep_on(ktqueue_t *queue)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+ return 0;
+}
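+
+/*
+ * A hedged sketch (not the stencil solution) of the shape this could take,
+ * assuming kthread_t has a kt_cancelled flag and a KT_SLEEP_CANCELLABLE
+ * state, and that errors are returned as negative errno values:
+ *
+ *     if (curthr->kt_cancelled)
+ *         return -EINTR;
+ *     curthr->kt_state = KT_SLEEP_CANCELLABLE;
+ *     sched_switch(queue);
+ *     return curthr->kt_cancelled ? -EINTR : 0;
+ */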
+
+/*
+ * If the given thread is in a cancellable sleep, removes it from whatever queue
+ * it is sleeping on and makes the thread runnable again.
+ *
+ * Regardless of the thread's state, this should mark the thread as cancelled.
+ */
+void sched_cancel(kthread_t *thr)
+{
+ // KASSERT(spinlock_ownslock(&thr->kt_lock));
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
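+
+/*
+ * A hedged sketch (not the stencil solution), under the same assumptions as
+ * above (a kt_cancelled flag and a KT_SLEEP_CANCELLABLE state):
+ *
+ *     thr->kt_cancelled = 1;
+ *     if (thr->kt_state == KT_SLEEP_CANCELLABLE)
+ *     {
+ *         ktqueue_remove(thr->kt_wchan, thr);
+ *         sched_make_runnable(thr);
+ *     }
+ */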
+
+/*
+ * Switches into the context of the current core, which is constantly in a loop
+ * in core_switch() to choose a new runnable thread and switch into its thread
+ * context.
+ *
+ * We want to switch to the current core because the idle process handles the
+ * actual switching of threads. Please see section 3.3, Boot Sequence, for a
+ * more in-depth explanation of the idle process and its relationship with
+ * core_switch().
+ *
+ * Hints:
+ * curthr state must NOT be KT_ON_CPU upon entry.
+ * To ensure that curthr is enqueued on queue only once it is no longer executing,
+ * set the kc_queue field of curcore (the current core) to the queue. See
+ * core_switch() to see how the queue is handled.
+ *
+ * Protect the context switch from interrupts: Use intr_disable(), intr_setipl(),
+ * intr_enable(), and IPL_LOW.
+ *
+ * Even though we want to disable interrupts while modifying the run queue,
+ * core_switch() will actually enable interrupts before sleeping,
+ * but it doesn't modify the IPL. Because we want an interrupt of any level
+ * to wake up the idling core, IPL should be set to IPL_LOW.
+ *
+ * Do not directly call core_switch. The curcore's thread is stuck in a loop
+ * inside core_switch, so switching to its context brings you there.
+ *
+ * For debugging purposes, you may find it useful to set
+ * last_thread_context to the context of the current thread here before the call
+ * to context_switch.
+ */
+void sched_switch(ktqueue_t *queue)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
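+
+/*
+ * A hedged sketch (not the stencil solution) following the hints above, using
+ * only the functions they name:
+ *
+ *     KASSERT(curthr->kt_state != KT_ON_CPU);
+ *     intr_disable();
+ *     intr_setipl(IPL_LOW);
+ *     curcore.kc_queue = queue;
+ *     last_thread_context = &curthr->kt_ctx;
+ *     context_switch(&curthr->kt_ctx, &curcore.kc_ctx);
+ *     intr_enable();
+ */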
+
+/*
+ * Set the state of the current thread to runnable and sched_switch() with the
+ * current core's runq. Protect access to the thread via its lock.
+ */
+void sched_yield()
+{
+ KASSERT(curthr->kt_state == KT_ON_CPU);
+ curthr->kt_state = KT_RUNNABLE;
+ sched_switch(&kt_runq);
+}
+
+/*
+ * Makes the given thread runnable by setting its state and enqueuing it in the
+ * run queue (kt_runq).
+ *
+ * Hints:
+ * Cannot be called on curthr (it is already running).
+ * Because this can be called from an interrupt context, temporarily mask
+ * interrupts. Use intr_setipl() and IPL_HIGH in order to avoid being interrupted
+ * while modifying the queue.
+ */
+void sched_make_runnable(kthread_t *thr)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
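+
+/*
+ * A hedged sketch (not the stencil solution), assuming intr_setipl() returns
+ * the previous IPL so it can be restored afterwards:
+ *
+ *     KASSERT(thr != curthr);
+ *     uint8_t old_ipl = intr_setipl(IPL_HIGH);
+ *     thr->kt_state = KT_RUNNABLE;
+ *     ktqueue_enqueue(GET_CSD(curcore.kc_id, ktqueue_t, kt_runq), thr);
+ *     intr_setipl(old_ipl);
+ */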
+
+/*
+ * Places curthr in an uninterruptible sleep on q. I.e. if the thread is cancelled
+ * while sleeping, it will NOT notice until it is woken up by the event it's
+ * waiting for.
+ *
+ * Hints:
+ * Temporarily mask interrupts using intr_setipl() and IPL_HIGH.
+ * IPL should be set to IPL_HIGH because the act of changing the thread's state
+ * and enqueuing the thread on the queue should not be interrupted
+ * (as sched_wakeup_on() could be called from an interrupt context).
+ *
+ * Do not enqueue the thread directly; let sched_switch() handle this (pass q
+ * to sched_switch()).
+ */
+void sched_sleep_on(ktqueue_t *q)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
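+
+/*
+ * A hedged sketch (not the stencil solution); KT_SLEEP is assumed to be the
+ * uninterruptible sleep state. Per its own hints, sched_switch() takes care
+ * of dropping the IPL back to IPL_LOW:
+ *
+ *     intr_setipl(IPL_HIGH);
+ *     curthr->kt_state = KT_SLEEP;
+ *     sched_switch(q);
+ */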
+
+/*
+ * Wakes up a thread on the given queue by taking it off the queue and
+ * making it runnable. If given an empty queue, do nothing.
+ *
+ * Hints:
+ * Make sure to set *ktp (if it is provided--i.e. ktp is not NULL) to the
+ * dequeued thread before making it runnable. This allows the caller to get a
+ * handle to the thread that was woken up (useful, for instance, when
+ * implementing unlock() on a mutex: the mutex can wake up a sleeping thread
+ * and make it the new owner).
+ */
+void sched_wakeup_on(ktqueue_t *q, kthread_t **ktp)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
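+
+/*
+ * A hedged sketch (not the stencil solution):
+ *
+ *     kthread_t *thr = ktqueue_dequeue(q);
+ *     if (!thr)
+ *         return;
+ *     if (ktp)
+ *         *ktp = thr;
+ *     sched_make_runnable(thr);
+ */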
+
+/*
+ * Wake up all the threads on the given queue by making them all runnable.
+ */
+void sched_broadcast_on(ktqueue_t *q)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
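+
+/*
+ * A hedged sketch (not the stencil solution):
+ *
+ *     kthread_t *thr;
+ *     while ((thr = ktqueue_dequeue(q)))
+ *         sched_make_runnable(thr);
+ */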
+
+/*===============
+ * Functions: SMP
+ *==============*/
+
+/*
+ * A sad, but functional, attempt at load balancing when a core is idle
+ */
+#define LOAD_BALANCING_IDLE_THRESHOLD 4096
+static inline kthread_t *load_balance()
+{
+ return NULL;
+}
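+
+/*
+ * The stub above simply gives up (returns NULL). One possible, deliberately
+ * simplified shape of a work-stealing pass is sketched here; MAX_CORES is a
+ * hypothetical core-count constant, and the sketch ignores the locking that
+ * touching another core's run queue would require:
+ *
+ *     for (long i = 0; i < MAX_CORES; i++)
+ *     {
+ *         if (i == (long)curcore.kc_id)
+ *             continue;
+ *         kthread_t *stolen = ktqueue_dequeue(GET_CSD(i, ktqueue_t, kt_runq));
+ *         if (stolen)
+ *             return stolen;
+ *     }
+ *     return NULL;
+ */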
+
+/*
+ * The meat of our SMP system.
+ *
+ * You will want to (in this exact order):
+ * 1) perform the operations on curcore.kc_queue and curcore.kc_lock
+ * 2) set curproc to idleproc, and curthr to NULL
+ * 3) try to get the next thread to run:
+ *    a) try to use your own runq (kt_runq), which is core-specific data
+ *    b) if, according to core_uptime(), at least LOAD_BALANCING_IDLE_THRESHOLD
+ *       has passed, then call load_balance() to try to get the next thread
+ *       to run
+ *    c) if neither (a) nor (b) works, the core is idle. Wait for an interrupt
+ *       using intr_wait(). Note that you will need to re-disable interrupts
+ *       after returning from intr_wait().
+ * 4) ensure the selected thread's PML4 is correctly set up with curcore's
+ *    core-specific data. Use kt_recent_core and map_in_core_specific_data.
+ * 5) set curthr and curproc
+ * 6) context_switch out
+ */
+void core_switch()
+{
+ while (1)
+ {
+ KASSERT(!intr_enabled());
+ KASSERT(!curthr || curthr->kt_state != KT_ON_CPU);
+
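+ /* The outgoing thread is now fully off this CPU; enqueue it on the queue
+ * that sched_switch() recorded in curcore.kc_queue, if any. */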
+ if (curcore.kc_queue)
+ {
+ ktqueue_enqueue(curcore.kc_queue, curthr);
+ }
+
+ curproc = &idleproc;
+ curthr = NULL;
+
+ kthread_t *next_thread = NULL;
+
+ size_t idle_start = core_uptime();
+ while (1)
+ {
+ next_thread = ktqueue_dequeue(&kt_runq);
+
+ if (!next_thread &&
+ core_uptime() - idle_start >= LOAD_BALANCING_IDLE_THRESHOLD)
+ next_thread = load_balance();
+
+ if (next_thread)
+ break;
+
+ intr_wait();
+ intr_disable();
+ }
+
+ KASSERT(next_thread->kt_state == KT_RUNNABLE);
+ KASSERT(next_thread->kt_proc);
+
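+ /* If this thread last ran on a different core, map this core's
+ * core-specific data into the thread's PML4 (step 4 above). */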
+ if (curcore.kc_id != next_thread->kt_recent_core)
+ {
+ map_in_core_specific_data(next_thread->kt_ctx.c_pml4);
+ next_thread->kt_recent_core = curcore.kc_id;
+ }
+
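+ /* Sanity check: the current stack (where next_thread lives) must map to
+ * the same physical address under the selected thread's PML4 as under the
+ * currently active page tables. */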
+ uintptr_t mapped_paddr = pt_virt_to_phys_helper(
+ next_thread->kt_ctx.c_pml4, (uintptr_t)&next_thread);
+ uintptr_t expected_paddr =
+ pt_virt_to_phys_helper(pt_get(), (uintptr_t)&next_thread);
+ KASSERT(mapped_paddr == expected_paddr);
+
+ curthr = next_thread;
+ curthr->kt_state = KT_ON_CPU;
+ curproc = curthr->kt_proc;
+ context_switch(&curcore.kc_ctx, &curthr->kt_ctx);
+ }
+}
\ No newline at end of file