author    nthnluu <nate1299@me.com>    2024-01-28 21:20:27 -0500
committer nthnluu <nate1299@me.com>    2024-01-28 21:20:27 -0500
commit    c63f340d90800895f007de64b7d2d14624263331 (patch)
tree      2c0849fa597dd6da831c8707b6f2603403778d7b /kernel/proc
Created student weenix repository
Diffstat (limited to 'kernel/proc')
-rw-r--r--  kernel/proc/context.c    150
-rw-r--r--  kernel/proc/fork.c        62
-rw-r--r--  kernel/proc/kmutex.c      88
-rw-r--r--  kernel/proc/kthread.c    136
-rw-r--r--  kernel/proc/kthread.gdb   39
-rw-r--r--  kernel/proc/proc.c       440
-rw-r--r--  kernel/proc/proc.py       38
-rw-r--r--  kernel/proc/sched.c      368
-rw-r--r--  kernel/proc/spinlock.c    21
9 files changed, 1342 insertions, 0 deletions
diff --git a/kernel/proc/context.c b/kernel/proc/context.c
new file mode 100644
index 0000000..b1902d8
--- /dev/null
+++ b/kernel/proc/context.c
@@ -0,0 +1,150 @@
+
+#include "proc/context.h"
+#include "proc/kthread.h"
+#include <main/cpuid.h>
+
+#include "main/apic.h"
+#include "main/gdt.h"
+
+typedef struct context_initial_func_args
+{
+ context_func_t func;
+ long arg1;
+ void *arg2;
+} packed context_initial_func_args_t;
+
+static void __context_thread_initial_func(context_initial_func_args_t args)
+{
+ preemption_reset();
+ apic_setipl(IPL_LOW);
+ intr_enable();
+
+ void *result = (args.func)(args.arg1, args.arg2);
+ kthread_exit(result);
+
+ panic("\nReturned from kthread_exit.\n");
+}
+
+void context_setup_raw(context_t *c, void (*func)(), void *kstack,
+ size_t kstacksz, pml4_t *pml4)
+{
+ KASSERT(NULL != pml4);
+ KASSERT(PAGE_ALIGNED(kstack));
+ c->c_kstack = (uintptr_t)kstack;
+ c->c_kstacksz = kstacksz;
+ c->c_pml4 = pml4;
+ c->c_rsp = (uintptr_t)kstack + kstacksz;
+ c->c_rsp -= sizeof(uintptr_t);
+ *((uintptr_t *)c->c_rsp) = 0;
+ c->c_rbp = c->c_rsp;
+ c->c_rip = (uintptr_t)func;
+}
+
+/*
+ * Initializes a context_t struct with the given parameters. arg1 and arg2 will
+ * appear as arguments to the function passed in when this context is first
+ * used.
+ */
+void context_setup(context_t *c, context_func_t func, long arg1, void *arg2,
+ void *kstack, size_t kstacksz, pml4_t *pml4)
+{
+ KASSERT(NULL != pml4);
+ KASSERT(PAGE_ALIGNED(kstack));
+
+ c->c_kstack = (uintptr_t)kstack;
+ c->c_kstacksz = kstacksz;
+ c->c_pml4 = pml4;
+
+ /* put the arguments for __context_thread_initial_func onto the
+ * stack */
+ c->c_rsp = (uintptr_t)kstack + kstacksz;
+ c->c_rsp -= sizeof(arg2);
+ *(void **)c->c_rsp = arg2;
+ c->c_rsp -= sizeof(arg1);
+ *(long *)c->c_rsp = arg1;
+ c->c_rsp -= sizeof(context_func_t);
+ *(context_func_t *)c->c_rsp = func;
+ // Take space for the function return address (unused)
+ c->c_rsp -= sizeof(uintptr_t);
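+
+    /* From c_rsp upward the stack now reads: [unused return-address slot]
+     * [func][arg1][arg2]. When this context is first activated (both
+     * context_make_active() and context_switch() push c_rip and "ret" into
+     * it), rsp lands on the unused slot, so func/arg1/arg2 line up exactly
+     * with the by-value context_initial_func_args_t parameter of
+     * __context_thread_initial_func, which the SysV ABI passes on the stack
+     * starting at rsp + 8. */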
+
+ c->c_rbp = c->c_rsp;
+ c->c_rip = (uintptr_t)__context_thread_initial_func;
+}
+
+/*
+ * WARNING!! POTENTIAL EDITOR BEWARE!!
+ * IF YOU REMOVE THE PT_SET CALLS BELOW,
+ * YOU ***MUST*** DEAL WITH SMP TLB SHOOTDOWN
+ *
+ * IN OTHER WORDS, THINK *VERY* CAREFULLY BEFORE
+ * REMOVING THE CALLS TO PT_SET BELOW
+ */
+
+void context_make_active(context_t *c)
+{
+ // gdt_set_kernel_stack((void *)((uintptr_t)c->c_kstack + c->c_kstacksz));
+ pt_set(c->c_pml4);
+
+ /* Switch stacks and run the thread */
+ __asm__ volatile(
+ "movq %0,%%rbp\n\t" /* update rbp */
+ "movq %1,%%rsp\n\t" /* update rsp */
+ "push %2\n\t" /* save rip */
+ "ret" /* jump to new rip */
+ ::"m"(c->c_rbp),
+ "m"(c->c_rsp), "m"(c->c_rip));
+}
+
+void context_switch(context_t *oldc, context_t *newc)
+{
+ gdt_set_kernel_stack(
+ (void *)((uintptr_t)newc->c_kstack + newc->c_kstacksz));
+
+ // sanity check that core-specific data is being managed (paged in)
+ // correctly
+ KASSERT(oldc->c_pml4 == pt_get());
+ uintptr_t curthr_paddr =
+ pt_virt_to_phys_helper(oldc->c_pml4, (uintptr_t)&curthr);
+ uintptr_t new_curthr_paddr =
+ pt_virt_to_phys_helper(newc->c_pml4, (uintptr_t)&curthr);
+
+ kthread_t *prev_curthr = curthr;
+ pt_set(newc->c_pml4);
+ KASSERT(pt_get() == newc->c_pml4);
+
+ KASSERT(curthr_paddr == new_curthr_paddr);
+ KASSERT(prev_curthr == curthr);
+
+ /*
+ * Save the current value of the stack pointer and the frame pointer into
+ * the old context. Set the instruction pointer to the return address
+ * (whoever called us).
+ */
+ __asm__ volatile(
+ "pushfq;" /* save RFLAGS on the stack */
+ "pushq %%rbp \n" /* save base pointer */
+ "pushq %%rbx \n" /* save other callee-saved registers */
+ "pushq %%r12 \n"
+ "pushq %%r13 \n"
+ "pushq %%r14 \n"
+ "pushq %%r15 \n"
+ "movq %%rsp, %0 \n" /* save RSP into oldc */
+ "movq %2, %%rsp \n" /* restore RSP from newc */
+ "pushq %%rax\n\t"
+ "movabs $1f, %%rax \n\t" /* save RIP into oldc (saves the label '1'
+ below) */
+ "mov %%rax, %1\n\t"
+ "popq %%rax\n\t"
+ "pushq %3 \n\t" /* restore RIP */
+ "ret \n\t"
+ "1:\t" /* this is where oldc starts executing later */
+ "popq %%r15 \n\t" /* restore callee-saved registers */
+ "popq %%r14 \n\t"
+ "popq %%r13 \n\t"
+ "popq %%r12 \n\t"
+ "popq %%rbx \n\t"
+ "popq %%rbp \n\t" /* restore base pointer */
+ "popfq" /* restore RFLAGS */
+ : "=m"(oldc->c_rsp), "=m"(oldc->c_rip)
+ : "m"(newc->c_rsp), "m"(newc->c_rip));
+}
diff --git a/kernel/proc/fork.c b/kernel/proc/fork.c
new file mode 100644
index 0000000..358b891
--- /dev/null
+++ b/kernel/proc/fork.c
@@ -0,0 +1,62 @@
+#include "errno.h"
+#include "globals.h"
+#include "types.h"
+
+#include "util/debug.h"
+#include "util/string.h"
+
+#include "mm/mm.h"
+#include "mm/mman.h"
+#include "mm/pframe.h"
+#include "mm/tlb.h"
+
+#include "fs/vnode.h"
+
+#include "vm/shadow.h"
+
+#include "api/exec.h"
+
+/* Pushes the appropriate things onto the kernel stack of a newly forked thread
+ * so that it can begin execution in userland_entry.
+ * regs: registers the new thread should have on execution
+ * kstack: location of the new thread's kernel stack
+ * Returns the new stack pointer on success. */
+static uintptr_t fork_setup_stack(const regs_t *regs, void *kstack)
+{
+    /* Leave room for a pointer argument and dummy return address, plus a
+     * dummy userland return address */
+ uint64_t rsp =
+ ((uint64_t)kstack) + DEFAULT_STACK_SIZE - (sizeof(regs_t) + 16);
+ memcpy((void *)(rsp + 8), regs, sizeof(regs_t)); /* Copy over struct */
+ return rsp;
+}
+
+/*
+ * This function implements the fork(2) system call.
+ *
+ * TODO:
+ * 1) Use proc_create() and kthread_clone() to set up a new process and thread. If
+ * either fails, perform any appropriate cleanup.
+ * 2) Finish any initialization work for the new process and thread.
+ * 3) Fix the values of the registers and the rest of the kthread's ctx.
+ * Some registers can be accessed from the cloned kthread's context (see the context_t
+ * and kthread_t structs for more details):
+ * a) We want the child process to also enter userland execution.
+ * For this, the instruction pointer should point to userland_entry (see exec.c).
+ * b) Remember that the only difference between the parent and child processes
+ * is the return value of fork(). This value is returned in the RAX register,
+ * and the return value should be 0 for the child. The parent's return value would
+ * be the process id of the newly created child process.
+ * c) Before the process begins execution in userland_entry,
+ * we need to push all registers onto the kernel stack of the kthread.
+ * Use fork_setup_stack to do this, and set RSP accordingly.
+ *      d) Use pt_unmap_range and tlb_flush_all on the parent in advance of
+ *         copy-on-write.
+ * 4) Prepare the child process to be run on the CPU.
+ * 5) Return the child's process id to the parent.
+ */
+long do_fork(struct regs *regs)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return -1;
+}
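+
+/*
+ * A minimal sketch of one possible do_fork() flow, following the TODO above.
+ * It is not the stencil solution: r_rax, USER_MEM_LOW/USER_MEM_HIGH, and the
+ * failure-cleanup path are assumptions about the surrounding kernel.
+ */
+static long do_fork_sketch(struct regs *regs)
+{
+    /* 1) new process plus a clone of the calling thread */
+    proc_t *child = proc_create(curproc->p_name);
+    if (!child)
+        return -ENOMEM;
+    kthread_t *thr = kthread_clone(curthr);
+    if (!thr)
+    {
+        proc_destroy(child); /* assumed-safe cleanup for a thread-less proc */
+        return -ENOMEM;
+    }
+
+    /* 2) attach the cloned thread to the child process */
+    thr->kt_proc = child;
+    list_insert_tail(&child->p_threads, &thr->kt_plink);
+
+    /* 3) child re-enters userland with the same registers, but rax = 0 */
+    regs->r_rax = 0; /* assumed regs_t field name */
+    thr->kt_ctx.c_pml4 = child->p_pml4;
+    thr->kt_ctx.c_rip = (uintptr_t)userland_entry;
+    thr->kt_ctx.c_rsp = fork_setup_stack(regs, (void *)thr->kt_ctx.c_kstack);
+
+    /* flush the parent's user mappings so copy-on-write faults retrigger */
+    pt_unmap_range(curproc->p_pml4, USER_MEM_LOW, USER_MEM_HIGH);
+    tlb_flush_all();
+
+    /* 4) run the child, 5) parent returns the child's pid */
+    sched_make_runnable(thr);
+    return child->p_pid;
+}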
diff --git a/kernel/proc/kmutex.c b/kernel/proc/kmutex.c
new file mode 100644
index 0000000..0433468
--- /dev/null
+++ b/kernel/proc/kmutex.c
@@ -0,0 +1,88 @@
+// SMP.1 + SMP.3
+// spinlock + mask interrupts
+#include "proc/kmutex.h"
+#include "globals.h"
+#include "main/interrupt.h"
+#include <errno.h>
+
+/*
+ * IMPORTANT: Mutexes can _NEVER_ be locked or unlocked from an
+ * interrupt context. Mutexes are _ONLY_ locked or unlocked from a
+ * thread context.
+ */
+
+/*
+ * Checks for the specific deadlock case where:
+ * curthr wants mtx, but the owner of mtx is waiting on a mutex that curthr is
+ * holding
+ */
+#define DEBUG_DEADLOCKS 1
+void detect_deadlocks(kmutex_t *mtx)
+{
+#if DEBUG_DEADLOCKS
+ list_iterate(&curthr->kt_mutexes, held, kmutex_t, km_link)
+ {
+ list_iterate(&held->km_waitq.tq_list, waiter, kthread_t, kt_qlink)
+ {
+ if (waiter == mtx->km_holder)
+ {
+ panic(
+ "detected deadlock between P%d and P%d (mutexes 0x%p, "
+ "0x%p)\n",
+ curproc->p_pid, waiter->kt_proc->p_pid, held, mtx);
+ }
+ }
+ }
+#endif
+}
+
+/*
+ * Initializes the members of mtx
+ */
+void kmutex_init(kmutex_t *mtx)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
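+
+/* A minimal sketch, reusing the queue/list-link initializers seen elsewhere
+ * in this commit: */
+static void kmutex_init_sketch(kmutex_t *mtx)
+{
+    sched_queue_init(&mtx->km_waitq);
+    mtx->km_holder = NULL;
+    list_link_init(&mtx->km_link); /* links into the holder's kt_mutexes */
+}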
+
+/*
+ * Obtains a mutex, potentially blocking.
+ *
+ * Hints:
+ * You are strongly advised to maintain the kt_mutexes member of curthr and call
+ * detect_deadlocks() to help debugging.
+ */
+void kmutex_lock(kmutex_t *mtx)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
+
+/*
+ * Releases a mutex.
+ *
+ * Hints:
+ * Again, you are strongly advised to maintain kt_mutexes.
+ * Use sched_wakeup_on() to hand off the mutex - think carefully about how
+ * these two functions interact to ensure that the mutex's km_holder is
+ * properly set before the new owner is runnable.
+ */
+void kmutex_unlock(kmutex_t *mtx)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
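+
+/*
+ * A minimal sketch of the lock/unlock pair, ignoring the SMP locking
+ * discipline. Note how unlock passes &mtx->km_holder as the ktp argument of
+ * sched_wakeup_on(), so the new holder is recorded before it becomes
+ * runnable; list_insert_tail() is assumed from the list API.
+ */
+static void kmutex_lock_sketch(kmutex_t *mtx)
+{
+    KASSERT(!kmutex_owns_mutex(mtx) && "no recursive locking");
+    detect_deadlocks(mtx);
+    if (mtx->km_holder)
+    {
+        sched_sleep_on(&mtx->km_waitq);
+        KASSERT(mtx->km_holder == curthr); /* handed off by unlock */
+    }
+    else
+    {
+        mtx->km_holder = curthr;
+    }
+    list_insert_tail(&curthr->kt_mutexes, &mtx->km_link);
+}
+
+static void kmutex_unlock_sketch(kmutex_t *mtx)
+{
+    KASSERT(kmutex_owns_mutex(mtx));
+    list_remove(&mtx->km_link); /* no longer in our kt_mutexes */
+    mtx->km_holder = NULL;      /* stays NULL if nobody is waiting */
+    sched_wakeup_on(&mtx->km_waitq, &mtx->km_holder);
+}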
+
+/*
+ * Checks whether any threads are waiting on mtx.
+ */
+long kmutex_has_waiters(kmutex_t *mtx)
+{
+    return !sched_queue_empty(&mtx->km_waitq);
+}
+
+/*
+ * Checks if the current thread owns mtx.
+ */
+inline long kmutex_owns_mutex(kmutex_t *mtx)
+{
+ return curthr && mtx->km_holder == curthr;
+}
diff --git a/kernel/proc/kthread.c b/kernel/proc/kthread.c
new file mode 100644
index 0000000..f1c541c
--- /dev/null
+++ b/kernel/proc/kthread.c
@@ -0,0 +1,136 @@
+// SMP.1 for non-curthr actions; none for curthr
+#include "config.h"
+#include "globals.h"
+#include "mm/slab.h"
+#include "util/debug.h"
+#include "util/string.h"
+
+/*==========
+ * Variables
+ *=========*/
+
+/*
+ * Global variable maintaining the current thread on the cpu
+ */
+kthread_t *curthr CORE_SPECIFIC_DATA;
+
+/*
+ * Private slab for kthread structs
+ */
+static slab_allocator_t *kthread_allocator = NULL;
+
+/*=================
+ * Helper functions
+ *================*/
+
+/*
+ * Allocates a new kernel stack. Returns NULL if there is not enough memory.
+ */
+static char *alloc_stack() { return page_alloc_n(DEFAULT_STACK_SIZE_PAGES); }
+
+/*
+ * Frees an existing kernel stack.
+ */
+static void free_stack(char *stack)
+{
+ page_free_n(stack, DEFAULT_STACK_SIZE_PAGES);
+}
+
+/*==========
+ * Functions
+ *=========*/
+
+/*
+ * Initializes the kthread_allocator.
+ */
+void kthread_init()
+{
+ KASSERT(__builtin_popcount(DEFAULT_STACK_SIZE_PAGES) == 1 &&
+ "stack size should be a power of 2 pages to reduce fragmentation");
+ kthread_allocator = slab_allocator_create("kthread", sizeof(kthread_t));
+ KASSERT(kthread_allocator);
+}
+
+/*
+ * Creates and initializes a thread.
+ * Returns a new kthread, or NULL on failure.
+ *
+ * Hints:
+ * Use kthread_allocator to allocate a kthread
+ * Use alloc_stack() to allocate a kernel stack
+ * Use context_setup() to set up the thread's context -
+ * also use DEFAULT_STACK_SIZE and the process's pagetable (p_pml4)
+ * Remember to initialize all the thread's fields
+ * Remember to add the thread to proc's threads list
+ * Initialize the thread's kt_state to KT_NO_STATE
+ * Initialize the thread's kt_recent_core to ~0UL (unsigned -1)
+ */
+kthread_t *kthread_create(proc_t *proc, kthread_func_t func, long arg1,
+ void *arg2)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+ return NULL;
+}
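+
+/*
+ * A minimal sketch, assuming slab_obj_alloc() is the allocation counterpart
+ * of slab_obj_free() and that zeroing the struct covers the remaining fields
+ * (kt_retval, kt_errno, kt_cancelled, kt_wchan, kt_lock, ...):
+ */
+static kthread_t *kthread_create_sketch(proc_t *proc, kthread_func_t func,
+                                        long arg1, void *arg2)
+{
+    kthread_t *thr = slab_obj_alloc(kthread_allocator);
+    if (!thr)
+        return NULL;
+    memset(thr, 0, sizeof(kthread_t));
+
+    thr->kt_kstack = alloc_stack();
+    if (!thr->kt_kstack)
+    {
+        slab_obj_free(kthread_allocator, thr);
+        return NULL;
+    }
+    context_setup(&thr->kt_ctx, func, arg1, arg2, thr->kt_kstack,
+                  DEFAULT_STACK_SIZE, proc->p_pml4);
+
+    thr->kt_proc = proc;
+    thr->kt_state = KT_NO_STATE;
+    thr->kt_recent_core = ~0UL;
+    list_init(&thr->kt_mutexes);
+    list_link_init(&thr->kt_qlink);
+    list_link_init(&thr->kt_plink);
+    list_insert_tail(&proc->p_threads, &thr->kt_plink);
+    return thr;
+}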
+
+/*
+ * Creates and initializes a thread that is a clone of thr.
+ * Returns a new kthread, or NULL on failure.
+ *
+ * P.S. Note that you do not need to implement this function until VM.
+ *
+ * Hints:
+ * The only parts of the context that must be initialized are c_kstack and
+ * c_kstacksz. The thread's process should be set outside of this function. Copy
+ * over thr's retval, errno, and cancelled; other fields should be freshly
+ * initialized. See kthread_create() for more hints.
+ */
+kthread_t *kthread_clone(kthread_t *thr)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return NULL;
+}
+
+/*
+ * Free the thread's stack, remove it from its process's list of threads, and
+ * free the kthread_t struct itself. Protect access to the kthread using its
+ * kt_lock.
+ *
+ * You cannot destroy curthr.
+ */
+void kthread_destroy(kthread_t *thr)
+{
+ KASSERT(thr != curthr);
+ KASSERT(thr && thr->kt_kstack);
+ if (thr->kt_state != KT_EXITED)
+ panic("destroying thread in state %d\n", thr->kt_state);
+ free_stack(thr->kt_kstack);
+ if (list_link_is_linked(&thr->kt_plink))
+ list_remove(&thr->kt_plink);
+
+ slab_obj_free(kthread_allocator, thr);
+}
+
+/*
+ * Sets the thread's return value and cancels the thread.
+ *
+ * Note: Check out the use of check_curthr_cancelled() in syscall_handler()
+ * to see how a thread eventually notices it is cancelled and handles exiting
+ * itself.
+ *
+ * Hints:
+ * This should not be called on curthr.
+ * Use sched_cancel() to actually mark the thread as cancelled. This way you
+ * can take care of all cancellation cases.
+ */
+void kthread_cancel(kthread_t *thr, void *retval)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
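+
+/* A minimal sketch; kt_retval is an assumed name for the return-value field: */
+static void kthread_cancel_sketch(kthread_t *thr, void *retval)
+{
+    KASSERT(thr != curthr);
+    thr->kt_retval = retval;
+    sched_cancel(thr); /* handles every cancellation case */
+}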
+
+/*
+ * Wrapper around proc_thread_exiting().
+ */
+void kthread_exit(void *retval)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
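+
+/* A minimal sketch of the wrapper -- it simply forwards the return value: */
+static void kthread_exit_sketch(void *retval)
+{
+    proc_thread_exiting(retval); /* does not return */
+}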
diff --git a/kernel/proc/kthread.gdb b/kernel/proc/kthread.gdb
new file mode 100644
index 0000000..9c6e160
--- /dev/null
+++ b/kernel/proc/kthread.gdb
@@ -0,0 +1,39 @@
+define kstack
+ if $argc == 0
+ set $kthr = curthr
+ else
+ set $kthr = $arg0
+ end
+
+    # Register and field names assume the x86-64 context_t (c_rip, c_rbp,
+    # c_rsp) defined in context.c; the _intr_regs field names (r_rip, r_rbp,
+    # r_rsp) are assumed to follow the same convention.
+    set $save_rip = $rip
+    set $save_rbp = $rbp
+    set $save_rsp = $rsp
+
+    if ($kthr == curthr) && (_intr_regs != NULL)
+        set $rip = _intr_regs->r_rip
+        set $rbp = _intr_regs->r_rbp
+        set $rsp = _intr_regs->r_rsp
+        info stack
+    else
+        if $kthr != curthr
+            set $rip = $kthr->kt_ctx.c_rip
+            set $rbp = $kthr->kt_ctx.c_rbp
+            set $rsp = $kthr->kt_ctx.c_rsp
+            info stack
+        else
+            info stack
+        end
+    end
+
+    set $rip = $save_rip
+    set $rbp = $save_rbp
+    set $rsp = $save_rsp
+end
+document kstack
+usage: kstack [kthread_t*]
+Takes a single, optional kthread_t* as an argument.
+If no argument is given, curthr is used instead. This
+command prints the current stack of the given thread.
+This includes detecting whether the given thread has
+been interrupted, and looking up the interrupted
+stack, rather than the interrupt stack (useful for
+viewing the stack trace which caused a page fault).
+end
\ No newline at end of file
diff --git a/kernel/proc/proc.c b/kernel/proc/proc.c
new file mode 100644
index 0000000..17ff5db
--- /dev/null
+++ b/kernel/proc/proc.c
@@ -0,0 +1,440 @@
+// SMP.1 + SMP.3
+// spinlock + mask interrupts
+#include "config.h"
+#include "errno.h"
+#include "fs/file.h"
+#include "fs/vfs.h"
+#include "fs/vnode.h"
+#include "globals.h"
+#include "kernel.h"
+#include "mm/slab.h"
+#include "util/debug.h"
+#include "util/printf.h"
+#include "util/string.h"
+#include "util/time.h"
+#include <drivers/screen.h>
+#include <fs/vfs_syscall.h>
+#include <main/apic.h>
+
+/*==========
+ * Variables
+ *=========*/
+
+/*
+ * Global variable that maintains the current process
+ */
+proc_t *curproc CORE_SPECIFIC_DATA;
+
+/*
+ * Global list of all processes (except for the idle process)
+ */
+static list_t proc_list = LIST_INITIALIZER(proc_list);
+
+/*
+ * Allocator for process descriptors
+ */
+static slab_allocator_t *proc_allocator = NULL;
+
+/*
+ * Statically allocated idle process
+ * Each core has its own idleproc, so the idleproc is stored in static memory
+ * rather than in the global process list
+ */
+proc_t idleproc CORE_SPECIFIC_DATA;
+
+/*
+ * Pointer to the init process
+ */
+static proc_t *proc_initproc = NULL;
+
+/*===============
+ * System startup
+ *==============*/
+
+/*
+ * Initializes the allocator for process descriptors.
+ */
+void proc_init()
+{
+ proc_allocator = slab_allocator_create("proc", sizeof(proc_t));
+ KASSERT(proc_allocator);
+}
+
+/*
+ * Initializes idleproc for the current core. Sets initial values for curproc
+ * and curthr.
+ */
+void proc_idleproc_init()
+{
+ proc_t *proc = &idleproc;
+
+ proc->p_pid = 0;
+ list_init(&proc->p_threads);
+ list_init(&proc->p_children);
+ proc->p_pproc = NULL;
+
+ list_link_init(&proc->p_child_link);
+ list_link_init(&proc->p_list_link);
+
+ proc->p_status = 0;
+ proc->p_state = PROC_RUNNING;
+
+ memset(&proc->p_wait, 0, sizeof(ktqueue_t)); // should not be used
+
+ proc->p_pml4 = pt_get();
+ proc->p_vmmap = vmmap_create();
+
+ proc->p_cwd = NULL;
+
+ memset(proc->p_files, 0, sizeof(proc->p_files));
+
+ char name[8];
+ snprintf(name, sizeof(name), "idle%ld", curcore.kc_id);
+ strncpy(proc->p_name, name, PROC_NAME_LEN);
+ proc->p_name[PROC_NAME_LEN - 1] = '\0';
+
+ dbg(DBG_PROC, "created %s\n", proc->p_name);
+ curproc = &idleproc;
+ curthr = NULL;
+}
+
+/*=================
+ * Helper functions
+ *================*/
+
+/*
+ * Gets the next available process ID (pid).
+ */
+static pid_t next_pid = 1;
+static pid_t _proc_getid()
+{
+ pid_t pid = next_pid;
+restart:
+ list_iterate(&proc_list, p, proc_t, p_list_link)
+ {
+ if (p->p_pid == pid)
+ {
+ pid = pid + 1 == PROC_MAX_COUNT ? 1 : pid + 1;
+ if (pid == next_pid)
+ {
+ return -1;
+ }
+ else
+ {
+ goto restart;
+ }
+ }
+ }
+ next_pid = pid + 1 == PROC_MAX_COUNT ? 1 : pid + 1;
+ KASSERT(pid);
+ return pid;
+}
+
+/*
+ * Searches the global process list for the process descriptor corresponding to
+ * a pid.
+ */
+proc_t *proc_lookup(pid_t pid)
+{
+ if (pid == 0)
+ {
+ return &idleproc;
+ }
+ list_iterate(&proc_list, p, proc_t, p_list_link)
+ {
+ if (p->p_pid == pid)
+ {
+ return p;
+ }
+ }
+ return NULL;
+}
+
+/*==========
+ * Functions
+ *=========*/
+
+/*
+ * Creates a new process with the given name.
+ * Returns the newly created process, or NULL on failure.
+ *
+ * Hints:
+ * Use _proc_getid() to get a new pid.
+ * Allocate a new proc_t with the process slab allocator (proc_allocator).
+ * Use pt_create() to create a new page table (p_pml4).
+ * If the newly created process is the init process (i.e. the generated PID
+ * matches the init process's PID, given by the macro PID_INIT), set the
+ * global proc_initproc to the created process.
+ *
+ * There is some setup to be done for VFS and VM - remember to return to this
+ * function! For VFS, clone and ref the files from curproc. For VM, clone the
+ * vmmap from curproc.
+ *
+ * Be sure to free resources appropriately if proc_create() fails midway!
+ */
+proc_t *proc_create(const char *name)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+ return NULL;
+}
+
+/*
+ * Helper for proc_thread_exiting() that cleans up resources from the current
+ * process in preparation for its destruction (which occurs later via proc_destroy()).
+ * Reparents child processes to the init process, or initiates Weenix shutdown
+ * if the current process is the init process.
+ *
+ * Hints:
+ * You won't have much to clean up until VFS and VM -- remember to revisit this
+ * function later!
+ * **VFS/VM**: there may be some code repeated from proc_destroy(). The init
+ * process does not have a parent process and thus cleans itself up, which is
+ * why we need to clean up here as well.
+ *
+ * Remember to set the state and status of the process.
+ * The init process' PID is given by PID_INIT.
+ * Use initproc_finish() to shutdown Weenix when cleaning up the init process.
+ */
+void proc_cleanup(long status)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
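+
+/*
+ * A minimal sketch for PROCS only (no VFS/VM cleanup yet), assuming PROC_DEAD
+ * is the exited-process state:
+ */
+static void proc_cleanup_sketch(long status)
+{
+    curproc->p_state = PROC_DEAD;
+    curproc->p_status = status;
+    if (curproc->p_pid == PID_INIT)
+    {
+        initproc_finish(); /* shut down Weenix */
+    }
+    /* reparent any children to the init process */
+    list_iterate(&curproc->p_children, child, proc_t, p_child_link)
+    {
+        list_remove(&child->p_child_link);
+        child->p_pproc = proc_initproc;
+        list_insert_tail(&proc_initproc->p_children, &child->p_child_link);
+    }
+}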
+
+/*
+ * Cleans up the current process and the current thread, broadcasts on its
+ * parent's p_wait, then forces a context switch. After this, the process is
+ * essentially dead -- this function does not return. The parent must eventually
+ * finish destroying the process.
+ *
+ * Hints:
+ * Use proc_cleanup() to clean up the current process. As retval specifies the current
+ * thread's return value, you should pass (long)retval as the status argument to
+ * proc_cleanup().
+ * Remember to set the exit state and return value of the current thread after calling
+ * proc_cleanup(), as this may block and cause the thread's state to be overwritten.
+ * The context switch should be performed by a call to sched_switch().
+ */
+void proc_thread_exiting(void *retval)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
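+
+/*
+ * A minimal sketch; the KT_EXITED ordering follows the hint above, kt_retval
+ * is an assumed field name, and passing NULL to sched_switch() relies on the
+ * kc_queue check in core_switch() so the dead thread is never re-enqueued:
+ */
+static void proc_thread_exiting_sketch(void *retval)
+{
+    proc_cleanup((long)retval);
+    curthr->kt_state = KT_EXITED; /* set after proc_cleanup(), which may block */
+    curthr->kt_retval = retval;
+    sched_broadcast_on(&curproc->p_pproc->p_wait);
+    sched_switch(NULL); /* never returns */
+}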
+
+/*
+ * Cancels all the threads of proc. This should never be called on curproc.
+ *
+ * Hints:
+ * The status argument should be passed to kthread_cancel() as the retval.
+ */
+void proc_kill(proc_t *proc, long status)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
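+
+/* A minimal sketch -- cancel each thread with the status as its retval: */
+static void proc_kill_sketch(proc_t *proc, long status)
+{
+    KASSERT(proc != curproc);
+    list_iterate(&proc->p_threads, thr, kthread_t, kt_plink)
+    {
+        kthread_cancel(thr, (void *)status);
+    }
+}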
+
+/*
+ * Kills all processes that are not curproc and not a direct child of idleproc (i.e.,
+ * the init process), then kills the current process.
+ *
+ * Hints:
+ * The PID of the idle process is given by PID_IDLE.
+ * Processes should be killed with a status of -1.
+ * Use do_exit() to kill the current process.
+ */
+void proc_kill_all()
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
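+
+/* A minimal sketch -- spare curproc and direct children of the idle process: */
+static void proc_kill_all_sketch(void)
+{
+    list_iterate(&proc_list, p, proc_t, p_list_link)
+    {
+        if (p != curproc && p->p_pproc && p->p_pproc->p_pid != PID_IDLE)
+        {
+            proc_kill(p, -1);
+        }
+    }
+    do_exit(-1); /* finally, kill the current process */
+}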
+
+/*
+ * Destroy / free everything from proc. Be sure to remember reference counting
+ * when working on VFS.
+ *
+ * In contrast with proc_cleanup() (in which a process begins to clean itself up), this
+ * will be called on proc by some other process to complete its cleanup.
+ * I.e., the process we are destroying should not be curproc.
+ */
+void proc_destroy(proc_t *proc)
+{
+ list_remove(&proc->p_list_link);
+
+ list_iterate(&proc->p_threads, thr, kthread_t, kt_plink)
+ {
+ kthread_destroy(thr);
+ }
+
+#ifdef __VFS__
+ for (int fd = 0; fd < NFILES; fd++)
+ {
+ if (proc->p_files[fd])
+ fput(proc->p_files + fd);
+ }
+ if (proc->p_cwd)
+ {
+ vput(&proc->p_cwd);
+ }
+#endif
+
+#ifdef __VM__
+ if (proc->p_vmmap)
+ vmmap_destroy(&proc->p_vmmap);
+#endif
+
+ dbg(DBG_THR, "destroying P%d\n", proc->p_pid);
+
+ KASSERT(proc->p_pml4);
+ pt_destroy(proc->p_pml4);
+
+ slab_obj_free(proc_allocator, proc);
+}
+
+/*=============
+ * System calls
+ *============*/
+
+/*
+ * Waits for a child process identified by pid to exit. Finishes destroying the
+ * process and optionally returns the child's status in status.
+ *
+ * If pid is a positive integer, tries to clean up the process specified by pid.
+ * If pid is -1, cleans up any child process of curproc that exits.
+ *
+ * Returns the pid of the child process that exited, or error cases:
+ * - ENOTSUP: pid is 0, a negative number not equal to -1,
+ * or options are specified (options does not equal 0)
+ * - ECHILD: pid is a positive integer but not a child of curproc, or
+ * pid is -1 and the process has no children
+ *
+ * Hints:
+ * Use sched_sleep_on() to be notified of a child process exiting.
+ * Destroy an exited process by removing it from any lists and calling
+ * proc_destroy(). Remember to set status (if it was provided) to the child's
+ * status before destroying the process.
+ * If waiting on a specific child PID, wakeups from other exiting child
+ * processes should be ignored.
+ * If waiting on any child (-1), do_waitpid can return when *any* child has
+ * exited; it does not have to be the one that exited earliest.
+ * Which field can you use to determine whether a given process exited?
+ */
+pid_t do_waitpid(pid_t pid, int *status, int options)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+ return 0;
+}
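+
+/*
+ * A minimal sketch of the pid == -1 path only, assuming PROC_DEAD marks an
+ * exited process; argument validation and the pid > 0 path follow the same
+ * shape:
+ */
+static pid_t do_waitpid_sketch_any(int *status)
+{
+    if (list_empty(&curproc->p_children))
+        return -ECHILD;
+    for (;;)
+    {
+        list_iterate(&curproc->p_children, child, proc_t, p_child_link)
+        {
+            if (child->p_state == PROC_DEAD)
+            {
+                pid_t pid = child->p_pid;
+                if (status)
+                    *status = (int)child->p_status;
+                list_remove(&child->p_child_link);
+                proc_destroy(child); /* removes it from proc_list itself */
+                return pid;
+            }
+        }
+        /* exiting children broadcast on our p_wait (see proc_thread_exiting) */
+        sched_sleep_on(&curproc->p_wait);
+    }
+}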
+
+/*
+ * Wrapper around kthread_exit.
+ */
+void do_exit(long status)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
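+
+/* A minimal sketch of the wrapper: */
+static void do_exit_sketch(long status)
+{
+    kthread_exit((void *)status); /* does not return */
+}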
+
+/*==========
+ * Debugging
+ *=========*/
+
+size_t proc_info(const void *arg, char *buf, size_t osize)
+{
+ const proc_t *p = (proc_t *)arg;
+ size_t size = osize;
+
+ KASSERT(NULL != p);
+ KASSERT(NULL != buf);
+
+ iprintf(&buf, &size, "pid: %i\n", p->p_pid);
+ iprintf(&buf, &size, "name: %s\n", p->p_name);
+ if (NULL != p->p_pproc)
+ {
+ iprintf(&buf, &size, "parent: %i (%s)\n", p->p_pproc->p_pid,
+ p->p_pproc->p_name);
+ }
+ else
+ {
+ iprintf(&buf, &size, "parent: -\n");
+ }
+
+ if (list_empty(&p->p_children))
+ {
+ iprintf(&buf, &size, "children: -\n");
+ }
+ else
+ {
+ iprintf(&buf, &size, "children:\n");
+ }
+ list_iterate(&p->p_children, child, proc_t, p_child_link)
+ {
+ iprintf(&buf, &size, " %i (%s)\n", child->p_pid, child->p_name);
+ }
+
+ iprintf(&buf, &size, "status: %ld\n", p->p_status);
+ iprintf(&buf, &size, "state: %i\n", p->p_state);
+
+#ifdef __VFS__
+#ifdef __GETCWD__
+ if (NULL != p->p_cwd)
+ {
+ char cwd[256];
+ lookup_dirpath(p->p_cwd, cwd, sizeof(cwd));
+ iprintf(&buf, &size, "cwd: %-s\n", cwd);
+ }
+ else
+ {
+ iprintf(&buf, &size, "cwd: -\n");
+ }
+#endif /* __GETCWD__ */
+#endif
+
+#ifdef __VM__
+ iprintf(&buf, &size, "start brk: 0x%p\n", p->p_start_brk);
+ iprintf(&buf, &size, "brk: 0x%p\n", p->p_brk);
+#endif
+
+ return size;
+}
+
+size_t proc_list_info(const void *arg, char *buf, size_t osize)
+{
+ size_t size = osize;
+
+ KASSERT(NULL == arg);
+ KASSERT(NULL != buf);
+
+#if defined(__VFS__) && defined(__GETCWD__)
+ iprintf(&buf, &size, "%5s %-13s %-18s %-s\n", "PID", "NAME", "PARENT",
+ "CWD");
+#else
+ iprintf(&buf, &size, "%5s %-13s %-s\n", "PID", "NAME", "PARENT");
+#endif
+
+ list_iterate(&proc_list, p, proc_t, p_list_link)
+ {
+ char parent[64];
+ if (NULL != p->p_pproc)
+ {
+ snprintf(parent, sizeof(parent), "%3i (%s)", p->p_pproc->p_pid,
+ p->p_pproc->p_name);
+ }
+ else
+ {
+ snprintf(parent, sizeof(parent), " -");
+ }
+
+#if defined(__VFS__) && defined(__GETCWD__)
+ if (NULL != p->p_cwd)
+ {
+ char cwd[256];
+ lookup_dirpath(p->p_cwd, cwd, sizeof(cwd));
+ iprintf(&buf, &size, " %3i %-13s %-18s %-s\n", p->p_pid, p->p_name,
+ parent, cwd);
+ }
+ else
+ {
+ iprintf(&buf, &size, " %3i %-13s %-18s -\n", p->p_pid, p->p_name,
+ parent);
+ }
+#else
+ iprintf(&buf, &size, " %3i %-13s %-s\n", p->p_pid, p->p_name, parent);
+#endif
+ }
+ return size;
+}
diff --git a/kernel/proc/proc.py b/kernel/proc/proc.py
new file mode 100644
index 0000000..11a5f31
--- /dev/null
+++ b/kernel/proc/proc.py
@@ -0,0 +1,38 @@
+import gdb
+
+import weenix
+import weenix.list
+import weenix.proc
+
+
+class ProcCommand(weenix.Command):
+ """proc [<pids...>]
+ Prints information about the listed pids. If no
+ pids are listed the full process tree is printed."""
+
+ def __init__(self):
+ weenix.Command.__init__(self, "proc", gdb.COMMAND_DATA)
+
+    def invoke(self, args, tty):
+        if len(args.strip()) == 0:
+            print(weenix.proc.str_proc_tree())
+        else:
+            for pid in args.split():
+                if pid == "curproc":
+                    print(weenix.proc.curproc())
+                else:
+                    print(weenix.proc.lookup(pid))
+
+    def complete(self, line, word):
+        # map/filter return iterators in Python 3, so build a real list
+        l = [str(x.pid()) for x in weenix.proc.iter()]
+        l.append("curproc")
+        l = [x for x in l if x.startswith(word)]
+        for used in line.split():
+            l = [x for x in l if x != used]
+        l.sort()
+        return l
+
+
+ProcCommand()
diff --git a/kernel/proc/sched.c b/kernel/proc/sched.c
new file mode 100644
index 0000000..9162875
--- /dev/null
+++ b/kernel/proc/sched.c
@@ -0,0 +1,368 @@
+// SMP.1 + SMP.2 + SMP.3 + SMP.4
+// spinlocks + mask interrupts
+#include "api/syscall.h"
+#include "errno.h"
+#include "fs/vfs.h"
+#include "globals.h"
+#include "main/apic.h"
+#include "main/inits.h"
+#include "types.h"
+#include "util/debug.h"
+#include <util/time.h>
+
+/*==========
+ * Variables
+ *=========*/
+
+/*
+ * The run queue of threads waiting to be run.
+ */
+static ktqueue_t kt_runq CORE_SPECIFIC_DATA;
+
+/*
+ * Helper tracking most recent thread context before a context_switch().
+ */
+static context_t *last_thread_context CORE_SPECIFIC_DATA;
+
+/*===================
+ * Preemption helpers
+ *==================*/
+
+inline void preemption_disable()
+{
+ if (curthr)
+ curthr->kt_preemption_count++;
+}
+
+inline void preemption_enable()
+{
+ if (curthr)
+ {
+ KASSERT(curthr->kt_preemption_count);
+ curthr->kt_preemption_count--;
+ }
+}
+
+inline void preemption_reset()
+{
+ KASSERT(curthr);
+ curthr->kt_preemption_count = 0;
+}
+
+inline long preemption_enabled()
+{
+ return curthr && !curthr->kt_preemption_count;
+}
+
+/*==================
+ * ktqueue functions
+ *=================*/
+
+/*
+ * Initializes queue.
+ */
+void sched_queue_init(ktqueue_t *queue)
+{
+ list_init(&queue->tq_list);
+ queue->tq_size = 0;
+}
+
+/*
+ * Adds thr to the tail of queue.
+ *
+ * queue must be locked
+ */
+static void ktqueue_enqueue(ktqueue_t *queue, kthread_t *thr)
+{
+ KASSERT(!thr->kt_wchan);
+
+ list_assert_sanity(&queue->tq_list);
+ /* Because of the way core-specific data is handled, we add to the front
+ * of the queue (and remove from the back). */
+ list_insert_head(&queue->tq_list, &thr->kt_qlink);
+ list_assert_sanity(&queue->tq_list);
+
+ thr->kt_wchan = queue;
+ queue->tq_size++;
+}
+
+/*
+ * Removes and returns a thread from the head of queue.
+ *
+ * queue must be locked
+ */
+static kthread_t *ktqueue_dequeue(ktqueue_t *queue)
+{
+ if (sched_queue_empty(queue))
+ {
+ return NULL;
+ }
+
+ list_assert_sanity(&queue->tq_list);
+
+ list_link_t *link = queue->tq_list.l_prev;
+ kthread_t *thr = list_item(link, kthread_t, kt_qlink);
+ list_remove(link);
+ thr->kt_wchan = NULL;
+
+ list_assert_sanity(&queue->tq_list);
+
+ queue->tq_size--;
+ return thr;
+}
+
+/*
+ * Removes thr from queue
+ *
+ * queue must be locked
+ */
+static void ktqueue_remove(ktqueue_t *queue, kthread_t *thr)
+{
+ // KASSERT(spinlock_ownslock(&queue->tq_lock));
+ KASSERT(thr->kt_qlink.l_next && thr->kt_qlink.l_prev);
+ list_remove(&thr->kt_qlink);
+ thr->kt_wchan = NULL;
+ queue->tq_size--;
+ list_assert_sanity(&queue->tq_list);
+}
+
+/*
+ * Returns 1 if queue is empty, 0 if it's not.
+ *
+ * If using this for branching / conditional logic on the queue, it should be
+ * locked for this call to avoid a TOCTTOU bug. This is, however, up to the
+ * caller and not enforced at this level.
+ */
+inline long sched_queue_empty(ktqueue_t *queue) { return queue->tq_size == 0; }
+
+/*==========
+ * Functions
+ *=========*/
+
+/*
+ * Initializes the run queue.
+ */
+void sched_init(void)
+{
+ sched_queue_init(GET_CSD(curcore.kc_id, ktqueue_t, kt_runq));
+}
+
+/*
+ * Puts curthr into the cancellable sleep state, and calls sched_switch() with
+ * the passed in arguments. Cancellable sleep means that the thread can be woken
+ * up from sleep for two reasons:
+ * 1. The event it is waiting for has occurred.
+ * 2. It was cancelled.
+ *
+ * Returns 0, or:
+ * - EINTR: If curthr is cancelled before or after the call to sched_switch()
+ *
+ * Hints:
+ * Do not enqueue the thread directly, let sched_switch handle this.
+ */
+long sched_cancellable_sleep_on(ktqueue_t *queue)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+ return 0;
+}
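+
+/*
+ * A minimal sketch, assuming kt_cancelled and KT_SLEEP_CANCELLABLE are the
+ * relevant field and state names:
+ */
+static long sched_cancellable_sleep_on_sketch(ktqueue_t *queue)
+{
+    if (curthr->kt_cancelled)
+        return -EINTR; /* cancelled before ever sleeping */
+    intr_setipl(IPL_HIGH); /* protect the state change, as in sched_sleep_on */
+    curthr->kt_state = KT_SLEEP_CANCELLABLE;
+    sched_switch(queue);
+    return curthr->kt_cancelled ? -EINTR : 0;
+}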
+
+/*
+ * If the given thread is in a cancellable sleep, removes it from whatever queue
+ * it is sleeping on and makes the thread runnable again.
+ *
+ * Regardless of the thread's state, this should mark the thread as cancelled.
+ */
+void sched_cancel(kthread_t *thr)
+{
+ // KASSERT(spinlock_ownslock(&thr->kt_lock));
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
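+
+/* A minimal sketch; kt_wchan records the queue a thread sleeps on (see
+ * ktqueue_enqueue() above): */
+static void sched_cancel_sketch(kthread_t *thr)
+{
+    thr->kt_cancelled = 1; /* assumed field name; always mark as cancelled */
+    if (thr->kt_state == KT_SLEEP_CANCELLABLE)
+    {
+        ktqueue_remove(thr->kt_wchan, thr);
+        sched_make_runnable(thr);
+    }
+}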
+
+/*
+ * Switches into the context of the current core, which is constantly in a loop
+ * in core_switch() to choose a new runnable thread and switch into its thread
+ * context.
+ *
+ * We want to switch to the current core because the idle process handles the
+ * actual switching of the threads. Please see section 3.3 Boot Sequence to
+ * find a more in-depth explanation about the idle process and its
+ * relationship with core_switch().
+ *
+ * Hints:
+ * curthr state must NOT be KT_ON_CPU upon entry.
+ * To ensure that curthr is enqueued on queue only once it is no longer executing,
+ * set the kc_queue field of curcore (the current core) to the queue. See
+ * core_switch() to see how the queue is handled.
+ *
+ * Protect the context switch from interrupts: Use intr_disable(), intr_setipl(),
+ * intr_enable(), and IPL_LOW.
+ *
+ * Even though we want to disable interrupts while modifying the run queue,
+ * core_switch() will actually enable interrupts before sleeping,
+ * but it doesn't modify the IPL. Because we want an interrupt of any level
+ * to wake up the idling core, IPL should be set to IPL_LOW.
+ *
+ * Do not directly call core_switch. The curcore's thread is stuck in a loop
+ * inside core_switch, so switching to its context brings you there.
+ *
+ * For debugging purposes, you may find it useful to set
+ * last_thread_context to the context of the current thread here before the call
+ * to context_switch.
+ */
+void sched_switch(ktqueue_t *queue)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
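+
+/*
+ * A minimal sketch; kc_queue/kc_ctx usage mirrors core_switch() below, and
+ * interrupts are assumed to be re-enabled once the thread is scheduled back
+ * in:
+ */
+static void sched_switch_sketch(ktqueue_t *queue)
+{
+    KASSERT(curthr->kt_state != KT_ON_CPU);
+    intr_disable();
+    intr_setipl(IPL_LOW); /* any interrupt should wake an idling core */
+    curcore.kc_queue = queue; /* core_switch() enqueues curthr for us */
+    last_thread_context = &curthr->kt_ctx;
+    context_switch(&curthr->kt_ctx, &curcore.kc_ctx);
+    /* back on a CPU as curthr again */
+    intr_enable();
+}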
+
+/*
+ * Set the state of the current thread to runnable and sched_switch() with the
+ * current core's runq. Protect access to the thread via its lock.
+ */
+void sched_yield()
+{
+ KASSERT(curthr->kt_state == KT_ON_CPU);
+ curthr->kt_state = KT_RUNNABLE;
+ sched_switch(&kt_runq);
+}
+
+/*
+ * Makes the given thread runnable by setting its state and enqueuing it in the
+ * run queue (kt_runq).
+ *
+ * Hints:
+ * Cannot be called on curthr (it is already running).
+ * Because this can be called from an interrupt context, temporarily mask
+ * interrupts. Use intr_setipl() and IPL_HIGH in order to avoid being interrupted
+ * while modifying the queue.
+ */
+void sched_make_runnable(kthread_t *thr)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
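+
+/* A minimal sketch, assuming intr_setipl() returns the previous IPL: */
+static void sched_make_runnable_sketch(kthread_t *thr)
+{
+    KASSERT(thr != curthr);
+    long oldipl = intr_setipl(IPL_HIGH); /* may race with interrupt context */
+    thr->kt_state = KT_RUNNABLE;
+    ktqueue_enqueue(&kt_runq, thr);
+    intr_setipl(oldipl);
+}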
+
+/*
+ * Places curthr in an uninterruptible sleep on q. I.e. if the thread is cancelled
+ * while sleeping, it will NOT notice until it is woken up by the event it's
+ * waiting for.
+ *
+ * Hints:
+ * Temporarily mask interrupts using intr_setipl() and IPL_HIGH.
+ * IPL should be set to IPL_HIGH because the act of changing the thread's state
+ * and enqueuing the thread on the queue should not be interrupted (as
+ * sched_wakeup_on() could be called from an interrupt context).
+ *
+ * Do not enqueue the thread directly; let sched_switch() handle this (pass q
+ * to sched_switch()).
+ */
+void sched_sleep_on(ktqueue_t *q)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
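+
+/* A minimal sketch, assuming KT_SLEEP is the uninterruptible-sleep state: */
+static void sched_sleep_on_sketch(ktqueue_t *q)
+{
+    intr_setipl(IPL_HIGH); /* state change + enqueue must not be interrupted */
+    curthr->kt_state = KT_SLEEP;
+    sched_switch(q); /* enqueues curthr and drops the IPL back to IPL_LOW */
+}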
+
+/*
+ * Wakes up a thread on the given queue by taking it off the queue and
+ * making it runnable. If given an empty queue, do nothing.
+ *
+ * Hints:
+ * Make sure to set *ktp (if it is provided--i.e. ktp is not NULL) to the
+ * dequeued thread before making it runnable. This allows the caller to get a
+ * handle to the thread that was woken up (useful, for instance, when
+ * implementing unlock() on a mutex: the mutex can wake up a sleeping thread
+ * and make it the new owner).
+ */
+void sched_wakeup_on(ktqueue_t *q, kthread_t **ktp)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
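+
+/* A minimal sketch -- note that *ktp is set before the thread can run: */
+static void sched_wakeup_on_sketch(ktqueue_t *q, kthread_t **ktp)
+{
+    if (sched_queue_empty(q))
+        return;
+    kthread_t *thr = ktqueue_dequeue(q);
+    if (ktp)
+        *ktp = thr; /* hand the caller the thread, e.g. a mutex's new owner */
+    sched_make_runnable(thr);
+}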
+
+/*
+ * Wake up all the threads on the given queue by making them all runnable.
+ */
+void sched_broadcast_on(ktqueue_t *q)
+{
+ NOT_YET_IMPLEMENTED("PROCS: ***none***");
+}
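+
+/* A minimal sketch in terms of sched_wakeup_on(): */
+static void sched_broadcast_on_sketch(ktqueue_t *q)
+{
+    while (!sched_queue_empty(q))
+    {
+        sched_wakeup_on(q, NULL);
+    }
+}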
+
+/*===============
+ * Functions: SMP
+ *==============*/
+
+/*
+ * A sad, but functional, attempt at load balancing when a core is idle
+ */
+#define LOAD_BALANCING_IDLE_THRESHOLD 4096
+static inline kthread_t *load_balance()
+{
+ return NULL;
+}
+
+/*
+ * The meat of our SMP-system.
+ *
+ * You will want to (in this exact order):
+ * 1) perform the operations on curcore.kc_queue and curcore.kc_lock
+ * 2) set curproc to idleproc, and curthr to NULL
+ * 3) try to get the next thread to run:
+ *    a) try to use your own runq (kt_runq), which is core-specific data
+ *    b) if, using core_uptime(), at least LOAD_BALANCING_IDLE_THRESHOLD time
+ *       units have passed, then call load_balance() to try to get the next
+ *       thread to run
+ *    c) if neither (a) nor (b) works, the core is idle. Wait for an interrupt
+ *       using intr_wait(). Note that you will need to re-disable interrupts
+ *       after returning from intr_wait().
+ * 4) ensure the selected thread's context PML4 is correctly set up with
+ *    curcore's core-specific data. Use kt_recent_core and
+ *    map_in_core_specific_data().
+ * 5) set curthr and curproc
+ * 6) context_switch out
+ */
+void core_switch()
+{
+ while (1)
+ {
+ KASSERT(!intr_enabled());
+ KASSERT(!curthr || curthr->kt_state != KT_ON_CPU);
+
+ if (curcore.kc_queue)
+ {
+ ktqueue_enqueue(curcore.kc_queue, curthr);
+ }
+
+ curproc = &idleproc;
+ curthr = NULL;
+
+ kthread_t *next_thread = NULL;
+
+ size_t idle_start = core_uptime();
+ while (1)
+ {
+ next_thread = ktqueue_dequeue(&kt_runq);
+
+ if (!next_thread &&
+ core_uptime() - idle_start >= LOAD_BALANCING_IDLE_THRESHOLD)
+ next_thread = load_balance();
+
+ if (next_thread)
+ break;
+
+ intr_wait();
+ intr_disable();
+ }
+
+ KASSERT(next_thread->kt_state == KT_RUNNABLE);
+ KASSERT(next_thread->kt_proc);
+
+ if (curcore.kc_id != next_thread->kt_recent_core)
+ {
+ map_in_core_specific_data(next_thread->kt_ctx.c_pml4);
+ next_thread->kt_recent_core = curcore.kc_id;
+ }
+
+ uintptr_t mapped_paddr = pt_virt_to_phys_helper(
+ next_thread->kt_ctx.c_pml4, (uintptr_t)&next_thread);
+ uintptr_t expected_paddr =
+ pt_virt_to_phys_helper(pt_get(), (uintptr_t)&next_thread);
+ KASSERT(mapped_paddr == expected_paddr);
+
+ curthr = next_thread;
+ curthr->kt_state = KT_ON_CPU;
+ curproc = curthr->kt_proc;
+ context_switch(&curcore.kc_ctx, &curthr->kt_ctx);
+ }
+}
\ No newline at end of file
diff --git a/kernel/proc/spinlock.c b/kernel/proc/spinlock.c
new file mode 100644
index 0000000..bf89b8e
--- /dev/null
+++ b/kernel/proc/spinlock.c
@@ -0,0 +1,21 @@
+#include "globals.h"
+#include "main/apic.h"
+
+void spinlock_init(spinlock_t *lock) { lock->s_locked = 0; }
+
+inline void spinlock_lock(spinlock_t *lock)
+{
+    // __sync_bool_compare_and_swap is a GCC intrinsic for atomic
+    // compare-and-swap. If lock->s_locked is 0, it is set to 1 and
+    // __sync_bool_compare_and_swap returns true. Otherwise, lock->s_locked is
+    // left at 1 and __sync_bool_compare_and_swap returns false.
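+    // A minimal sketch: spin until the CAS moves s_locked from 0 to 1.
+    while (!__sync_bool_compare_and_swap(&lock->s_locked, 0, 1))
+    {
+        __builtin_ia32_pause(); // x86 PAUSE eases contention while spinning
+    }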
+}
+
+inline void spinlock_unlock(spinlock_t *lock)
+{
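+    // A minimal sketch: __sync_lock_release() writes 0 with release
+    // semantics, publishing the critical section's writes first.
+    __sync_lock_release(&lock->s_locked);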
+}
+
+inline long spinlock_ownslock(spinlock_t *lock)
+{
+ return 1;
+}