author    Michael Foiani <mfoiani@cs.brown.edu>    2024-05-14 17:16:42 -0400
committer Michael Foiani <mfoiani@cs.brown.edu>    2024-05-14 17:16:42 -0400
commit    53b54f664ed2b4630c23cacc9e216a6a5935b57f (patch)
tree      f0138f1ed2f8894efa560e0e9721e510883f439b
parent    b90313ddfa4c03f688c6c1cd5ded34aff1bf39c5 (diff)
fixes to work on dept machine
-rw-r--r--  kernel/api/access.c     46
-rw-r--r--  kernel/api/syscall.c    14
-rw-r--r--  kernel/proc/fork.c       2
-rw-r--r--  kernel/proc/kthread.c   28
-rw-r--r--  kernel/util/debug.c      3
-rw-r--r--  kernel/vm/pagefault.c   81
-rw-r--r--  kernel/vm/shadow.c     111
-rw-r--r--  kernel/vm/vmmap.c        4
8 files changed, 140 insertions, 149 deletions
diff --git a/kernel/api/access.c b/kernel/api/access.c
index 82f03ed..944277c 100644
--- a/kernel/api/access.c
+++ b/kernel/api/access.c
@@ -114,40 +114,21 @@ long user_vecdup(argvec_t *uvec, char ***kvecp)
*
* Check against the vmarea's protections on the mapping.
*/
-// long addr_perm(proc_t *p, const void *vaddr, int perm)
-// {
-// // NOT_YET_IMPLEMENTED("vm:: addr_perm");
-
-// // loop through the vmareas in the process's vmmap
-// vmarea_t *vma = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
-// return vma && !!(perm & vma->vma_prot);
-// }
long addr_perm(proc_t *p, const void *vaddr, int perm)
{
- // NOT_YET_IMPLEMENTED("VM: addr_perm");
- // return 0;
- vmarea_t *vma = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
- if (vma == NULL)
- {
- return 0;
- }
-
- if ((perm & PROT_READ) && !(vma->vma_prot & PROT_READ))
- {
- return 0;
- }
+ // NOT_YET_IMPLEMENTED("vm:: addr_perm");
- if ((perm & PROT_WRITE) && !(vma->vma_prot & PROT_WRITE))
- {
- return 0;
- }
+ // loop through the vmareas in the process's vmmap
+ // vmarea_t *vma = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
+ // return vma && !!(perm & vma->vma_prot);
- if ((perm & PROT_EXEC) && !(vma->vma_prot & PROT_EXEC))
+ vmarea_t *vma = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
+ if (vma == NULL)
{
return 0;
}
- return 1;
+ return (vma->vma_prot & perm) == perm;
}
@@ -162,17 +143,16 @@ long addr_perm(proc_t *p, const void *vaddr, int perm)
*/
long range_perm(proc_t *p, const void *vaddr, size_t len, int perm)
{
-
- for (
- size_t vfn = ADDR_TO_PN(vaddr);
- vfn < ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)vaddr + len));
- vfn++
- )
+ // NOT_YET_IMPLEMENTED("vm:: range_perm");
+ size_t start = ADDR_TO_PN(vaddr);
+ size_t end = ADDR_TO_PN(PAGE_ALIGN_UP(vaddr + len));
+ while (start < end)
{
- if (!addr_perm(p, PN_TO_ADDR(vfn), perm))
+ if (addr_perm(p, PN_TO_ADDR(start), perm) == 0)
{
return 0;
}
+ start++;
}
return 1;
}
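Note on the rewritten addr_perm: the single test (vma->vma_prot & perm) == perm asks whether every bit requested in perm is also set in the mapping's protections, which is what the three per-flag checks it replaces did. A minimal standalone sketch of that bitmask check, with PROT_* stand-in values assumed to have the usual one-bit-per-flag layout (not taken from the kernel headers):

    #include <assert.h>

    #define PROT_READ  0x1   /* stand-in flag values, one bit per permission */
    #define PROT_WRITE 0x2
    #define PROT_EXEC  0x4

    /* 1 iff every permission bit requested in perm is present in prot */
    static int has_perm(int prot, int perm)
    {
        return (prot & perm) == perm;
    }

    int main(void)
    {
        assert(has_perm(PROT_READ | PROT_WRITE, PROT_READ));
        assert(has_perm(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE));
        assert(!has_perm(PROT_READ, PROT_READ | PROT_WRITE));  /* missing write */
        assert(!has_perm(PROT_READ, PROT_EXEC));               /* missing exec */
        return 0;
    }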
diff --git a/kernel/api/syscall.c b/kernel/api/syscall.c
index 7c76e51..c5fea6d 100644
--- a/kernel/api/syscall.c
+++ b/kernel/api/syscall.c
@@ -77,10 +77,11 @@ static long sys_read(read_args_t *args)
ERROR_OUT_RET(ret);
// Allocate a temporary buffer (a page-aligned block of n pages that are enough space to store the number of bytes to read)
- size_t size_in_pages = 0;
- while(++size_in_pages * PAGE_SIZE < kargs.nbytes)
- ;
- void *addr = page_alloc_n(size_in_pages);
+ // size_t size_in_pages = 0;
+ // while(++size_in_pages * PAGE_SIZE < kargs.nbytes)
+ // ;
+ size_t size_in_pages = ADDR_TO_PN(PAGE_ALIGN_UP(kargs.nbytes));
+ char *addr = (char *)page_alloc_n(size_in_pages);
if (!addr)
{
ret = -ENOMEM;
@@ -134,6 +135,11 @@ static long sys_write(write_args_t *args)
long ret = copy_from_user(&kargs, args, sizeof(kargs));
ERROR_OUT_RET(ret);
+ if (kargs.nbytes == 0)
+ {
+ return 0;
+ }
+
// Allocate a temporary buffer (a page-aligned block of n pages that are enough space to store the number of bytes to write)
size_t size_in_pages = 0;
while(++size_in_pages * PAGE_SIZE < kargs.nbytes)
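The sys_read change above swaps the page-counting while loop for ADDR_TO_PN(PAGE_ALIGN_UP(kargs.nbytes)), i.e. rounding the byte count up to whole pages. A small sketch of the arithmetic, using a local PAGE_SIZE and helper as stand-ins for the kernel macros (illustrative only); the one behavioral difference is nbytes == 0, where the old loop produced 1 page and the round-up produces 0:

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL   /* stand-in for the kernel's page size */

    /* ceil(nbytes / PAGE_SIZE): what ADDR_TO_PN(PAGE_ALIGN_UP(nbytes)) computes */
    static size_t pages_needed(size_t nbytes)
    {
        return (nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
        assert(pages_needed(1) == 1);
        assert(pages_needed(PAGE_SIZE) == 1);
        assert(pages_needed(PAGE_SIZE + 1) == 2);
        assert(pages_needed(0) == 0);   /* the old loop returned 1 here */
        return 0;
    }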
diff --git a/kernel/proc/fork.c b/kernel/proc/fork.c
index b501b1e..a6436ba 100644
--- a/kernel/proc/fork.c
+++ b/kernel/proc/fork.c
@@ -75,7 +75,7 @@ long do_fork(struct regs *regs)
// Fix the values of the registers and the rest of the kthread's ctx
regs->r_rax = 0; // Set the return value to 0 for the child
- child_thread->kt_ctx.c_rsp = fork_setup_stack(regs, child_thread->kt_ctx.c_kstack); // Set the stack pointer for the child
+ child_thread->kt_ctx.c_rsp = fork_setup_stack(regs, (void *)child_thread->kt_ctx.c_kstack); // Set the stack pointer for the child
child_thread->kt_ctx.c_rip = (uintptr_t) userland_entry; // Set the instruction pointer to userland_entry
// child_thread->kt_ctx.c_rbp = curthr->kt_ctx.c_rbp; // Set the current thread's base pointer to the child's base pointer
child_thread->kt_ctx.c_pml4 = child_proc->p_pml4; // Set the current thread's page table to the child's page table
diff --git a/kernel/proc/kthread.c b/kernel/proc/kthread.c
index 722d932..b837721 100644
--- a/kernel/proc/kthread.c
+++ b/kernel/proc/kthread.c
@@ -128,30 +128,37 @@ kthread_t *kthread_clone(kthread_t *thr)
{
return NULL;
}
- new_thread->kt_state = KT_NO_STATE;
// copy the stack
- new_thread->kt_ctx.c_kstack = alloc_stack();
- if (new_thread->kt_kstack == NULL)
+ char * stk = alloc_stack();
+ if (stk == NULL)
{
slab_obj_free(kthread_allocator, new_thread);
return NULL;
}
- new_thread->kt_kstack = new_thread->kt_ctx.c_kstack;
+ // if not null, set the stack
+ new_thread->kt_kstack = (char *)stk;
new_thread->kt_ctx.c_kstacksz = DEFAULT_STACK_SIZE;
-
+ new_thread->kt_ctx.c_kstack = (uintptr_t)stk;
// set the retval, errno, cancelled
new_thread->kt_retval = thr->kt_retval;
new_thread->kt_errno = thr->kt_errno;
new_thread->kt_cancelled = thr->kt_cancelled;
- new_thread->kt_preemption_count = 0;
new_thread->kt_recent_core = ~0UL;
- new_thread->kt_wchan = NULL;
+
+ // null fields
+ new_thread->kt_wchan = thr->kt_wchan;
+ new_thread->kt_proc = NULL;
+ new_thread->kt_ctx.c_rbp = 0;
+ new_thread->kt_ctx.c_rsp = 0;
+ new_thread->kt_ctx.c_rip = 0;
+ new_thread->kt_ctx.c_pml4 = NULL;
+ new_thread->kt_preemption_count = 0;
// freshly initialize the rest of the fields
- list_init(&new_thread->kt_mutexes);
list_link_init(&new_thread->kt_plink);
list_link_init(&new_thread->kt_qlink);
+ list_init(&new_thread->kt_mutexes);
return new_thread;
}
@@ -194,10 +201,9 @@ void kthread_cancel(kthread_t *thr, void *retval)
KASSERT(thr != curthr);
// ask about the use of check_curthr_cancelled() in syscall_handler()
- int status = (int) retval;
dbg(DBG_THR, "Cancelling thread with proc name=%s, id=%d, status=%d\n",
- thr->kt_proc->p_name, thr->kt_proc->p_pid, status);
- thr->kt_retval = retval;
+ thr->kt_proc->p_name, thr->kt_proc->p_pid, (int) retval);
+ thr->kt_retval = (void *)retval;
sched_cancel(thr);
}
diff --git a/kernel/util/debug.c b/kernel/util/debug.c
index e2a589d..849b60a 100644
--- a/kernel/util/debug.c
+++ b/kernel/util/debug.c
@@ -19,8 +19,7 @@
* always be the first thing in this variable. Note that this setting can be
* changed at runtime by modifying the dbg_modes global variable.
*/
-#define INIT_DBG_MODES "-all,exec,elf,vm,syscall,print"
-
+#define INIT_DBG_MODES "-all"
/* Below is a truly terrible poll-driven serial driver that we use for debugging
* purposes - it outputs to COM1, but
* this can be easily changed. It does not use interrupts, and cannot read input
diff --git a/kernel/vm/pagefault.c b/kernel/vm/pagefault.c
index b289537..39e5776 100644
--- a/kernel/vm/pagefault.c
+++ b/kernel/vm/pagefault.c
@@ -49,80 +49,87 @@ void handle_pagefault(uintptr_t vaddr, uintptr_t cause)
{
dbg(DBG_VM, "vaddr = 0x%p (0x%p), cause = %lu\n", (void *)vaddr,
PAGE_ALIGN_DOWN(vaddr), cause);
+
// NOT_YET_IMPLEMENTED("VM: handle_pagefault");
- // 1) Find the vmarea that contains vaddr, if it exists.
- // check that the vaddr is valid
- if (vaddr < USER_MEM_LOW || vaddr > USER_MEM_HIGH)
+ // Check that the vaddr is valid
+ if (vaddr < USER_MEM_LOW)
{
do_exit(EFAULT);
}
- // lookup the vmarea for this addr
- vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr));
- if (vma == NULL)
+ if (vaddr > USER_MEM_HIGH)
{
do_exit(EFAULT);
}
- // 2) Check the vmarea's protections (see the vmarea_t struct) against the 'cause'
- // error out if the fault has cause write and we don't have write permission in the area
- if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE))
+ // Lookup the vmarea for this address
+ size_t pn = ADDR_TO_PN(vaddr);
+ vmarea_t* vma = vmmap_lookup(curproc->p_vmmap, pn);
+ if (!vma)
{
do_exit(EFAULT);
}
- // error out if the fault has cause exec and we don't have exec permission in the area
- if ((cause & FAULT_EXEC) && !(vma->vma_prot & PROT_EXEC))
+
+ // Error out if we don't have any permission in the area
+ if (vma->vma_prot == PROT_NONE)
{
do_exit(EFAULT);
}
- // error out if we don't have read permission in the area
- if (!(vma->vma_prot & PROT_READ))
+
+ // Check the vmarea's protections (see the vmarea_t struct) against the 'cause' of the pagefault
+ if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE))
{
do_exit(EFAULT);
- }
- // error our if we don't have any permission in the area
- if (vma->vma_prot == PROT_NONE)
+ }
+ else if ((cause & FAULT_EXEC) && !(vma->vma_prot & PROT_EXEC))
+ {
+ do_exit(EFAULT);
+ }
+ else if (!(vma->vma_prot & PROT_READ))
{
do_exit(EFAULT);
}
- // 3) Obtain the corresponding pframe from the vmarea's mobj.
- pframe_t *pf;
+ // Obtain the corresponding pframe from the vmarea's mobj
+ long forwrite = 0;
+ if (cause & FAULT_WRITE)
+ {
+ forwrite = 1;
+ }
+ pframe_t* pfp;
mobj_lock(vma->vma_obj);
- int ret = mobj_get_pframe(
+ long status = mobj_get_pframe(
vma->vma_obj,
- vma->vma_off + ADDR_TO_PN(vaddr) - vma->vma_start,
- cause & FAULT_WRITE ? 1 : 0,
- &pf
+ pn - vma->vma_start + vma->vma_off,
+ forwrite,
+ &pfp
);
mobj_unlock(vma->vma_obj);
- if (ret < 0)
+ if (status < 0)
{
do_exit(EFAULT);
}
- // 4) Finally, set up a call to pt_map to insert a new mapping into the appropriate pagetable
- uintptr_t paddr = pt_virt_to_phys(pf->pf_addr);
- pframe_release(&pf);
- int pdflags = PT_PRESENT | PT_WRITE | PT_USER;
- int ptflags = PT_PRESENT | PT_USER;
- if (cause & FAULT_WRITE)
+ // Set up a call to pt_map to insert a new mapping into the appropriate pagetable
+ uintptr_t paddr = pt_virt_to_phys((uintptr_t) pfp->pf_addr);
+ pframe_release(&pfp);
+ uint32_t ptflags = PT_PRESENT | PT_USER;
+ if (cause & FAULT_WRITE)
{
ptflags |= PT_WRITE;
}
-
- int err = pt_map(
+ status = pt_map(
curproc->p_pml4,
- paddr,
- (uintptr_t) PAGE_ALIGN_DOWN(vaddr),
- pdflags,
+ paddr,
+ (uintptr_t) PAGE_ALIGN_DOWN(vaddr),
+ PT_PRESENT | PT_USER | PT_WRITE,
ptflags
);
- if (err < 0)
+ if (status < 0)
{
do_exit(EFAULT);
}
- // 5) Flush the TLB
- tlb_flush((uintptr_t) PAGE_ALIGN_DOWN(vaddr));
+ // Flush the TLB
+ tlb_flush(vaddr);
}
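For the mobj_get_pframe call above, the page handed to the backing object is pn - vma->vma_start + vma->vma_off: the faulting page's index within the area, rebased by the area's offset into the object. A standalone sketch of that arithmetic, with a stand-in struct rather than the kernel's vmarea_t:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define ADDR_TO_PN(a) ((uintptr_t)(a) >> PAGE_SHIFT)   /* stand-in macro */

    struct vmarea_stub {
        size_t vma_start;   /* first virtual page number of the mapping */
        size_t vma_off;     /* page offset of vma_start within the backing object */
    };

    /* page number within the backing object for a faulting virtual address */
    static size_t fault_obj_pagenum(uintptr_t vaddr, const struct vmarea_stub *vma)
    {
        return ADDR_TO_PN(vaddr) - vma->vma_start + vma->vma_off;
    }

    int main(void)
    {
        struct vmarea_stub vma = { .vma_start = 0x1000, .vma_off = 4 };
        uintptr_t vaddr = (uintptr_t)0x1003 << PAGE_SHIFT;   /* 3 pages into the area */
        assert(fault_obj_pagenum(vaddr, &vma) == 7);         /* page 7 of the object */
        return 0;
    }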
diff --git a/kernel/vm/shadow.c b/kernel/vm/shadow.c
index 06cf20d..4883160 100644
--- a/kernel/vm/shadow.c
+++ b/kernel/vm/shadow.c
@@ -43,6 +43,7 @@ void shadow_init()
{
// NOT_YET_IMPLEMENTED("VM: shadow_init");
shadow_allocator = slab_allocator_create("shadow", sizeof(mobj_shadow_t));
+ KASSERT(shadow_allocator);
}
/*
@@ -55,7 +56,7 @@ void shadow_init()
* 2) Set up the bottom object of the shadow chain, which could have two cases:
* a) Either shadowed is a shadow object, and you can use its bottom_mobj
* b) Or shadowed is not a shadow object, in which case it is the bottom
- * object of this chain.
+ * object of this chain.shadow_create
*
* Make sure to manage the refcounts correctly.
*/
@@ -63,6 +64,11 @@ mobj_t *shadow_create(mobj_t *shadowed)
{
// NOT_YET_IMPLEMENTED("VM: shadow_create");
+ if (!shadowed)
+ {
+ return NULL;
+ }
+
// create a new shadow object
mobj_shadow_t *so = (mobj_shadow_t *)slab_obj_alloc(shadow_allocator);
if (!so)
@@ -142,46 +148,41 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
pframe_t **pfp)
{
// NOT_YET_IMPLEMENTED("VM: shadow_get_pframe");
-
- // if forwrite is set, use mobj_default_get_pframe
+ // return 0;
if (forwrite)
{
return mobj_default_get_pframe(o, pagenum, forwrite, pfp);
}
-
- // else, check if the object already contains the desired frame
+ // check if o already contains the desired frame.
pframe_t *pf = NULL;
mobj_find_pframe(o, pagenum, &pf);
if (pf)
{
- // if it does, return the pframe
*pfp = pf;
return 0;
}
- // iterate through the shadow chain to find the nearest shadow mobj that has the frame
- mobj_shadow_t *so = MOBJ_TO_SO(o);
- mobj_t *iter = so->shadowed;
- while (iter && iter->mo_type == MOBJ_SHADOW)
+ mobj_shadow_t *shadow = MOBJ_TO_SO(o);
+ mobj_t *current = shadow->shadowed;
+
+ while (current && current->mo_type == MOBJ_SHADOW)
{
- mobj_lock(iter);
- mobj_find_pframe(iter, pagenum, &pf);
- mobj_unlock(iter);
+ mobj_lock(current);
+ mobj_find_pframe(current, pagenum, &pf);
+ mobj_unlock(current);
+
if (pf)
{
*pfp = pf;
return 0;
}
- // update the iterator
- so = MOBJ_TO_SO(iter);
- iter = so->shadowed;
- }
- // if no shadow objects have the page, call mobj_get_pframe() to get the page from the bottom object
- // at this point, iter is the bottom object
- mobj_lock(so->bottom_mobj);
- long ret = mobj_get_pframe(so->bottom_mobj, pagenum, forwrite, pfp);
- mobj_unlock(so->bottom_mobj);
+ shadow = MOBJ_TO_SO(current);
+ current = shadow->shadowed;
+ }
+ mobj_lock(shadow->bottom_mobj);
+ long ret = mobj_get_pframe(shadow->bottom_mobj, pagenum, forwrite, pfp);
+ mobj_unlock(shadow->bottom_mobj);
return ret;
}
@@ -208,48 +209,39 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
*/
static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
{
- // NOT_YET_IMPLEMENTEDshadow_fill_pframe");
+ // NOT_YET_IMPLEMENTED("VM: shadow_fill_pframe");
+ // return -1;
+ pframe_t *pf_shadow = NULL;
+ long ret = 0;
+
- // get the mobj_shadow_t
mobj_shadow_t *so = MOBJ_TO_SO(o);
- // iterate over the shadow chain
- mobj_t *iter = so->shadowed;
- while (iter && iter->mo_type == MOBJ_SHADOW)
+ mobj_t *shadowed = so->shadowed;
+ while (shadowed && shadowed->mo_type == MOBJ_SHADOW)
{
- // get the pframe from the shadow object
- pframe_t *spf = NULL;
- mobj_lock(iter);
- mobj_find_pframe(iter, pf->pf_pagenum, &spf);
- mobj_unlock(iter);
+ mobj_lock(shadowed);
+ mobj_find_pframe(shadowed, pf->pf_pagenum, &pf_shadow);
+ mobj_unlock(shadowed);
- // if the pframe is found, copy the contents into pf
- // then release the pframe
- if (spf)
+ if (pf_shadow)
{
- memcpy(pf->pf_addr, spf->pf_addr, PAGE_SIZE);
- pframe_release(&spf);
+ memcpy(pf->pf_addr, pf_shadow->pf_addr, PAGE_SIZE);
+ pframe_release(&pf_shadow);
return 0;
}
- // update the iterator
- so = MOBJ_TO_SO(iter);
- iter = so->shadowed;
+ so = MOBJ_TO_SO(shadowed);
+ shadowed = so->shadowed;
}
-
- // if none of the shadow objects have a copy of the frame, use mobj_get_pframe on the bottom object
- pframe_t *spf = NULL;
mobj_lock(so->bottom_mobj);
- long ret = mobj_get_pframe(so->bottom_mobj, pf->pf_pagenum, 0, &spf);
+ ret = mobj_get_pframe(so->bottom_mobj, pf->pf_pagenum, 0, &pf_shadow);
mobj_unlock(so->bottom_mobj);
- // check if the operation was sucessful, memcpy the contents into pf
- // and release the pframe
- if (ret >= 0)
- {
- memcpy(pf->pf_addr, spf->pf_addr, PAGE_SIZE);
- pframe_release(&spf);
+ if (ret < 0) {
+ return ret;
}
-
- return ret;
+ memcpy(pf->pf_addr, pf_shadow->pf_addr, PAGE_SIZE);
+ pframe_release(&pf_shadow);
+ return 0;
}
/*
@@ -264,6 +256,7 @@ static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
static long shadow_flush_pframe(mobj_t *o, pframe_t *pf)
{
// NOT_YET_IMPLEMENTED("VM: shadow_flush_pframe");
+ // return -1;
return 0;
}
@@ -280,17 +273,17 @@ static long shadow_flush_pframe(mobj_t *o, pframe_t *pf)
static void shadow_destructor(mobj_t *o)
{
// NOT_YET_IMPLEMENTED("VM: shadow_destructor");
+ mobj_default_destructor(o);
- // get the mobj_shadow_t
mobj_shadow_t *so = MOBJ_TO_SO(o);
+ // dbg(DBG_PROC, "shadow_destructor: refcount bottom: %d\n", so->bottom_mobj->mo_refcount);
+ // dbg(DBG_PROC, "shadow_destructor: refcount: %d\n", so->shadowed->mo_refcount);
- // call the default destructor
- mobj_default_destructor(o);
-
- // put the shadow and bottom_mobj
+ // put the shadow and bottom_mobj members of the shadow object
mobj_put(&so->shadowed);
mobj_put(&so->bottom_mobj);
- // free the slab
slab_obj_free(shadow_allocator, so);
-}
\ No newline at end of file
+
+ return;
+}
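The shadow_get_pframe and shadow_fill_pframe changes both walk the shadow chain from the faulting object toward the bottom object, taking the first level that has its own copy of the page and falling through to the bottom object otherwise. A reduced sketch of that lookup over a plain linked list, with stand-in types rather than mobj_t/mobj_shadow_t:

    #include <assert.h>
    #include <stddef.h>

    struct shadow_stub {
        const char *page;              /* non-NULL if this level caches its own copy */
        struct shadow_stub *shadowed;  /* next object down the chain, NULL at the end */
    };

    /* return the nearest cached copy, or bottom_page if no level has one */
    static const char *chain_lookup(struct shadow_stub *top, const char *bottom_page)
    {
        for (struct shadow_stub *cur = top; cur != NULL; cur = cur->shadowed)
        {
            if (cur->page)
            {
                return cur->page;
            }
        }
        return bottom_page;
    }

    int main(void)
    {
        struct shadow_stub bottom = { .page = NULL, .shadowed = NULL };
        struct shadow_stub mid    = { .page = "middle copy", .shadowed = &bottom };
        struct shadow_stub top    = { .page = NULL, .shadowed = &mid };

        assert(chain_lookup(&top, "bottom page") == mid.page);      /* nearest copy wins */
        assert(chain_lookup(&bottom, "bottom page") != mid.page);   /* falls to bottom */
        return 0;
    }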
diff --git a/kernel/vm/vmmap.c b/kernel/vm/vmmap.c
index 8c1a455..5f8e575 100644
--- a/kernel/vm/vmmap.c
+++ b/kernel/vm/vmmap.c
@@ -169,7 +169,7 @@ void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
*/
ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
{
- // NOT_YET_IMPLEMENTED("VM: vmmap_find_range");
+ // : vmmap_find_range");
// case 1: dir is VMMAP_DIR_LOHI
if (dir == VMMAP_DIR_LOHI)
@@ -871,4 +871,4 @@ end:
buf[osize - 1] = '\0';
}
return osize - size;
-}
+}
\ No newline at end of file