author     sotech117 <michael_foiani@brown.edu>  2024-05-13 09:27:24 +0000
committer  sotech117 <michael_foiani@brown.edu>  2024-05-13 09:27:24 +0000
commit     f09878f6327426631d9419d825a4e8396e3b9dc4 (patch)
tree       009d1f1b1386baf6d07b3b7d9a436590ada14094
parent     0e2acbe54e5800621692c2f6e9e9590aa369e165 (diff)
weenix
-rw-r--r--  Config.mk                      6
-rw-r--r--  kernel/api/access.c           32
-rw-r--r--  kernel/api/syscall.c         126
-rw-r--r--  kernel/drivers/memdevs.c      15
-rw-r--r--  kernel/fs/s5fs/s5fs.c          6
-rw-r--r--  kernel/fs/vnode_specials.c    39
-rw-r--r--  kernel/main/kmain.c            7
-rw-r--r--  kernel/proc/fork.c            75
-rw-r--r--  kernel/proc/kthread.c         40
-rw-r--r--  kernel/proc/proc.c            33
-rw-r--r--  kernel/util/debug.c            2
-rw-r--r--  kernel/vm/anon.c              33
-rw-r--r--  kernel/vm/brk.c              101
-rw-r--r--  kernel/vm/mmap.c             165
-rw-r--r--  kernel/vm/pagefault.c         75
-rw-r--r--  kernel/vm/shadow.c           188
-rw-r--r--  kernel/vm/vmmap.c            556
17 files changed, 1410 insertions, 89 deletions
diff --git a/Config.mk b/Config.mk
index c17e704..2749f85 100644
--- a/Config.mk
+++ b/Config.mk
@@ -13,8 +13,8 @@
DRIVERS=1
VFS=1
S5FS=1
- VM=0
- DYNAMIC=0
+ VM=1
+ DYNAMIC=1
# When you finish S5FS, first enable "VM"; once this is working, then enable
# "DYNAMIC".
@@ -63,7 +63,7 @@
# Parameters for the hard disk we build (must be compatible!)
# If the FS is too big for the disk, BAD things happen!
- DISK_BLOCKS=2048 # For fsmaker
+ DISK_BLOCKS=3072 # For fsmaker
DISK_INODES=240 # For fsmaker
# Boolean options specified in this file that should be
diff --git a/kernel/api/access.c b/kernel/api/access.c
index 0e11b73..9a7bed0 100644
--- a/kernel/api/access.c
+++ b/kernel/api/access.c
@@ -116,8 +116,18 @@ long user_vecdup(argvec_t *uvec, char ***kvecp)
*/
long addr_perm(proc_t *p, const void *vaddr, int perm)
{
- NOT_YET_IMPLEMENTED("VM: addr_perm");
- return 0;
+ // NOT_YET_IMPLEMENTED("VM: addr_perm");
+
+ // find the vmarea containing vaddr, if any
+ vmarea_t *vma = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
+
+ // if no vmarea maps the address, the access is not permitted
+ if (!vma)
+ {
+ return 0;
+ }
+
+ // every requested permission bit must be present in the vmarea
+ return (vma->vma_prot & perm) == perm;
}
/*
@@ -131,6 +141,20 @@ long addr_perm(proc_t *p, const void *vaddr, int perm)
*/
long range_perm(proc_t *p, const void *vaddr, size_t len, int perm)
{
- NOT_YET_IMPLEMENTED("VM: range_perm");
- return 0;
+ // NOT_YET_IMPLEMENTED("VM: range_perm");
+
+ // loop through the page numbers in the range
+ size_t vfn = ADDR_TO_PN(vaddr);
+ // round the end up so a partially covered final page is checked too
+ size_t end_vfn = ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)vaddr + len));
+ for (size_t i = vfn; i < end_vfn; i++)
+ {
+ // check the permissions for each page
+ if (!addr_perm(p, PN_TO_ADDR(i), perm))
+ {
+ return 0;
+ }
+ }
+
+ // return 1 if all pages have the correct permissions
+ return 1;
}
diff --git a/kernel/api/syscall.c b/kernel/api/syscall.c
index ed771ac..e077631 100644
--- a/kernel/api/syscall.c
+++ b/kernel/api/syscall.c
@@ -69,8 +69,48 @@ void syscall_init(void) { intr_register(INTR_SYSCALL, syscall_handler); }
*/
static long sys_read(read_args_t *args)
{
- NOT_YET_IMPLEMENTED("VM: sys_read");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: sys_read");
+
+ // Initialize a read_args_t struct locally in kernel space and copy from userland args.
+ read_args_t kargs;
+ long ret = copy_from_user(&kargs, args, sizeof(kargs));
+ ERROR_OUT_RET(ret);
+
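+ // a kernel bounce buffer is used so that do_read() never dereferences
+ // an unchecked userland pointer; copy_to_user() performs the access
+ // checks when the data is copied back out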
+ // Allocate a temporary buffer (a page-aligned block of n pages that are enough space to store the number of bytes to read)
+ size_t size_in_pages = (kargs.nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ void *addr = (void *)page_alloc_n(size_in_pages);
+ if (!addr)
+ {
+ ERROR_OUT_RET(-ENOMEM);
+ }
+
+ // Call do_read() with the buffer and then copy the buffer to the userland args after the system call
+ ret = do_read(kargs.fd, addr, kargs.nbytes);
+ // if ret < 0, free the temporary buffer and return -1
+ if (ret < 0)
+ {
+ page_free_n(addr, size_in_pages);
+ ERROR_OUT_RET(ret);
+ }
+ // if read nothing, free the temporary buffer and return 0
+ if (ret == 0)
+ {
+ page_free_n(addr, size_in_pages);
+ return 0;
+ }
+
+ // copy the buffer to the userland args after the system call
+ ret = copy_to_user(kargs.buf, addr, ret);
+ // if ret < 0, free the temporary buffer and return -1
+ if (ret < 0)
+ {
+ page_free_n(addr, size_in_pages);
+ ERROR_OUT_RET(ret);
+ }
+
+ // Make sure to free the temporary buffer allocated
+ page_free_n(addr, size_in_pages);
+ return ret;
}
/*
@@ -84,8 +124,42 @@ static long sys_read(read_args_t *args)
*/
static long sys_write(write_args_t *args)
{
- NOT_YET_IMPLEMENTED("VM: sys_write");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: sys_write");
+
+ // Initialize a write_args_t struct locally in kernel space and copy from userland args.
+ write_args_t kargs;
+ long ret = copy_from_user(&kargs, args, sizeof(kargs));
+ ERROR_OUT_RET(ret);
+
+ // Allocate a temporary buffer (a page-aligned block of n pages that are enough space to store the number of bytes to write)
+ size_t size_in_pages = (kargs.nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ void *addr = (void *)page_alloc_n(size_in_pages);
+ if (!addr)
+ {
+ ERROR_OUT_RET(-ENOMEM);
+ }
+
+ // Copy the buffer from the userland args to the temporary buffer
+ ret = copy_from_user(addr, kargs.buf, kargs.nbytes);
+ // if ret < 0, free the temporary buffer and return -1
+ if (ret < 0)
+ {
+ page_free_n(addr, size_in_pages);
+ ERROR_OUT_RET(ret);
+ }
+
+ // Call do_write() with the buffer and then copy the buffer to the userland args after the system call
+ ret = do_write(kargs.fd, addr, kargs.nbytes);
+ // if ret < 0, free the temporary buffer and return -1
+ if (ret < 0)
+ {
+ page_free_n(addr, size_in_pages);
+ ERROR_OUT_RET(ret);
+ }
+
+ // Make sure to free the temporary buffer allocated
+ page_free_n(addr, size_in_pages);
+ return ret;
}
/*
@@ -100,8 +174,48 @@ static long sys_write(write_args_t *args)
*/
static long sys_getdents(getdents_args_t *args)
{
- NOT_YET_IMPLEMENTED("VM: sys_getdents");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: sys_getdents");
+
+ // Copy the arguments from user memory
+ getdents_args_t kargs;
+ long ret = copy_from_user(&kargs, args, sizeof(kargs));
+ ERROR_OUT_RET(ret);
+
+ // Check that the count field is at least the size of a dirent_t
+ if (kargs.count < sizeof(dirent_t))
+ {
+ ERROR_OUT_RET(-EINVAL);
+ }
+
+ size_t count_read = 0;
+
+ // copy out entries until the next one would overflow the user's buffer
+ while ((count_read + 1) * sizeof(dirent_t) <= kargs.count)
+ {
+ // read the next directory entry with do_getdent
+ dirent_t d;
+ ret = do_getdent(kargs.fd, &d);
+ ERROR_OUT_RET(ret); // error check
+
+ // if read nothing, break
+ if (ret == 0)
+ {
+ break;
+ }
+ // if you read a different size than dirent_t, break
+ if (ret != sizeof(dirent_t))
+ {
+ break;
+ }
+
+ // copy the dirent_t to the userland args after the system call
+ ret = copy_to_user(kargs.dirp + count_read, &d, sizeof(dirent_t));
+ ERROR_OUT_RET(ret); // error check
+
+ count_read++;
+ }
+
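+ // report the total number of bytes copied into dirp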
+ return count_read * sizeof(dirent_t);
}
#ifdef __MOUNTING__
diff --git a/kernel/drivers/memdevs.c b/kernel/drivers/memdevs.c
index 815143a..eeaaddc 100644
--- a/kernel/drivers/memdevs.c
+++ b/kernel/drivers/memdevs.c
@@ -149,6 +149,17 @@ static ssize_t zero_read(chardev_t *dev, size_t pos, void *buf, size_t count)
*/
static long zero_mmap(vnode_t *file, mobj_t **ret)
{
- NOT_YET_IMPLEMENTED("VM: zero_mmap");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: zero_mmap");
+
+ // create a new anonymous object; anon_create returns it locked
+ mobj_t *mobj = anon_create();
+ if (mobj == NULL)
+ {
+ return -ENOMEM;
+ }
+ mobj_unlock(mobj); // unlock it before handing it out
+
+ // set the return value
+ *ret = mobj;
+ return 0;
}
diff --git a/kernel/fs/s5fs/s5fs.c b/kernel/fs/s5fs/s5fs.c
index 602c7aa..ba406e7 100644
--- a/kernel/fs/s5fs/s5fs.c
+++ b/kernel/fs/s5fs/s5fs.c
@@ -400,7 +400,11 @@ static ssize_t s5fs_write(vnode_t *vnode, size_t pos, const void *buf,
*/
static long s5fs_mmap(vnode_t *file, mobj_t **ret)
{
- NOT_YET_IMPLEMENTED("VM: s5fs_mmap");
+ // NOT_YET_IMPLEMENTED("VM: s5fs_mmap");
+
+ // Add a reference to the underlying mobj and return it
+ mobj_ref(&file->vn_mobj);
+ *ret = &file->vn_mobj;
return 0;
}
diff --git a/kernel/fs/vnode_specials.c b/kernel/fs/vnode_specials.c
index fb6df0b..d8c79bd 100644
--- a/kernel/fs/vnode_specials.c
+++ b/kernel/fs/vnode_specials.c
@@ -159,20 +159,47 @@ static long chardev_file_write(vnode_t *file, size_t pos, const void *buf,
*/
static long chardev_file_mmap(vnode_t *file, mobj_t **ret)
{
- NOT_YET_IMPLEMENTED("VM: chardev_file_mmap");
- return 0;
+ // NOT_YET_IMPLEMENTED("VM: chardev_file_mmap");
+
+ // check if the vnode represents a chardev
+ chardev_t *dev = file->vn_dev.chardev;
+ if (dev == NULL)
+ {
+ return -ENXIO;
+ }
+
+ // call the chardev's mmap operation
+ return dev->cd_ops->mmap(file, ret);
}
static long chardev_file_fill_pframe(vnode_t *file, pframe_t *pf)
{
- NOT_YET_IMPLEMENTED("VM: chardev_file_fill_pframe");
- return 0;
+ // NOT_YET_IMPLEMENTED("VM: chardev_file_fill_pframe");
+
+ // check if the vnode represents a chardev
+ chardev_t *dev = file->vn_dev.chardev;
+ if (dev == NULL)
+ {
+ return -ENXIO;
+ }
+
+ // call the chardev's fill_pframe operation
+ return dev->cd_ops->fill_pframe(file, pf);
}
static long chardev_file_flush_pframe(vnode_t *file, pframe_t *pf)
{
- NOT_YET_IMPLEMENTED("VM: chardev_file_flush_pframe");
- return 0;
+ // NOT_YET_IMPLEMENTED("VM: chardev_file_flush_pframe");
+
+ // check if the vnode represents a chardev
+ chardev_t *dev = file->vn_dev.chardev;
+ if (dev == NULL)
+ {
+ return -ENXIO;
+ }
+
+ // call the chardev's flush_pframe operation
+ return dev->cd_ops->flush_pframe(file, pf);
}
static ssize_t blockdev_file_read(vnode_t *file, size_t pos, void *buf,
diff --git a/kernel/main/kmain.c b/kernel/main/kmain.c
index 4c49b9a..6007acd 100644
--- a/kernel/main/kmain.c
+++ b/kernel/main/kmain.c
@@ -170,6 +170,10 @@ static void *initproc_run(long arg1, void *arg2)
// dbg(DBG_PROC, "%s", "In main thread!\n");
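+ // hand off to the userland init program; on success kernel_execve()
+ // does not return here, so the driver-test code below only runs if
+ // the exec fails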
+ char *argv[1] = {NULL};
+ char *envp[1] = {NULL};
+ kernel_execve("/sbin/init", argv, envp);
+
#ifdef __DRIVERS__
// driverstest_main(0, NULL);
char name[32] = {0};
@@ -183,7 +187,8 @@ static void *initproc_run(long arg1, void *arg2)
#endif
// see if there are any children to wait for
- while (do_waitpid(-1, 0, 0) != -ECHILD)
+ int status = 0;
+ while (do_waitpid(-1, &status, 0) != -ECHILD)
{
// do nothing
}
diff --git a/kernel/proc/fork.c b/kernel/proc/fork.c
index 28f9f9c..cbf5e30 100644
--- a/kernel/proc/fork.c
+++ b/kernel/proc/fork.c
@@ -57,6 +57,77 @@ static uintptr_t fork_setup_stack(const regs_t *regs, void *kstack)
*/
long do_fork(struct regs *regs)
{
- NOT_YET_IMPLEMENTED("VM: do_fork");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: do_fork");
+
+ // Create a new process
+ proc_t *child_proc = proc_create("child from fork");
+ if (child_proc == NULL)
+ {
+ return -ENOMEM;
+ }
+ // Create a new thread
+ kthread_t *child_thread = kthread_clone(curthr);
+ if (child_thread == NULL)
+ {
+ proc_destroy(child_proc);
+ return -ENOMEM;
+ }
+
+ // Set the child process's parent to the current process
+ // child_thread->kt_proc = child_proc;
+ // list_insert_head(&child_proc->p_threads, &child_thread->kt_plink);
+
+ // Get the new vmmap_t for the child process
+ // vmmap_t *child_vmmap = vmmap_clone(curproc->p_vmmap);
+ // if (child_vmmap == NULL)
+ // {
+ // kthread_destroy(child_thread);
+ // proc_destroy(child_proc);
+ // return -ENOMEM;
+ // }
+
+ // Set the new vmmap_t for the child process
+ // child_proc->p_vmmap = child_vmmap;
+ // // Set the vmmap to the child process
+ // child_thread->kt_proc = child_proc;
+
+ // Set the working directory of the child process to the current process
+ // vref(curproc->p_cwd);
+ // child_proc->p_cwd = curproc->p_cwd;
+
+ // Copy over each file descriptor from the parent to the child
+ // for (int i = 0; i < NFILES; i++)
+ // {
+ // if (curproc->p_files[i] != NULL)
+ // {
+ // fref(curproc->p_files[i]);
+ // child_proc->p_files[i] = curproc->p_files[i];
+ // }
+ // }
+
+ // Fix the values of the registers and the rest of the kthread's ctx
+ regs->r_rax = 0; // Set the return value to 0 for the child
+ child_thread->kt_ctx.c_rsp = fork_setup_stack(regs, child_thread->kt_kstack); // Set the stack pointer for the child
+ child_thread->kt_ctx.c_rip = (uintptr_t) userland_entry; // Set the instruction pointer to userland_entry
+ // child_thread->kt_ctx.c_rbp = curthr->kt_ctx.c_rbp; // Set the current thread's base pointer to the child's base pointer
+ child_thread->kt_ctx.c_pml4 = child_proc->p_pml4; // the child runs on its own page tables
+ child_thread->kt_proc = child_proc; // Set the child process to the child thread
+
+ // Update the list
+ list_insert_tail(&child_proc->p_threads, &child_thread->kt_plink);
+
+
+ // Update the brks for the child process
+ // child_proc->p_brk = curproc->p_brk;
+ // child_proc->p_start_brk = curproc->p_start_brk;
+
+ // Unmap the parent's page table and flush the TLB
+ pt_unmap_range(curproc->p_pml4, USER_MEM_LOW, USER_MEM_HIGH);
+ tlb_flush_all();
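+ // with the user mappings gone, the next access from either process
+ // page-faults and is served through the memory objects, which is what
+ // makes copy-on-write via shadow objects work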
+
+ // Prepare the child process to be run on the CPU
+ sched_make_runnable(child_thread);
+
+ // Return the child's process id to the parent
+ return child_proc->p_pid;
}
diff --git a/kernel/proc/kthread.c b/kernel/proc/kthread.c
index d066dac..b614ac3 100644
--- a/kernel/proc/kthread.c
+++ b/kernel/proc/kthread.c
@@ -121,8 +121,42 @@ kthread_t *kthread_create(proc_t *proc, kthread_func_t func, long arg1,
*/
kthread_t *kthread_clone(kthread_t *thr)
{
- NOT_YET_IMPLEMENTED("VM: kthread_clone");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: kthread_clone");
+
+ kthread_t *new_thread = slab_obj_alloc(kthread_allocator);
+ if (new_thread == NULL)
+ {
+ return NULL;
+ }
+ new_thread->kt_state = KT_NO_STATE;
+
+ // allocate a fresh kernel stack for the clone
+ new_thread->kt_ctx.c_kstack = alloc_stack();
+ if (new_thread->kt_ctx.c_kstack == NULL)
+ {
+ slab_obj_free(kthread_allocator, new_thread);
+ return NULL;
+ }
+ new_thread->kt_kstack = new_thread->kt_ctx.c_kstack;
+ new_thread->kt_ctx.c_kstacksz = DEFAULT_STACK_SIZE;
+
+ // context_setup(&new_thread->kt_ctx, NULL, 0, NULL, new_thread->kt_kstack,
+ // DEFAULT_STACK_SIZE, thr->kt_proc->p_pml4); (done in fork, I hope)
+
+ // set the retval, errno, cancelled
+ new_thread->kt_retval = thr->kt_retval;
+ new_thread->kt_errno = thr->kt_errno;
+ new_thread->kt_cancelled = thr->kt_cancelled;
+ new_thread->kt_preemption_count = 0;
+ new_thread->kt_recent_core = ~0UL;
+ new_thread->kt_wchan = NULL;
+ // freshly initialize the rest of the fields
+ list_init(&new_thread->kt_mutexes);
+ list_link_init(&new_thread->kt_plink);
+ list_link_init(&new_thread->kt_qlink);
+ // list_insert_tail(&thr->kt_proc->p_threads, &new_thread->kt_plink); (done in fork)
+
+ return new_thread;
}
/*
@@ -162,7 +196,7 @@ void kthread_cancel(kthread_t *thr, void *retval)
// NOT_YET_IMPLEMENTED("PROCS: kthread_cancel");
KASSERT(thr != curthr);
- // FIXME: ask about the use of check_curthr_cancelled() in syscall_handler()
+ // ask about the use of check_curthr_cancelled() in syscall_handler()
int status = (int) retval;
dbg(DBG_THR, "Cancelling thread with proc name=%s, id=%d, status=%d\n",
thr->kt_proc->p_name, thr->kt_proc->p_pid, status);
diff --git a/kernel/proc/proc.c b/kernel/proc/proc.c
index eab6556..f7878a2 100644
--- a/kernel/proc/proc.c
+++ b/kernel/proc/proc.c
@@ -265,19 +265,21 @@ void proc_cleanup(long status)
// NOT_YET_IMPLEMENTED("PROCS: proc_cleanup");
dbg(DBG_PROC, "proc_cleanup called on proc with pid=%d with exit status=%d\n", curproc->p_pid, status);
-#ifdef __VFS__
+ curproc->p_state = PROC_DEAD;
+
+ // update state and status
+ // if (curthr->kt_cancelled) {
+ // curproc->p_status = curthr->kt_retval;
+ // } else {
+ // }
+ curproc->p_status = status;
+
for (int fd = 0; fd < NFILES; fd++)
{
- if (curproc->p_files[fd])
- {
- fput(curproc->p_files + fd);
- }
+ do_close(fd);
}
- if (curproc->p_cwd)
- {
- vput(&curproc->p_cwd);
- }
-#endif
+ vput(&curproc->p_cwd);
+ vmmap_destroy(&curproc->p_vmmap);
if (curproc->p_pid == PID_INIT)
{
@@ -297,16 +299,8 @@ void proc_cleanup(long status)
// remove & insert to init process
child->p_pproc = proc_initproc;
list_remove(&child->p_child_link);
- list_insert_tail(&curproc->p_pproc->p_children, &child->p_child_link);
+ list_insert_head(&proc_initproc->p_children, &child->p_child_link);
}
-
- // update state and status
- curproc->p_state = PROC_DEAD;
- // if (curthr->kt_cancelled) {
- // curproc->p_status = curthr->kt_retval;
- // } else {
- curproc->p_status = status;
- // }
}
/*
@@ -371,6 +365,7 @@ void proc_kill_all()
p->p_pproc->p_pid != PID_IDLE
)
{
+ // proc_kill(p, curproc->p_status);
proc_kill(p, -1);
}
}
diff --git a/kernel/util/debug.c b/kernel/util/debug.c
index a8f9732..fab63e7 100644
--- a/kernel/util/debug.c
+++ b/kernel/util/debug.c
@@ -19,7 +19,7 @@
* always be the first thing in this variable. Note that this setting can be
* changed at runtime by modifying the dbg_modes global variable.
*/
-#define INIT_DBG_MODES "-all,test,print,s5fs"
+#define INIT_DBG_MODES "-all,exec,elf"
/* Below is a truly terrible poll-driven serial driver that we use for debugging
* purposes - it outputs to COM1, but
diff --git a/kernel/vm/anon.c b/kernel/vm/anon.c
index 4a92fc9..a433395 100644
--- a/kernel/vm/anon.c
+++ b/kernel/vm/anon.c
@@ -27,7 +27,8 @@ static mobj_ops_t anon_mobj_ops = {.get_pframe = NULL,
*/
void anon_init()
{
- NOT_YET_IMPLEMENTED("VM: anon_init");
+ // NOT_YET_IMPLEMENTED("VM: anon_init");
+ anon_allocator = slab_allocator_create("anon", sizeof(mobj_t));
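+ // anonymous objects carry no state beyond the generic mobj_t, so the
+ // allocator is sized accordingly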
}
/*
@@ -36,8 +37,17 @@ void anon_init()
*/
mobj_t *anon_create()
{
- NOT_YET_IMPLEMENTED("VM: anon_create");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: anon_create");
+
+ // allocate a new mobj from the anon allocator
+ mobj_t *mobj = (mobj_t *)slab_obj_alloc(anon_allocator);
+ if (!mobj)
+ {
+ return NULL;
+ }
+
+ // initialize it as an anonymous object and return it locked
+ mobj_init(mobj, MOBJ_ANON, &anon_mobj_ops);
+ mobj_lock(mobj);
+ return mobj;
}
/*
@@ -46,7 +56,15 @@ mobj_t *anon_create()
*/
static long anon_fill_pframe(mobj_t *o, pframe_t *pf)
{
- NOT_YET_IMPLEMENTED("VM: anon_fill_pframe");
+ // NOT_YET_IMPLEMENTED("VM: anon_fill_pframe");
+
+ // anonymous memory has no backing store, so a page faulted in for
+ // the first time is simply zero-filled
+ memset(pf->pf_addr, 0, PAGE_SIZE);
+
+
return 0;
}
@@ -61,5 +79,10 @@ static long anon_flush_pframe(mobj_t *o, pframe_t *pf) { return 0; }
*/
static void anon_destructor(mobj_t *o)
{
- NOT_YET_IMPLEMENTED("VM: anon_destructor");
+ // NOT_YET_IMPLEMENTED("VM: anon_destructor");
+ // call the default destructor
+ mobj_default_destructor(o);
+
+ // free the mobj
+ slab_obj_free(anon_allocator, o);
}
diff --git a/kernel/vm/brk.c b/kernel/vm/brk.c
index 46d6fc2..69a315f 100644
--- a/kernel/vm/brk.c
+++ b/kernel/vm/brk.c
@@ -53,6 +53,105 @@
*/
long do_brk(void *addr, void **ret)
{
- NOT_YET_IMPLEMENTED("VM: do_brk");
+ // NOT_YET_IMPLEMENTED("VM: do_brk");
+
+ // If addr is NULL, return the current break
+ if (addr == NULL)
+ {
+ *ret = curproc->p_brk;
+ return 0;
+ }
+
+ // Check if the address is within the valid range
+ if ((uintptr_t)addr > USER_MEM_HIGH)
+ {
+ return -ENOMEM;
+ }
+
+ // Check if the address is within the valid range
+ if (addr < curproc->p_start_brk)
+ {
+ return -ENOMEM;
+ }
+
+ // Check if the address is the same as the current break
+ // if (addr == curproc->p_brk)
+ // {
+ // *ret = curproc->p_brk;
+ // return 0;
+ // }
+
+ // Check if the address is page aligned
+ uintptr_t addr_page_aligned = ADDR_TO_PN(PAGE_ALIGN_UP(addr));
+ uintptr_t p_brk_page_aligned = ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_brk));
+ uintptr_t p_start_brk_page_aligned = ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_start_brk));
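+ // note: despite the names, these hold page numbers (vfns), not
+ // addresses; all vmarea bookkeeping below is done in pages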
+
+ // Lookup the vmarea that represents the heap
+ vmarea_t *heap_vmarea = vmmap_lookup(curproc->p_vmmap, p_start_brk_page_aligned);
+
+ // Check if the address is the same as the current break
+ // If so, set rets and end here
+ if (addr_page_aligned == p_brk_page_aligned)
+ {
+ curproc->p_brk = addr;
+ *ret = addr;
+ return 0;
+ }
+
+ // Check the three cases: the heap must be created, grown, or shrunk
+ if (heap_vmarea == NULL)
+ {
+ // Create the heap
+ long ret = vmmap_is_range_empty(curproc->p_vmmap, p_start_brk_page_aligned, addr_page_aligned - p_start_brk_page_aligned);
+ if (!ret)
+ {
+ // On fail, return -ENOMEM
+ return -ENOMEM;
+ }
+
+ // Map the heap
+ int flags = MAP_PRIVATE | MAP_ANON;
+ int prot = PROT_READ | PROT_WRITE;
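+ // the heap is always a private, anonymous, read/write mapping with
+ // no backing file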
+ ret = vmmap_map(
+ curproc->p_vmmap, NULL,
+ p_start_brk_page_aligned,
+ addr_page_aligned - p_start_brk_page_aligned,
+ prot, flags,
+ 0,
+ VMMAP_DIR_LOHI, &heap_vmarea
+ );
+ if (ret < 0)
+ {
+ // On fail, return ret
+ return ret;
+ }
+ }
+ else if (addr_page_aligned < p_brk_page_aligned)
+ {
+ // Shrink the heap
+ long ret = vmmap_remove(curproc->p_vmmap, addr_page_aligned, p_brk_page_aligned - addr_page_aligned);
+ if (ret < 0)
+ {
+ // On fail, return ret
+ return ret;
+ }
+ }
+ else
+ {
+ // Modify the heap
+ long ret = vmmap_is_range_empty(curproc->p_vmmap, p_brk_page_aligned, addr_page_aligned - p_brk_page_aligned);
+ if (!ret)
+ {
+ // On fail, return -ENOMEM
+ return -ENOMEM;
+ }
+ // Update the heap
+ heap_vmarea->vma_end = addr_page_aligned;
+ }
+
+
+ // Update rets & return 0 on success
+ curproc->p_brk = addr;
+ *ret = addr;
return 0;
}
diff --git a/kernel/vm/mmap.c b/kernel/vm/mmap.c
index 082149b..78aa3b5 100644
--- a/kernel/vm/mmap.c
+++ b/kernel/vm/mmap.c
@@ -55,8 +55,134 @@
long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
void **ret)
{
- NOT_YET_IMPLEMENTED("VM: do_mmap");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: do_mmap");
+
+ // check if addr is page aligned when MAP_FIXED is specified
+ if (PAGE_ALIGNED(addr) == 0 && (flags & MAP_FIXED))
+ {
+ return -EINVAL;
+ }
+
+ // check if MAP_FIXED is specified and addr is out of range of the user address space
+ if ((flags & MAP_FIXED) && ((uintptr_t)addr < USER_MEM_LOW || (uintptr_t)addr + len > USER_MEM_HIGH))
+ {
+ return -EINVAL;
+ }
+
+ // check if len is not zero (len is an unsigned value, so it is always positive)
+ if (len == 0)
+ {
+ return -EINVAL;
+ }
+
+ // check if offset is positive and aligned
+ if (off < 0 || PAGE_ALIGNED(off) == 0)
+ {
+ return -EINVAL;
+ }
+
+ // check if flags do not contain MAP_PRIVATE or MAP_SHARED
+ if ((flags & MAP_PRIVATE) == 0 && (flags & MAP_SHARED) == 0)
+ {
+ return -EINVAL;
+ }
+
+ // check if fd is not a valid file descriptor and MAP_ANON was not set
+ if (fd < 0 && (flags & MAP_ANON) == 0)
+ {
+ return -EBADF;
+ }
+
+ // check if a file mapping was requested, but fd is not open for reading
+ // file error checking is done in if statement below
+ file_t *file = NULL;
+ if (fd >= 0 && (flags & MAP_ANON) == 0)
+ {
+ // get the file and check if it is valid
+ file = fget(fd);
+ if (file == NULL)
+ {
+ return -EBADF;
+ }
+
+ // ENODEV CHECKS
+
+ // check if the file's vnode's mmap operation doesn't exist
+ if (file->f_vnode->vn_ops == NULL || file->f_vnode->vn_ops->mmap == NULL)
+ {
+ fput(&file);
+ return -ENODEV;
+ }
+
+ // ACCESS CHECKS
+
+ // check if the file is not open for reading
+ if ((file->f_mode & FMODE_READ) == 0)
+ {
+ fput(&file);
+ return -EACCES;
+ }
+
+ // check if PROT_WRITE is set but the file is open in append mode
+ if ((prot & PROT_WRITE) && (file->f_mode & FMODE_APPEND))
+ {
+ fput(&file);
+ return -EACCES;
+ }
+
+ // check if MAP_SHARED was requested with PROT_WRITE, but fd is not open for writing
+ if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file->f_mode & FMODE_WRITE) == 0)
+ {
+ fput(&file);
+ return -EACCES;
+ }
+
+ // hold the file reference until the mapping is created below
+ }
+
+
+ // Now that error checking is done, we can proceed with the mapping
+ vmarea_t *vma = NULL;
+ long err = vmmap_map(
+ curproc->p_vmmap,
+ file ? file->f_vnode : NULL,
+ ADDR_TO_PN(PAGE_ALIGN_DOWN(addr)),
+ ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)addr + len)) - ADDR_TO_PN(PAGE_ALIGN_DOWN(addr)),
+ prot,
+ flags,
+ off,
+ VMMAP_DIR_HILO,
+ &vma
+ );
+
+ // release our file reference; the mapping holds its own through the mobj
+ if (file)
+ {
+ fput(&file);
+ }
+
+ // check if vmmap_map() failed
+ if (err < 0)
+ {
+ return err;
+ }
+
+ // set ret if it was provided
+ void *start = PN_TO_ADDR(vma->vma_start);
+ if (ret)
+ {
+ *ret = start;
+ }
+
+ // flush the TLB for the newly mapped range (tlb_flush_range takes a
+ // page count, not a byte count)
+ tlb_flush_range(
+ (uintptr_t) start,
+ vma->vma_end - vma->vma_start
+ );
+
+ // return 0 on success
+ return 0;
}
/*
@@ -78,6 +204,37 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
*/
long do_munmap(void *addr, size_t len)
{
- NOT_YET_IMPLEMENTED("VM: do_munmap");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: do_munmap");
+
+ // Check if addr is page aligned
+ if (PAGE_ALIGNED(addr) == 0)
+ {
+ return -EINVAL;
+ }
+
+ // Check if len is in bounds
+ if (len > USER_MEM_HIGH)
+ {
+ return -EINVAL;
+ }
+
+ // Check if the addr is out of range of the user address space
+ if ((uintptr_t)addr < USER_MEM_LOW || (uintptr_t)addr + len > USER_MEM_HIGH)
+ {
+ return -EINVAL;
+ }
+
+ // Check if len is 0
+ if (len == 0)
+ {
+ return -EINVAL;
+ }
+
+ // Remove the mapping, converting [addr, addr + len) to a page range
+ size_t lopage = ADDR_TO_PN(addr);
+ size_t npages = ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)addr + len)) - lopage;
+ return vmmap_remove(curproc->p_vmmap, lopage, npages);
} \ No newline at end of file
diff --git a/kernel/vm/pagefault.c b/kernel/vm/pagefault.c
index 764ce85..2e0c92d 100644
--- a/kernel/vm/pagefault.c
+++ b/kernel/vm/pagefault.c
@@ -49,5 +49,78 @@ void handle_pagefault(uintptr_t vaddr, uintptr_t cause)
{
dbg(DBG_VM, "vaddr = 0x%p (0x%p), cause = %lu\n", (void *)vaddr,
PAGE_ALIGN_DOWN(vaddr), cause);
- NOT_YET_IMPLEMENTED("VM: handle_pagefault");
+ // NOT_YET_IMPLEMENTED("VM: handle_pagefault");
+
+ // 1) Find the vmarea that contains vaddr, if it exists.
+ // check that the vaddr is valid
+ if (vaddr < USER_MEM_LOW || vaddr >= USER_MEM_HIGH)
+ {
+ do_exit(EFAULT);
+ }
+ // lookup the vmarea for this addr
+ vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr));
+ if (vma == NULL)
+ {
+ do_exit(EFAULT);
+ }
+
+ // 2) Check the vmarea's protections (see the vmarea_t struct) against the 'cause'
+ // error out if the fault has cause write and we don't have write permission in the area
+ if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE))
+ {
+ do_exit(EFAULT);
+ }
+ // error out if the fault has cause exec and we don't have exec permission in the area
+ if ((cause & FAULT_EXEC) && !(vma->vma_prot & PROT_EXEC))
+ {
+ do_exit(EFAULT);
+ }
+ // error out if we don't have read permission in the area; any access
+ // implies a read, and this also rejects PROT_NONE areas
+ if (!(vma->vma_prot & PROT_READ))
+ {
+ do_exit(EFAULT);
+ }
+
+ // 3) Obtain the corresponding pframe from the vmarea's mobj.
+ pframe_t *pf;
+ mobj_lock(vma->vma_obj);
+ int ret = mobj_get_pframe(
+ vma->vma_obj,
+ vma->vma_off + (ADDR_TO_PN(vaddr) - vma->vma_start),
+ cause & FAULT_WRITE ? 1 : 0,
+ &pf
+ );
+ mobj_unlock(vma->vma_obj);
+ if (ret < 0)
+ {
+ do_exit(EFAULT);
+ }
+
+ // 4) Finally, set up a call to pt_map to insert a new mapping into the appropriate pagetable
+ int pdflags = PT_PRESENT | PT_WRITE | PT_USER;
+ int ptflags = PT_PRESENT | PT_USER;
+ if (cause & FAULT_WRITE)
+ {
+ ptflags |= PT_WRITE;
+ }
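+ // the upper-level page-table entries stay writable and user-accessible;
+ // the leaf entry's PT_WRITE bit is what actually gates write access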
+
+ int err = pt_map(
+ curproc->p_pml4,
+ pt_virt_to_phys((uintptr_t) pf->pf_addr),
+ (uintptr_t) PAGE_ALIGN_DOWN(vaddr),
+ pdflags,
+ ptflags
+ );
+ if (err < 0)
+ {
+ do_exit(EFAULT);
+ }
+
+ // 5) Flush the TLB
+ tlb_flush((uintptr_t) PAGE_ALIGN_DOWN(vaddr));
}
diff --git a/kernel/vm/shadow.c b/kernel/vm/shadow.c
index 312b32e..91b1fce 100644
--- a/kernel/vm/shadow.c
+++ b/kernel/vm/shadow.c
@@ -41,7 +41,8 @@ static mobj_ops_t shadow_mobj_ops = {.get_pframe = shadow_get_pframe,
*/
void shadow_init()
{
- NOT_YET_IMPLEMENTED("VM: shadow_init");
+ // NOT_YET_IMPLEMENTED("VM: shadow_init");
+ shadow_allocator = slab_allocator_create("shadow", sizeof(mobj_shadow_t));
}
/*
@@ -60,8 +61,37 @@ void shadow_init()
*/
mobj_t *shadow_create(mobj_t *shadowed)
{
- NOT_YET_IMPLEMENTED("VM: shadow_create");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: shadow_create");
+
+ // create a new shadow object
+ mobj_shadow_t *so = (mobj_shadow_t *)slab_obj_alloc(shadow_allocator);
+ if (!so)
+ {
+ return NULL;
+ }
+
+ // initialize the mobj_shadow_t
+
+ // set the bottom_mobj based on the two cases: shadowing another shadow
+ // object shares its bottom object; otherwise the shadowed object is
+ // itself the bottom
+ if (shadowed->mo_type == MOBJ_SHADOW)
+ {
+ so->bottom_mobj = MOBJ_TO_SO(shadowed)->bottom_mobj;
+ }
+ else
+ {
+ so->bottom_mobj = shadowed;
+ }
+ // init the other fields
+ so->shadowed = shadowed;
+ mobj_init(&so->mobj, MOBJ_SHADOW, &shadow_mobj_ops);
+ mobj_ref(so->shadowed);
+ mobj_ref(so->bottom_mobj);
+
+ // lock the shadow object
+ mobj_lock(&so->mobj);
+
+ // return the shadow object
+ return &so->mobj;
}
/*
@@ -80,7 +110,52 @@ mobj_t *shadow_create(mobj_t *shadowed)
*/
void shadow_collapse(mobj_t *o)
{
- NOT_YET_IMPLEMENTED("VM: shadow_collapse");
+ // NOT_YET_IMPLEMENTED("VM: shadow_collapse");
+
+ // walk the chain below o, collapsing every shadow object whose only
+ // remaining reference is the chain itself (refcount == 1)
+ mobj_shadow_t *so = MOBJ_TO_SO(o);
+ while (so->shadowed->mo_type == MOBJ_SHADOW)
+ {
+ mobj_t *shadowed = so->shadowed;
+
+ // if anyone else still references this shadow object, it cannot be
+ // collapsed; step down the chain instead
+ if (shadowed->mo_refcount != 1)
+ {
+ so = MOBJ_TO_SO(shadowed);
+ continue;
+ }
+
+ // migrate every pframe that the object above does not already have
+ mobj_lock(shadowed);
+ list_iterate(&shadowed->mo_pframes, pframe, pframe_t, pf_link)
+ {
+ pframe_t *spf = NULL;
+ mobj_find_pframe(&so->mobj, pframe->pf_pagenum, &spf);
+ if (spf == NULL)
+ {
+ // not present above: move the frame up the chain
+ list_remove(&pframe->pf_link);
+ list_insert_tail(&so->mobj.mo_pframes, &pframe->pf_link);
+ }
+ else
+ {
+ // already present above: the copy above is the newer one
+ pframe_release(&spf);
+ }
+ }
+
+ // splice the collapsed object out of the chain and drop the chain's
+ // reference to it
+ so->shadowed = MOBJ_TO_SO(shadowed)->shadowed;
+ mobj_ref(so->shadowed);
+ mobj_put_locked(&shadowed);
+ }
}
/*
@@ -111,8 +186,47 @@ void shadow_collapse(mobj_t *o)
static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
pframe_t **pfp)
{
- NOT_YET_IMPLEMENTED("VM: shadow_get_pframe");
- return 0;
+ // NOT_YET_IMPLEMENTED("VM: shadow_get_pframe");
+
+ // if forwrite is set, use mobj_default_get_pframe
+ if (forwrite)
+ {
+ return mobj_default_get_pframe(o, pagenum, forwrite, pfp);
+ }
+
+ // else, check if the object already contains the desired frame
+ pframe_t *pf = NULL;
+ mobj_find_pframe(o, pagenum, &pf);
+ if (pf)
+ {
+ // if it does, return the pframe
+ *pfp = pf;
+ return 0;
+ }
+
+ // iterate through the shadow chain to find the nearest shadow mobj that has the frame
+ mobj_shadow_t *so = MOBJ_TO_SO(o);
+ mobj_t *iter = so->shadowed;
+ while (iter && iter->mo_type == MOBJ_SHADOW)
+ {
+ mobj_lock(iter);
+ mobj_find_pframe(iter, pagenum, &pf);
+ mobj_unlock(iter);
+ if (pf)
+ {
+ *pfp = pf;
+ return 0;
+ }
+ // update the iterator
+ iter = MOBJ_TO_SO(iter)->shadowed;
+ }
+
+ // if no shadow objects have the page, call mobj_get_pframe() to get the page from the bottom object
+ // at this point, iter is the bottom object
+ mobj_lock(iter);
+ long ret = mobj_get_pframe(iter, pagenum, forwrite, pfp);
+ mobj_unlock(iter);
+ return ret;
}
/*
@@ -138,8 +252,47 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
*/
static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
{
- NOT_YET_IMPLEMENTED("VM: shadow_fill_pframe");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: shadow_fill_pframe");
+
+ // get the mobj_shadow_t
+ mobj_shadow_t *so = MOBJ_TO_SO(o);
+ // iterate over the shadow chain
+ mobj_t *iter = so->shadowed;
+ while (iter && iter->mo_type == MOBJ_SHADOW)
+ {
+ // get the pframe from the shadow object
+ pframe_t *spf = NULL;
+ mobj_lock(iter);
+ mobj_find_pframe(iter, pf->pf_pagenum, &spf);
+ mobj_unlock(iter);
+
+ // if the pframe is found, copy the contents into pf
+ // then release the pframe
+ if (spf)
+ {
+ memcpy(pf->pf_addr, spf->pf_addr, PAGE_SIZE);
+ pframe_release(&spf);
+ return 0;
+ }
+
+ // update the iterator
+ iter = MOBJ_TO_SO(iter)->shadowed;
+ }
+
+ // if none of the shadow objects have a copy of the frame, use mobj_get_pframe on the bottom object
+ pframe_t *spf = NULL;
+ mobj_lock(iter);
+ long ret = mobj_get_pframe(iter, pf->pf_pagenum, 0, &spf);
+ mobj_unlock(iter);
+ // if the bottom object supplied the page, copy its contents into pf
+ // and release the source pframe
+ if (ret == 0)
+ {
+ memcpy(pf->pf_addr, spf->pf_addr, PAGE_SIZE);
+ pframe_release(&spf);
+ }
+
+ return ret;
}
/*
@@ -153,8 +306,8 @@ static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
*/
static long shadow_flush_pframe(mobj_t *o, pframe_t *pf)
{
- NOT_YET_IMPLEMENTED("VM: shadow_flush_pframe");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: shadow_flush_pframe");
+ return 0;
}
/*
@@ -169,5 +322,18 @@ static long shadow_flush_pframe(mobj_t *o, pframe_t *pf)
*/
static void shadow_destructor(mobj_t *o)
{
- NOT_YET_IMPLEMENTED("VM: shadow_destructor");
+ // NOT_YET_IMPLEMENTED("VM: shadow_destructor");
+
+ // get the mobj_shadow_t
+ mobj_shadow_t *so = MOBJ_TO_SO(o);
+
+ // call the default destructor
+ mobj_default_destructor(o);
+
+ // put the shadow and bottom_mobj
+ mobj_put(&so->shadowed);
+ mobj_put(&so->bottom_mobj);
+
+ // free the slab
+ slab_obj_free(shadow_allocator, so);
}
diff --git a/kernel/vm/vmmap.c b/kernel/vm/vmmap.c
index 0e2dad6..fd99c55 100644
--- a/kernel/vm/vmmap.c
+++ b/kernel/vm/vmmap.c
@@ -16,6 +16,7 @@
#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/slab.h"
+#include "mm/tlb.h"
static slab_allocator_t *vmmap_allocator;
static slab_allocator_t *vmarea_allocator;
@@ -32,8 +33,27 @@ void vmmap_init(void)
*/
vmarea_t *vmarea_alloc(void)
{
- NOT_YET_IMPLEMENTED("VM: vmarea_alloc");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: vmarea_alloc");
+
+ // Allocate a new vmarea
+ vmarea_t *new_vmarea = (vmarea_t *)slab_obj_alloc(vmarea_allocator);
+ if (new_vmarea == NULL)
+ {
+ return NULL;
+ }
+
+ // Initialize the fields of the vmarea
+ new_vmarea->vma_start = 0;
+ new_vmarea->vma_end = 0;
+ new_vmarea->vma_off = 0;
+ new_vmarea->vma_prot = 0;
+ new_vmarea->vma_flags = 0;
+ new_vmarea->vma_obj = NULL;
+ new_vmarea->vma_vmmap = NULL;
+ list_link_init(&new_vmarea->vma_plink);
+
+ // Return the new vmarea
+ return new_vmarea;
}
/*
@@ -42,7 +62,22 @@ vmarea_t *vmarea_alloc(void)
*/
void vmarea_free(vmarea_t *vma)
{
- NOT_YET_IMPLEMENTED("VM: vmarea_free");
+ // NOT_YET_IMPLEMENTED("VM: vmarea_free");
+
+ // Remove the vmarea from any lists it may be on
+ if (list_link_is_linked(&vma->vma_plink))
+ {
+ list_remove(&vma->vma_plink);
+ }
+
+ // Put the vma_obj if it exists
+ if (vma->vma_obj != NULL)
+ {
+ mobj_put(&vma->vma_obj);
+ }
+
+ // Free the vmarea
+ slab_obj_free(vmarea_allocator, vma);
}
/*
@@ -50,8 +85,20 @@ void vmarea_free(vmarea_t *vma)
*/
vmmap_t *vmmap_create(void)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_create");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: vmmap_create");
+
+ // Allocate a new vmmap
+ vmmap_t *new_vmmap = (vmmap_t *)slab_obj_alloc(vmmap_allocator);
+ if (new_vmmap == NULL)
+ {
+ return NULL;
+ }
+
+ // Initialize the fields of the vmmap
+ list_init(&new_vmmap->vmm_list);
+ new_vmmap->vmm_proc = curproc;
+
+ return new_vmmap;
}
/*
@@ -60,7 +107,22 @@ vmmap_t *vmmap_create(void)
*/
void vmmap_destroy(vmmap_t **mapp)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_destroy");
+ // NOT_YET_IMPLEMENTED("VM: vmmap_destroy");
+
+ vmmap_t *map = *mapp;
+
+ // Iterate through the list of vmareas and free each one
+ list_iterate(&(map)->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ list_remove(&vma->vma_plink);
+ vmarea_free(vma);
+ }
+
+ // Free the map
+ slab_obj_free(vmmap_allocator, map);
+
+ // Set the map to NULL
+ *mapp = NULL;
}
/*
@@ -70,7 +132,22 @@ void vmmap_destroy(vmmap_t **mapp)
*/
void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_insert*");
+ // NOT_YET_IMPLEMENTED("VM: vmmap_insert*");
+
+ // iterate over the list of vmareas
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ // if the current vmarea begins at or after the end of the new one
+ if (vma->vma_start >= new_vma->vma_end)
+ {
+ // insert the new vmarea before the current vmarea
+ list_insert_before(&vma->vma_plink, &new_vma->vma_plink);
+ return;
+ }
+ }
+
+ // insert this map to the tail
+ list_insert_tail(&map->vmm_list, &new_vma->vma_plink);
}
/*
@@ -90,7 +167,51 @@ void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
*/
ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_find_range");
+ // NOT_YET_IMPLEMENTED("VM: vmmap_find_range");
+
+ // case 1: dir is VMMAP_DIR_LOHI, scan from the bottom of user memory up
+ if (dir == VMMAP_DIR_LOHI)
+ {
+ size_t vfn = ADDR_TO_PN(USER_MEM_LOW);
+ while (vfn <= ADDR_TO_PN(USER_MEM_HIGH) - npages)
+ {
+ vmarea_t *vma = vmmap_lookup(map, vfn);
+ if (vma != NULL)
+ {
+ // this page is taken: skip past the vmarea occupying it
+ vfn = vma->vma_end;
+ }
+ else if (vmmap_is_range_empty(map, vfn, npages))
+ {
+ // the entire candidate range is free, not just its first page
+ return vfn;
+ }
+ else
+ {
+ vfn++;
+ }
+ }
+ }
+
+ // case 2: dir is VMMAP_DIR_HILO, scan from the top of user memory down
+ else if (dir == VMMAP_DIR_HILO)
+ {
+ size_t vfn = ADDR_TO_PN(USER_MEM_HIGH) - npages;
+ while (vfn >= ADDR_TO_PN(USER_MEM_LOW))
+ {
+ if (vmmap_is_range_empty(map, vfn, npages))
+ {
+ return vfn;
+ }
+ // step below the highest vmarea overlapping the candidate range,
+ // guarding against underflow
+ vmarea_t *vma = vmmap_lookup(map, vfn + npages - 1);
+ size_t bound = vma ? vma->vma_start : vfn + npages - 1;
+ if (bound < ADDR_TO_PN(USER_MEM_LOW) + npages)
+ {
+ break;
+ }
+ vfn = bound - npages;
+ }
+ }
+
+ // if no range exists, return -1
return -1;
}
@@ -100,7 +221,19 @@ ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
*/
vmarea_t *vmmap_lookup(vmmap_t *map, size_t vfn)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_lookup");
+ // NOT_YET_IMPLEMENTED("VM: vmmap_lookup");
+
+ // iterate over the list of vmareas
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ // if the vfn lies within the range of the current vmarea
+ if (vfn >= vma->vma_start && vfn < vma->vma_end)
+ {
+ return vma;
+ }
+ }
+
+ // if the page is unmapped, return NULL
return NULL;
}
@@ -140,8 +273,83 @@ void vmmap_collapse(vmmap_t *map)
*/
vmmap_t *vmmap_clone(vmmap_t *map)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_clone");
- return NULL;
+ // NOT_YET_IMPLEMENTED("VM: vmmap_clone");
+
+ // Create a new vmmap
+ vmmap_t *new_vmmap = vmmap_create();
+ if (new_vmmap == NULL)
+ {
+ return NULL;
+ }
+
+ // Iterate over the list of vmareas
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ // Create a new vmarea
+ vmarea_t *new_vmarea = vmarea_alloc();
+ if (new_vmarea == NULL)
+ {
+ vmmap_destroy(&new_vmmap);
+ return NULL;
+ }
+
+ // Clone the fields of the vmarea
+ new_vmarea->vma_start = vma->vma_start;
+ new_vmarea->vma_end = vma->vma_end;
+ new_vmarea->vma_off = vma->vma_off;
+ new_vmarea->vma_prot = vma->vma_prot;
+ new_vmarea->vma_flags = vma->vma_flags;
+
+ // If the vmarea is share-mapped
+ if (vma->vma_flags & MAP_SHARED)
+ {
+ new_vmarea->vma_obj = vma->vma_obj;
+ mobj_ref(new_vmarea->vma_obj);
+ }
+
+ // If the vmarea is not share-mapped
+ else
+ {
+ // Create two shadow objects
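+ // both parent and child must copy-on-write against the same
+ // underlying object, so each side gets its own shadow object on top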
+ mobj_lock(vma->vma_obj);
+ mobj_t *shadow_obj_map = shadow_create(vma->vma_obj);
+ mobj_unlock(vma->vma_obj);
+
+ if (shadow_obj_map == NULL)
+ {
+ vmarea_free(new_vmarea);
+ vmmap_destroy(&new_vmmap);
+ return NULL;
+ }
+ mobj_unlock(shadow_obj_map); // shadow_create returns the object locked
+
+ mobj_lock(vma->vma_obj);
+ mobj_t *shadow_obj_new = shadow_create(vma->vma_obj);
+ mobj_unlock(vma->vma_obj);
+
+ if (shadow_obj_new == NULL)
+ {
+ mobj_put(&shadow_obj_map);
+ vmarea_free(new_vmarea);
+ vmmap_destroy(&new_vmmap);
+ return NULL;
+ }
+ mobj_unlock(shadow_obj_new); // shadow_create returns the object locked
+
+ // Put the original vma_obj
+ mobj_put(&vma->vma_obj);
+
+ // Insert the shadow objects into their respective vmareas
+ new_vmarea->vma_obj = shadow_obj_new;
+ vma->vma_obj = shadow_obj_map;
+ }
+
+ // Insert the new vmarea into the new vmmap
+ vmmap_insert(new_vmmap, new_vmarea);
+ }
+
+ // Return the new vmmap
+ return new_vmmap;
}
/*
@@ -182,8 +390,98 @@ vmmap_t *vmmap_clone(vmmap_t *map)
long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages,
int prot, int flags, off_t off, int dir, vmarea_t **new_vma)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_map");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: vmmap_map");
+
+ // Create a new vmarea
+ // see if lopage is 0. if so, use vmmap_find_range() to get a valid range
+ if (lopage == 0)
+ {
+ ssize_t res = vmmap_find_range(map, npages, dir);
+ if (res < 0)
+ {
+ return -ENOMEM;
+ }
+ lopage = (size_t)res;
+ }
+
+ // Alloc the new vmarea
+ vmarea_t *new_vmarea = vmarea_alloc();
+ if (new_vmarea == NULL)
+ {
+ return -ENOMEM;
+ }
+ // Set the fields of the new vmarea
+ new_vmarea->vma_start = lopage;
+ new_vmarea->vma_end = lopage + npages;
+ new_vmarea->vma_off = ADDR_TO_PN(off);
+ new_vmarea->vma_prot = prot;
+ new_vmarea->vma_flags = flags;
+ new_vmarea->vma_vmmap = map;
+ new_vmarea->vma_obj = NULL;
+
+ // If file is NULL, create an anon object
+ if (file == NULL)
+ {
+ new_vmarea->vma_obj = anon_create();
+ mobj_unlock(new_vmarea->vma_obj); // unlock the anon object before use
+ if (new_vmarea->vma_obj == NULL)
+ {
+ vmarea_free(new_vmarea);
+ return -ENOMEM;
+ }
+ }
+ else
+ {
+ // If file is non-NULL, use the vnode's mmap operation to get the mobj
+ long ret = file->vn_ops->mmap(file, &new_vmarea->vma_obj);
+ if (ret < 0)
+ {
+ // on fail, free the new vmarea and return the error
+ vmarea_free(new_vmarea);
+ return ret;
+ }
+ }
+
+ // If MAP_PRIVATE is specified, set up a shadow object
+ if (flags & MAP_PRIVATE)
+ {
+ mobj_lock(new_vmarea->vma_obj);
+ mobj_t *shadow_obj = shadow_create(new_vmarea->vma_obj);
+ mobj_unlock(new_vmarea->vma_obj);
+ if (shadow_obj == NULL)
+ {
+ // vmarea_free also puts the underlying object
+ vmarea_free(new_vmarea);
+ return -ENOMEM;
+ }
+ mobj_unlock(shadow_obj); // shadow_create returns the object locked
+
+ // the shadow object now holds its own reference to the underlying
+ // object, so drop ours before swapping the shadow in; a private
+ // mapping must never write through to the underlying object
+ mobj_put(&new_vmarea->vma_obj);
+ new_vmarea->vma_obj = shadow_obj;
+ }
+
+ // If MAP_FIXED is specified and the given range overlaps with any preexisting mappings, remove the preexisting mappings
+ if (lopage != 0 && (flags & MAP_FIXED))
+ {
+ long ret = vmmap_remove(map, lopage, npages);
+ if (ret < 0)
+ {
+ // vmarea_free also puts the vmarea's shadow/anon object
+ vmarea_free(new_vmarea);
+ return ret;
+ }
+ }
+
+ // Insert the new vmarea into the map
+ vmmap_insert(map, new_vmarea);
+
+ // set ret val and return 0
+ if (new_vma)
+ {
+ *new_vma = new_vmarea;
+ }
+ return 0;
}
/*
@@ -219,8 +517,107 @@ long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages,
*/
long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_remove");
- return -1;
+ // NOT_YET_IMPLEMENTED("VM: vmmap_remove");
+
+ // Iterate over the list of vmareas
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ // if the vmarea is completely inside the region to be unmapped
+ if (vma->vma_start < lopage && vma->vma_end > lopage + npages)
+ {
+ // split the old vmarea into two vmareas
+ vmarea_t *new_vmarea = vmarea_alloc();
+ if (new_vmarea == NULL)
+ {
+ return -ENOMEM;
+ }
+
+ // Set the fields of the new vmarea
+ new_vmarea->vma_start = lopage + npages;
+ new_vmarea->vma_end = vma->vma_end;
+ new_vmarea->vma_off = vma->vma_off + (new_vmarea->vma_start - vma->vma_start);
+ new_vmarea->vma_prot = vma->vma_prot;
+ new_vmarea->vma_flags = vma->vma_flags;
+ new_vmarea->vma_vmmap = map;
+ new_vmarea->vma_obj = vma->vma_obj;
+ // increment the refcount of the object associated with the vmarea
+ mobj_ref(new_vmarea->vma_obj);
+
+ // Shorten the length of the old vmarea
+ vma->vma_end = lopage;
+
+ // Insert the new vmarea into the map
+ vmmap_insert(map, new_vmarea);
+
+ // call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
+ pt_unmap_range(
+ map->vmm_proc->p_pml4,
+ (uintptr_t)PN_TO_ADDR(lopage),
+ (uintptr_t)PN_TO_ADDR(lopage + npages)
+ );
+ tlb_flush_range(
+ (uintptr_t)PN_TO_ADDR(lopage),
+ npages
+ );
+ }
+
+ // if the region overlaps the end of the vmarea
+ else if (vma->vma_start < lopage && vma->vma_end > lopage && vma->vma_end <= lopage + npages)
+ {
+ // shorten the length of the mapping
+ vma->vma_end = lopage;
+ // call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
+ pt_unmap_range(
+ map->vmm_proc->p_pml4,
+ (uintptr_t)PN_TO_ADDR(lopage),
+ (uintptr_t)PN_TO_ADDR(lopage + npages)
+ );
+ tlb_flush_range(
+ (uintptr_t)PN_TO_ADDR(lopage),
+ npages
+ );
+ }
+
+ // if the region overlaps the beginning of the vmarea
+ else if (vma->vma_start >= lopage && vma->vma_end > lopage + npages && vma->vma_start < lopage + npages)
+ {
+ // move the beginning of the mapping and shorten its length
+ vma->vma_off += (lopage + npages - vma->vma_start);
+ vma->vma_start = lopage + npages;
+
+ // call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
+ pt_unmap_range(
+ map->vmm_proc->p_pml4,
+ (uintptr_t)PN_TO_ADDR(lopage),
+ (uintptr_t)PN_TO_ADDR(lopage + npages)
+ );
+ tlb_flush_range(
+ (uintptr_t)PN_TO_ADDR(lopage),
+ npages
+ );
+ }
+
+ // if the region completely contains the vmarea
+ else if (vma->vma_start >= lopage && vma->vma_end <= lopage + npages)
+ {
+ // remove the vmarea from the list
+ list_remove(&vma->vma_plink);
+ vmarea_free(vma);
+
+ // call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
+ pt_unmap_range(
+ map->vmm_proc->p_pml4,
+ (uintptr_t)PN_TO_ADDR(lopage),
+ (uintptr_t)PN_TO_ADDR(lopage + npages)
+ );
+ tlb_flush_range(
+ (uintptr_t)PN_TO_ADDR(lopage),
+ npages
+ );
+ }
+ }
+
+ return 0;
}
/*
@@ -229,8 +626,29 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
*/
long vmmap_is_range_empty(vmmap_t *map, size_t startvfn, size_t npages)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_is_range_empty");
- return 0;
+ // NOT_YET_IMPLEMENTED("VM: vmmap_is_range_empty");
+
+ // Iterate over the list of vmareas
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ // the range [startvfn, startvfn + npages) is non-empty exactly when
+ // some vmarea overlaps it
+ if (vma->vma_start < startvfn + npages && vma->vma_end > startvfn)
+ {
+ return 0;
+ }
+ }
+
+ return 1;
}
/*
@@ -250,7 +668,55 @@ long vmmap_is_range_empty(vmmap_t *map, size_t startvfn, size_t npages)
*/
long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_read");
+ // NOT_YET_IMPLEMENTED("VM: vmmap_read");
+
+ // Iterate over the page numbers
+ size_t vfn = ADDR_TO_PN(vaddr);
+ // round the end up so a partially covered final page is included
+ size_t end_vfn = ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)vaddr + count));
+ size_t bytes_read = 0;
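+ // copy page by page; PAGE_OFFSET(cursor) accounts for a first page
+ // that is only partially covered by the range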
+ while (vfn < end_vfn)
+ {
+ // Lookup the vmarea for this page number
+ vmarea_t *vma = vmmap_lookup(map, vfn);
+ if (vma == NULL)
+ {
+ return -EFAULT;
+ }
+
+ // Find the pframe for this page number
+ pframe_t *pf;
+ mobj_lock(vma->vma_obj);
+ long ret = mobj_get_pframe(vma->vma_obj, vfn - vma->vma_start + vma->vma_off, 0, &pf);
+ mobj_unlock(vma->vma_obj);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ // Read from the pframe and copy it into buf
+ void *cursor = (void *)(bytes_read + vaddr);
+ size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_read);
+ memcpy(
+ buf + bytes_read,
+ (void *)pf->pf_addr + PAGE_OFFSET(cursor),
+ bytes_this_iteration
+ );
+
+ // Unlock the pframe
+ pframe_release(&pf);
+
+ // Increment the bytes read
+ bytes_read += bytes_this_iteration;
+
+ // check if we have read enough
+ if (bytes_read >= count)
+ {
+ return 0;
+ }
+
+ // Increment the page number
+ vfn++;
+ }
return 0;
}
@@ -272,7 +738,59 @@ long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
*/
long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
{
- NOT_YET_IMPLEMENTED("VM: vmmap_write");
+ // NOT_YET_IMPLEMENTED("VM: vmmap_write");
+
+ // Iterate over the page numbers
+ size_t vfn = ADDR_TO_PN(vaddr);
+ // round the end up so a partially covered final page is included
+ size_t end_vfn = ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)vaddr + count));
+ size_t bytes_written = 0;
+ while(vfn < end_vfn)
+ {
+ // Lookup the vmarea for this page number
+ vmarea_t *vma = vmmap_lookup(map, vfn);
+ if (vma == NULL)
+ {
+ return -EFAULT;
+ }
+
+ // Find the pframe for this page number
+ pframe_t *pf;
+ mobj_lock(vma->vma_obj);
+ long ret = mobj_get_pframe(vma->vma_obj, vfn - vma->vma_start + vma->vma_off, 1, &pf);
+ mobj_unlock(vma->vma_obj);
+ if (ret < 0)
+ {
+ return ret;
+ }
+
+ // Write to the pframe, copying data from buf
+ void *cursor = (void *)(bytes_written + vaddr);
+ size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_written);
+ memcpy(
+ (void *)pf->pf_addr + PAGE_OFFSET(cursor),
+ buf + bytes_written,
+ bytes_this_iteration
+ );
+
+ // Dirty the page
+ pf->pf_dirty = 1;
+
+ // Unlock the pframe
+ pframe_release(&pf);
+
+ // Increment the bytes written
+ bytes_written += bytes_this_iteration;
+
+ // check if we have written enough
+ if (bytes_written >= count)
+ {
+ return 0;
+ }
+
+ // Increment the page number
+ vfn++;
+ }
+
return 0;
}