aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorsotech117 <michael_foiani@brown.edu>2024-05-14 03:19:46 +0000
committersotech117 <michael_foiani@brown.edu>2024-05-14 03:19:46 +0000
commit06d50155ac0bd079bfca0f5728346d8beeb205f2 (patch)
tree2f20c8ba099304d6ea06fb76e8a0807b8afa5a5f
parent7585cb5ad84babe9db8c6595de464e33fb878f0c (diff)
weenix fixes
-rw-r--r--Config.mk2
-rw-r--r--kernel/api/access.c42
-rw-r--r--kernel/api/syscall.c30
-rw-r--r--kernel/drivers/memdevs.c2
-rw-r--r--kernel/fs/namev.c2
-rw-r--r--kernel/fs/s5fs/s5fs.c2
-rw-r--r--kernel/fs/s5fs/s5fs_subr.c6
-rw-r--r--kernel/fs/vfs_syscall.c7
-rw-r--r--kernel/fs/vnode_specials.c2
-rw-r--r--kernel/main/kmain.c4
-rw-r--r--kernel/proc/fork.c50
-rw-r--r--kernel/proc/proc.c20
-rw-r--r--kernel/vm/anon.c4
-rw-r--r--kernel/vm/brk.c4
-rw-r--r--kernel/vm/mmap.c48
-rw-r--r--kernel/vm/shadow.c83
-rw-r--r--kernel/vm/vmmap.c225
17 files changed, 228 insertions, 305 deletions
diff --git a/Config.mk b/Config.mk
index 2749f85..6b5a1e8 100644
--- a/Config.mk
+++ b/Config.mk
@@ -63,7 +63,7 @@
# Parameters for the hard disk we build (must be compatible!)
# If the FS is too big for the disk, BAD things happen!
- DISK_BLOCKS=3072 # For fsmaker
+ DISK_BLOCKS=2048 # For fsmaker
DISK_INODES=240 # For fsmaker
# Boolean options specified in this specified in this file that should be
diff --git a/kernel/api/access.c b/kernel/api/access.c
index 9941971..aa8e83c 100644
--- a/kernel/api/access.c
+++ b/kernel/api/access.c
@@ -116,30 +116,11 @@ long user_vecdup(argvec_t *uvec, char ***kvecp)
*/
long addr_perm(proc_t *p, const void *vaddr, int perm)
{
- // NOT_YET_IMPLEMENTED("VM: addr_perm");
+ // NOT_YET_IMPLEMENTED("VM: addr_perm");
- // // loop through the vmareas in the process's vmmap
- // vmarea_t *vma = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
-
- // // if the vma doesn't exist, return 0
- // if (!vma)
- // {
- // return 0;
- // }
-
- // return !!(perm & vma->vma_prot);
-
- // TODO: FIX MEEE
-
- vmarea_t* area = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
- if (!area) {
- return 0;
- }
- if (perm & area->vma_prot) {
- return 1;
- } else {
- return 0;
- }
+ // loop through the vmareas in the process's vmmap
+ vmarea_t *vma = vmmap_lookup(p->p_vmmap, ADDR_TO_PN(vaddr));
+ return vma && !!(perm & vma->vma_prot);
}
/*
@@ -153,20 +134,17 @@ long addr_perm(proc_t *p, const void *vaddr, int perm)
*/
long range_perm(proc_t *p, const void *vaddr, size_t len, int perm)
{
- // NOT_YET_IMPLEMENTED("VM: range_perm");
- // loop through the page numbers in the range
- size_t vfn = ADDR_TO_PN(vaddr);
- size_t end_vfn = ADDR_TO_PN((uintptr_t)vaddr + len);
- for (size_t i = vfn; i < end_vfn; i++)
+ for (
+ size_t vfn = ADDR_TO_PN(vaddr);
+ vfn < ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)vaddr + len));
+ vfn++
+ )
{
- // check the permissions for each page
- if (!addr_perm(p, PN_TO_ADDR(i), perm))
+ if (!addr_perm(p, PN_TO_ADDR(vfn), perm))
{
return 0;
}
}
-
- // return 1 if all pages have the correct permissions
return 1;
}
diff --git a/kernel/api/syscall.c b/kernel/api/syscall.c
index 467b7d6..7c76e51 100644
--- a/kernel/api/syscall.c
+++ b/kernel/api/syscall.c
@@ -77,11 +77,14 @@ static long sys_read(read_args_t *args)
ERROR_OUT_RET(ret);
// Allocate a temporary buffer (a page-aligned block of n pages that are enough space to store the number of bytes to read)
- size_t size_in_pages = (kargs.nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
- void *addr = (void *)page_alloc_n(size_in_pages);
+ size_t size_in_pages = 0;
+ while(++size_in_pages * PAGE_SIZE < kargs.nbytes)
+ ;
+ void *addr = page_alloc_n(size_in_pages);
if (!addr)
{
- ERROR_OUT_RET(-ENOMEM);
+ ret = -ENOMEM;
+ ERROR_OUT_RET(ret);
}
// Call do_read() with the buffer and then copy the buffer to the userland args after the system call
@@ -100,7 +103,7 @@ static long sys_read(read_args_t *args)
}
// copy the buffer to the userland args after the system call
- ret = copy_to_user(kargs.buf, addr, ret);
+ ret = copy_to_user(kargs.buf, addr, kargs.nbytes);
// if ret < 0, free the temporary buffer and return -1
if (ret < 0)
{
@@ -132,7 +135,9 @@ static long sys_write(write_args_t *args)
ERROR_OUT_RET(ret);
// Allocate a temporary buffer (a page-aligned block of n pages that are enough space to store the number of bytes to write)
- size_t size_in_pages = (kargs.nbytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ size_t size_in_pages = 0;
+ while(++size_in_pages * PAGE_SIZE < kargs.nbytes)
+ ;
void *addr = (void *)page_alloc_n(size_in_pages);
if (!addr)
{
@@ -187,10 +192,9 @@ static long sys_getdents(getdents_args_t *args)
ERROR_OUT_RET(-EINVAL);
}
- size_t count_read = 0;
-
- // iterate over the directory entries
- while (count_read * sizeof(dirent_t) <= kargs.count)
+ // iterate over the directory entries to get the number of dirs
+ size_t num_dirs = 0;
+ while (num_dirs * sizeof(dirent_t) < kargs.count)
{
// read count / sizeof(dirent_t) directory entries into the provided dirp and call do_getdent
dirent_t d;
@@ -209,13 +213,13 @@ static long sys_getdents(getdents_args_t *args)
}
// copy the dirent_t to the userland args after the system call
- ret = copy_to_user(kargs.dirp + count_read, &d, sizeof(dirent_t));
+ ret = copy_to_user(kargs.dirp + num_dirs, &d, sizeof(dirent_t));
ERROR_OUT_RET(ret); // error check
- count_read++;
+ num_dirs++;
}
- return count_read * sizeof(dirent_t);
+ return num_dirs * sizeof(dirent_t);
}
#ifdef __MOUNTING__
@@ -868,4 +872,4 @@ static long syscall_dispatch(size_t sysnum, uintptr_t args, regs_t *regs)
curthr->kt_errno = ENOSYS;
return -1;
}
-}
+} \ No newline at end of file
diff --git a/kernel/drivers/memdevs.c b/kernel/drivers/memdevs.c
index eeaaddc..065fde8 100644
--- a/kernel/drivers/memdevs.c
+++ b/kernel/drivers/memdevs.c
@@ -162,4 +162,4 @@ static long zero_mmap(vnode_t *file, mobj_t **ret)
// set the return value
*ret = mobj;
return 0;
-}
+} \ No newline at end of file
diff --git a/kernel/fs/namev.c b/kernel/fs/namev.c
index 8e355d6..f443b2e 100644
--- a/kernel/fs/namev.c
+++ b/kernel/fs/namev.c
@@ -76,7 +76,7 @@ long namev_is_descendant(vnode_t *a, vnode_t *b)
long namev_lookup(vnode_t *dir, const char *name, size_t namelen,
vnode_t **res_vnode)
{
- // // NOT_YET_IMPLEMENTED("VFS: namev_lookup");
+ // NOT_YET_IMPLEMENTED("VFS: namev_lookup");
// KASSERT(NULL != dir);
// KASSERT(NULL != name);
diff --git a/kernel/fs/s5fs/s5fs.c b/kernel/fs/s5fs/s5fs.c
index 8fdfc7b..c40c6ff 100644
--- a/kernel/fs/s5fs/s5fs.c
+++ b/kernel/fs/s5fs/s5fs.c
@@ -307,7 +307,7 @@ static void s5fs_delete_vnode(fs_t *fs, vnode_t *vn)
// Write the inode back to disk and return
pframe_t *pf;
s5_get_meta_disk_block(FS_TO_S5FS(fs), S5_INODE_BLOCK(vn->vn_vno), 1, &pf);
- // // Check if the page frame was not found
+ // Check if the page frame was not found
// if (err < 0)
// {
// return;
diff --git a/kernel/fs/s5fs/s5fs_subr.c b/kernel/fs/s5fs/s5fs_subr.c
index 6e89249..f092d0a 100644
--- a/kernel/fs/s5fs/s5fs_subr.c
+++ b/kernel/fs/s5fs/s5fs_subr.c
@@ -242,7 +242,7 @@ long s5_file_block_to_disk_block(s5_node_t *sn, size_t file_blocknum,
}
// Update the inode
indirect_block[indirect_block_index] = alloced_blocknum;
- // sn->dirtied_inode = 1;
+ sn->dirtied_inode = 1;
// set ret params and return
*newp = 1;
@@ -408,8 +408,8 @@ ssize_t s5_write_file(s5_node_t *sn, size_t pos, const char *buf, size_t len)
if (err < 0)
{
// Restore pos
- sn->vnode.vn_len -= bytes_written;
- sn->inode.s5_un.s5_size -= bytes_written;
+ sn->vnode.vn_len += bytes_written;
+ sn->inode.s5_un.s5_size += bytes_written;
return err;
}
diff --git a/kernel/fs/vfs_syscall.c b/kernel/fs/vfs_syscall.c
index 245e55d..fc0878e 100644
--- a/kernel/fs/vfs_syscall.c
+++ b/kernel/fs/vfs_syscall.c
@@ -569,6 +569,10 @@ long do_link(const char *oldpath, const char *newpath)
// NOT_YET_IMPLEMENTED("VFS: do_link");
// Resolve the oldpath
+ if (strlen(newpath) > NAME_LEN)
+ {
+ return -ENAMETOOLONG;
+ }
vnode_t *old_vnode = NULL;
long ret = namev_resolve(NULL, oldpath, &old_vnode);
@@ -599,6 +603,7 @@ long do_link(const char *oldpath, const char *newpath)
if (!S_ISDIR(dir->vn_mode))
{
vput(&old_vnode);
+ vput(&dir);
return -ENOTDIR;
}
// Check if name is too long
@@ -610,7 +615,7 @@ long do_link(const char *oldpath, const char *newpath)
// Lock the vnodes and call link
vlock_in_order(old_vnode, dir);
- ret = dir->vn_ops->link(old_vnode, dir, name, len);
+ ret = dir->vn_ops->link(dir, name, len, old_vnode);
vunlock_in_order(old_vnode, dir);
vput(&old_vnode);
diff --git a/kernel/fs/vnode_specials.c b/kernel/fs/vnode_specials.c
index d8c79bd..41d4ce2 100644
--- a/kernel/fs/vnode_specials.c
+++ b/kernel/fs/vnode_specials.c
@@ -224,4 +224,4 @@ static long blockdev_file_fill_pframe(vnode_t *file, pframe_t *pf)
static long blockdev_file_flush_pframe(vnode_t *file, pframe_t *pf)
{
return -ENOTSUP;
-}
+} \ No newline at end of file
diff --git a/kernel/main/kmain.c b/kernel/main/kmain.c
index e16d08e..c835275 100644
--- a/kernel/main/kmain.c
+++ b/kernel/main/kmain.c
@@ -170,8 +170,8 @@ static void *initproc_run(long arg1, void *arg2)
// dbg(DBG_PROC, "%s", "In main thread!\n");
- char *argv[2] = {"init", NULL};
- char *envp[1] = {NULL};
+ char *const argv[] = {NULL};
+ char *const envp[] = {NULL};
kernel_execve("/sbin/init", argv, envp);
#ifdef __DRIVERS__
diff --git a/kernel/proc/fork.c b/kernel/proc/fork.c
index cbf5e30..b501b1e 100644
--- a/kernel/proc/fork.c
+++ b/kernel/proc/fork.c
@@ -60,7 +60,7 @@ long do_fork(struct regs *regs)
// NOT_YET_IMPLEMENTED("VM: do_fork");
// Create a new process
- proc_t *child_proc = proc_create("child from fork");
+ proc_t *child_proc = proc_create("cf");
if (child_proc == NULL)
{
return -ENOMEM;
@@ -73,53 +73,17 @@ long do_fork(struct regs *regs)
return -ENOMEM;
}
- // Set the child process's parent to the current process
- // child_thread->kt_proc = child_proc;
- // list_insert_head(&child_proc->p_threads, &child_thread->kt_plink);
-
- // Get the new vmmap_t for the child process
- // vmmap_t *child_vmmap = vmmap_clone(curproc->p_vmmap);
- // if (child_vmmap == NULL)
- // {
- // kthread_destroy(child_thread);
- // proc_destroy(child_proc);
- // return -ENOMEM;
- // }
-
- // Set the new vmmap_t for the child process
- // child_proc->p_vmmap = child_vmmap;
- // // Set the vmmap to the child process
- // child_thread->kt_proc = child_proc;
-
- // Set the working directory of the child process to the current process
- // vref(curproc->p_cwd);
- // child_proc->p_cwd = curproc->p_cwd;
-
- // Copy over each file descriptor from the parent to the child
- // for (int i = 0; i < NFILES; i++)
- // {
- // if (curproc->p_files[i] != NULL)
- // {
- // fref(curproc->p_files[i]);
- // child_proc->p_files[i] = curproc->p_files[i];
- // }
- // }
-
// Fix the values of the registers and the rest of the kthread's ctx
regs->r_rax = 0; // Set the return value to 0 for the child
- child_thread->kt_ctx.c_rsp = fork_setup_stack(regs, child_thread->kt_kstack); // Set the stack pointer for the child
+ child_thread->kt_ctx.c_rsp = fork_setup_stack(regs, child_thread->kt_ctx.c_kstack); // Set the stack pointer for the child
child_thread->kt_ctx.c_rip = (uintptr_t) userland_entry; // Set the instruction pointer to userland_entry
// child_thread->kt_ctx.c_rbp = curthr->kt_ctx.c_rbp; // Set the current thread's base pointer to the child's base pointer
- child_thread->kt_ctx.c_pml4 = curproc->p_pml4; // Set the current thread's page table to the current proc's
+ child_thread->kt_ctx.c_pml4 = child_proc->p_pml4; // Set the current thread's page table to the child's page table
child_thread->kt_proc = child_proc; // Set the child process to the child thread
-
// Update the list
- list_insert_tail(&child_proc->p_threads, &child_thread->kt_plink);
-
-
- // Update the brks for the child process
- // child_proc->p_brk = curproc->p_brk;
- // child_proc->p_start_brk = curproc->p_start_brk;
+ list_insert_head(&child_proc->p_threads, &child_thread->kt_plink);
+ child_proc->p_brk = curproc->p_brk; // Set the child's break to the parent's break
+ child_proc->p_start_brk = curproc->p_start_brk; // Set the child's start break to the parent's start break
// Unmap the parent's page table and flush the TLB
pt_unmap_range(curproc->p_pml4, USER_MEM_LOW, USER_MEM_HIGH);
@@ -130,4 +94,4 @@ long do_fork(struct regs *regs)
// Return the child's process id to the parent
return child_proc->p_pid;
-}
+} \ No newline at end of file
diff --git a/kernel/proc/proc.c b/kernel/proc/proc.c
index f13064a..2ade163 100644
--- a/kernel/proc/proc.c
+++ b/kernel/proc/proc.c
@@ -223,9 +223,6 @@ proc_t *proc_create(const char *name)
proc_initproc = proc;
}
- proc->p_vmmap = vmmap_clone(curproc->p_vmmap);
- curproc->p_vmmap->vmm_proc = proc;
-
#ifdef __VFS__
// clone and ref the files from curproc
for (int fd = 0; fd < NFILES; fd++)
@@ -243,6 +240,23 @@ proc_t *proc_create(const char *name)
}
#endif
+#ifdef __VM__
+ proc->p_vmmap = vmmap_clone(curproc->p_vmmap);
+ curproc->p_vmmap->vmm_proc = proc;
+
+ KASSERT(proc->p_vmmap != NULL); //FIXME!
+
+ // copy the table of file descriptors
+ for (int i = 0; i < NFILES; i++)
+ {
+ if (curproc->p_files[i] != NULL)
+ {
+ fref(curproc->p_files[i]);
+ proc->p_files[i] = curproc->p_files[i];
+ }
+ }
+#endif
+
return proc;
}
diff --git a/kernel/vm/anon.c b/kernel/vm/anon.c
index a433395..8c65b85 100644
--- a/kernel/vm/anon.c
+++ b/kernel/vm/anon.c
@@ -60,7 +60,7 @@ static long anon_fill_pframe(mobj_t *o, pframe_t *pf)
// set the pframe's mobj to the given mobj
// pf->pf_addr = o;
- // // set the pframe's flags to dirty
+ // set the pframe's flags to dirty
// pf->pf_dirty = 1;
memset(pf->pf_addr, 0, PAGE_SIZE);
@@ -85,4 +85,4 @@ static void anon_destructor(mobj_t *o)
// free the mobj
slab_obj_free(anon_allocator, o);
-}
+} \ No newline at end of file
diff --git a/kernel/vm/brk.c b/kernel/vm/brk.c
index 69a315f..37bd16c 100644
--- a/kernel/vm/brk.c
+++ b/kernel/vm/brk.c
@@ -63,7 +63,7 @@ long do_brk(void *addr, void **ret)
}
// Check if the address is within the valid range
- if ((uintptr_t)addr > USER_MEM_HIGH)
+ if ((uintptr_t)addr > USER_MEM_HIGH || (uintptr_t)addr < USER_MEM_LOW)
{
return -ENOMEM;
}
@@ -154,4 +154,4 @@ long do_brk(void *addr, void **ret)
curproc->p_brk = addr;
*ret = addr;
return 0;
-}
+} \ No newline at end of file
diff --git a/kernel/vm/mmap.c b/kernel/vm/mmap.c
index ce932de..a298df4 100644
--- a/kernel/vm/mmap.c
+++ b/kernel/vm/mmap.c
@@ -70,7 +70,7 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
}
// check if len is not zero (len is an unsigned value, so it is always positive)
- if (len == 0)
+ if ((ssize_t) len <= 0)
{
return -EINVAL;
}
@@ -87,8 +87,8 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
return -EINVAL;
}
- // check if fd is not a valid file descriptor and MAP_ANON was not set
- if (fd < 0 && (flags & MAP_ANON) == 0)
+ // check if the fd is valid and MAP_ANON was not set
+ if (((fd < 0 || fd >= NFILES) || curproc->p_files[fd] == NULL) && !(flags & MAP_ANON))
{
return -EBADF;
}
@@ -96,10 +96,10 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
// check if a file mapping was requested, but fd is not open for reading
// file error checking is done in if statement below
file_t *file = NULL;
- if (fd >= 0 && (flags & MAP_ANON) == 0)
+ if (fd >= 0 && fd < NFILES)
{
// get the file and check if it is valid
- file = fget(fd);
+ file = curproc->p_files[fd];
if (file == NULL)
{
return -EBADF;
@@ -110,7 +110,6 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
// check if the file's vnode's mmap operation doesn't exist
if (file->f_vnode->vn_ops == NULL || file->f_vnode->vn_ops->mmap == NULL)
{
- fput(&file);
return -ENODEV;
}
@@ -119,32 +118,26 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
// check if thef FMODE_READ flag is not set
if ((file->f_mode & FMODE_READ) == 0)
{
- fput(&file);
return -EACCES;
}
// check if append mode is set and PROT_WRITE is set
if ((prot & PROT_WRITE) && (file->f_mode & FMODE_APPEND))
{
- fput(&file);
return -EACCES;
}
- // check if MAP_SHARED was requested and PROT_WRITE is set, but fd is not open in read/write (O_RDWR) mode
- if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file->f_mode & FMODE_READ) == 0)
+ // if MAP_SHARED was requested and PROT_WRITE is set, but fd is not open in read/write (O_RDWR) mode.
+ if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file->f_mode & FMODE_WRITE) == 0)
{
- fput(&file);
return -EACCES;
}
// check if PROT_WRITE is set, but the file has FMODE_APPEND specified
if ((prot & PROT_WRITE) && (file->f_mode & FMODE_APPEND))
{
- fput(&file);
return -EACCES;
}
-
- fput(&file);
}
@@ -167,20 +160,18 @@ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
return err;
}
- // set ret if it was provided
void *start = PN_TO_ADDR(vma->vma_start);
- if (ret)
- {
- *ret = start;
- }
-
// flush the TLB
tlb_flush_range(
(uintptr_t) start,
PAGE_SIZE * (vma->vma_end - vma->vma_start)
);
- // return 0 on success
+ // set ret if it was provided and return 0 on success
+ if (ret)
+ {
+ *ret = start;
+ }
return 0;
}
@@ -212,13 +203,17 @@ long do_munmap(void *addr, size_t len)
}
// Check if len is in bounds
- if (len > USER_MEM_HIGH || len == 0)
+ if (len > USER_MEM_HIGH || len <= 0)
{
return -EINVAL;
}
// Check if the addr is out of range of the user address space
- if ((uintptr_t)addr < USER_MEM_LOW || (uintptr_t)addr + len > USER_MEM_HIGH)
+ if (
+ (uintptr_t)addr < USER_MEM_LOW
+ || (uintptr_t)addr > USER_MEM_HIGH
+ || (uintptr_t)addr + len > USER_MEM_HIGH
+ )
{
return -EINVAL;
}
@@ -227,9 +222,8 @@ long do_munmap(void *addr, size_t len)
size_t start = ADDR_TO_PN(addr);
size_t end = ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)addr + len));
long ret = vmmap_remove(
- curproc->p_vmmap,
- start,
- end - start
- );
+ curproc->p_vmmap,
+ start,
+ end - start);
return ret;
} \ No newline at end of file
diff --git a/kernel/vm/shadow.c b/kernel/vm/shadow.c
index 91b1fce..06cf20d 100644
--- a/kernel/vm/shadow.c
+++ b/kernel/vm/shadow.c
@@ -71,6 +71,7 @@ mobj_t *shadow_create(mobj_t *shadowed)
}
// initialize the mobj_shadow_t
+ so->shadowed = shadowed;
// set the bottom_mobj based on the two cases
if (shadowed->mo_type == MOBJ_SHADOW)
@@ -82,12 +83,11 @@ mobj_t *shadow_create(mobj_t *shadowed)
so->bottom_mobj = shadowed;
}
// init the other fields
- so->shadowed = shadowed;
- mobj_init(&so->mobj, MOBJ_SHADOW, &shadow_mobj_ops);
mobj_ref(so->shadowed);
mobj_ref(so->bottom_mobj);
- // lock the shadow object
+ // init and lock the shadow object
+ mobj_init(&so->mobj, MOBJ_SHADOW, &shadow_mobj_ops);
mobj_lock(&so->mobj);
// return the shadow object
@@ -110,52 +110,7 @@ mobj_t *shadow_create(mobj_t *shadowed)
*/
void shadow_collapse(mobj_t *o)
{
- // NOT_YET_IMPLEMENTED("VM: shadow_collapse");
-
- // get the mobj_shadow_t and it's mobj
- mobj_shadow_t *so = MOBJ_TO_SO(o);
- mobj_t *iter = so->shadowed;
- // iterate through the shadow chain
- while (iter && so->shadowed->mo_type == MOBJ_SHADOW)
- {
- // check to see if the refcount is not 1. if so, continue to next shadowed object
- if (so->shadowed->mo_refcount != 1)
- {
- iter = so->shadowed;
- continue;
- }
- // else, go over the shadowed object's pframes
-
- // iterate through the pframes
- mobj_lock(&so->shadowed);
- list_iterate(&so->shadowed->mo_pframes, pframe, pframe_t, pf_link)
- {
- // get the pframe from the shadow object
- pframe_t *spf = NULL;
-
- mobj_lock(iter); // lock before getting the pframe
- mobj_find_pframe(o, pframe->pf_pagenum, &spf);
- mobj_unlock(iter);
-
- // check if the pframe is not in the shadow object when migrating
- if (spf == NULL)
- {
- // if not, remove the pframe from the shadowed object
- // and insert it into out iterated shadow object
- list_remove(&pframe->pf_link);
- list_insert_tail(&iter->mo_pframes, &pframe->pf_link);
- }
- else
- {
- // if it is, release the pframe we found
- pframe_release(&spf);
- }
- }
-
- // put locked the shadowed object after iterating through it
- mobj_put_locked(&so->shadowed);
- // FIXME: this is probably wrong
- }
+ NOT_YET_IMPLEMENTED("VM: shadow_collapse");
}
/*
@@ -210,7 +165,7 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
while (iter && iter->mo_type == MOBJ_SHADOW)
{
mobj_lock(iter);
- mobj_find_pframe(o, pagenum, &pf);
+ mobj_find_pframe(iter, pagenum, &pf);
mobj_unlock(iter);
if (pf)
{
@@ -218,14 +173,15 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
return 0;
}
// update the iterator
- iter = MOBJ_TO_SO(iter)->shadowed;
+ so = MOBJ_TO_SO(iter);
+ iter = so->shadowed;
}
// if no shadow objects have the page, call mobj_get_pframe() to get the page from the bottom object
// at this point, iter is the bottom object
- mobj_lock(iter);
- long ret = mobj_get_pframe(iter, pagenum, forwrite, pfp);
- mobj_unlock(iter);
+ mobj_lock(so->bottom_mobj);
+ long ret = mobj_get_pframe(so->bottom_mobj, pagenum, forwrite, pfp);
+ mobj_unlock(so->bottom_mobj);
return ret;
}
@@ -252,7 +208,7 @@ static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
*/
static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
{
- // NOT_YET_IMPLEMENTED("VM: shadow_fill_pframe");
+ // NOT_YET_IMPLEMENTED("VM: shadow_fill_pframe");
// get the mobj_shadow_t
mobj_shadow_t *so = MOBJ_TO_SO(o);
@@ -263,7 +219,7 @@ static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
// get the pframe from the shadow object
pframe_t *spf = NULL;
mobj_lock(iter);
- mobj_find_pframe(o, pf->pf_pagenum, &spf);
+ mobj_find_pframe(iter, pf->pf_pagenum, &spf);
mobj_unlock(iter);
// if the pframe is found, copy the contents into pf
@@ -276,19 +232,20 @@ static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
}
// update the iterator
- iter = MOBJ_TO_SO(iter)->shadowed;
+ so = MOBJ_TO_SO(iter);
+ iter = so->shadowed;
}
// if none of the shadow objects have a copy of the frame, use mobj_get_pframe on the bottom object
pframe_t *spf = NULL;
- mobj_lock(iter);
- long ret = mobj_get_pframe(iter, pf->pf_pagenum, 0, &spf);
- mobj_unlock(iter);
+ mobj_lock(so->bottom_mobj);
+ long ret = mobj_get_pframe(so->bottom_mobj, pf->pf_pagenum, 0, &spf);
+ mobj_unlock(so->bottom_mobj);
// check if the operation was sucessful, memcpy the contents into pf
// and release the pframe
- if (ret == 0)
+ if (ret >= 0)
{
- memcpy(pf->pf_addr, pf->pf_addr, PAGE_SIZE);
+ memcpy(pf->pf_addr, spf->pf_addr, PAGE_SIZE);
pframe_release(&spf);
}
@@ -336,4 +293,4 @@ static void shadow_destructor(mobj_t *o)
// free the slab
slab_obj_free(shadow_allocator, so);
-}
+} \ No newline at end of file
diff --git a/kernel/vm/vmmap.c b/kernel/vm/vmmap.c
index 8789371..8c1a455 100644
--- a/kernel/vm/vmmap.c
+++ b/kernel/vm/vmmap.c
@@ -49,7 +49,7 @@ vmarea_t *vmarea_alloc(void)
new_vmarea->vma_prot = 0;
new_vmarea->vma_flags = 0;
new_vmarea->vma_obj = NULL;
- new_vmarea->vma_obj = NULL;
+ new_vmarea->vma_vmmap = NULL;
list_link_init(&new_vmarea->vma_plink);
// Return the new vmarea
@@ -134,6 +134,8 @@ void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
{
// NOT_YET_IMPLEMENTED("VM: vmmap_insert*");
+ new_vma->vma_vmmap = map;
+
// iterate over the list of vmareas
list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
{
@@ -173,47 +175,48 @@ ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
if (dir == VMMAP_DIR_LOHI)
{
// iterate over the page numbers, going from low to high
- // determine the continguous range of free virtual pages
+ // determine the contiguous range of free virtual pages
- int start, end = 0;
+ size_t start_page, contig_page = 0;
size_t vfn = ADDR_TO_PN(USER_MEM_LOW);
- while (vfn <= ADDR_TO_PN(USER_MEM_HIGH))
+ while (vfn++ < ADDR_TO_PN(USER_MEM_HIGH))
{
// Lookup the vmarea for this page number
- vmarea_t *vma = vmmap_lookup(map, vfn++);
+ vmarea_t *vma = vmmap_lookup(map, vfn);
if (vma == NULL)
{
// if unmapped, document this
- end = vfn;
- if (start == 0)
+ if (contig_page == 0)
{
- start = vfn;
+ start_page = 0;
}
+ contig_page++;
}
else
{
// if mapped, start over
- start, end = 0;
+ start_page = contig_page = 0;
}
// if the range exists, return the start
- if (end == npages)
+ if (contig_page == npages)
{
- return start;
+ KASSERT(start_page >= ADDR_TO_PN(USER_MEM_LOW));
+ return start_page;
}
}
}
-
+
// case 2: dir is VMMAP_DIR_HILO
- else if (dir == VMMAP_DIR_HILO)
+ if (dir == VMMAP_DIR_HILO)
{
// iterate over the page numbers
- int contig = 0;
+ size_t contig = 0;
size_t vfn = ADDR_TO_PN(USER_MEM_HIGH);
- while (vfn >= ADDR_TO_PN(USER_MEM_LOW))
+ while (--vfn >= ADDR_TO_PN(USER_MEM_LOW))
{
// Lookup the vmarea for this page number
- vmarea_t *vma = vmmap_lookup(map, --vfn);
+ vmarea_t *vma = vmmap_lookup(map, vfn);
if (vma == NULL)
{
// if unmapped, increment the contig
@@ -228,6 +231,7 @@ ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
// if there are n contiguous pages, return the current vfn
if (contig == npages)
{
+ KASSERT(vfn >= ADDR_TO_PN(USER_MEM_LOW));
return vfn;
}
}
@@ -298,6 +302,7 @@ vmmap_t *vmmap_clone(vmmap_t *map)
// NOT_YET_IMPLEMENTED("VM: vmmap_clone");
// Create a new vmmap
+ // vmmap_collapse(map);
vmmap_t *new_vmmap = vmmap_create();
if (new_vmmap == NULL)
{
@@ -413,97 +418,101 @@ long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages,
int prot, int flags, off_t off, int dir, vmarea_t **new_vma)
{
// NOT_YET_IMPLEMENTED("VM: vmmap_map");
+ // return -1;
+
+ // ASK: why are these needed!!
+ KASSERT(map != NULL);
+ KASSERT(prot == PROT_NONE
+ || prot == PROT_READ
+ || prot == PROT_WRITE
+ || prot == PROT_EXEC
+ || prot == (PROT_READ | PROT_WRITE)
+ || prot == (PROT_READ | PROT_EXEC)
+ || prot == (PROT_WRITE | PROT_EXEC)
+ || prot == (PROT_READ | PROT_WRITE | PROT_EXEC));
+ KASSERT((flags & MAP_TYPE) == MAP_SHARED || (flags & MAP_TYPE) == MAP_PRIVATE);
+
+ if (lopage == 0)
+ {
+ KASSERT(dir == VMMAP_DIR_LOHI || dir == VMMAP_DIR_HILO);
+ ssize_t res = vmmap_find_range(map, npages, dir);
+ if (res == -1) {
+ return -ENOMEM;
+ }
+ lopage = res;
+ }
- // Create a new vmarea
- // see if lopage is 0. if so, use vmmap_find_range() to get a valid range
- if (lopage == 0)
+ if (lopage != 0 && (flags & MAP_FIXED))
{
- lopage = vmmap_find_range(map, npages, dir);
- if (lopage == -1)
+ long ret = vmmap_remove(map, lopage, npages);
+ if (ret < 0)
{
- return -ENOMEM;
+ return ret;
}
}
- // Alloc the new vmarea
- vmarea_t *new_vmarea = vmarea_alloc();
- if (new_vmarea == NULL)
+ // alloc the new vma
+ vmarea_t *vma = vmarea_alloc();
+ if (!vma)
{
return -ENOMEM;
}
- // Set the fields of the new vmarea
- new_vmarea->vma_start = lopage;
- new_vmarea->vma_end = lopage + npages;
- new_vmarea->vma_off = ADDR_TO_PN(off);
- new_vmarea->vma_prot = prot;
- new_vmarea->vma_flags = flags;
- new_vmarea->vma_vmmap = map;
- new_vmarea->vma_obj = NULL;
- // If file is NULL, create an anon object
- if (file == NULL)
+ // fill in fields, except for mobj and vma
+ vma->vma_start = lopage;
+ vma->vma_end = lopage + npages;
+ vma->vma_off = ADDR_TO_PN(off);
+ vma->vma_prot = prot;
+ vma->vma_flags = flags;
+
+ // make the mobj, depending on the case (anon or mmap)
+ mobj_t *obj = NULL;
+ if (file == NULL)
{
- new_vmarea->vma_obj = anon_create();
- mobj_unlock(new_vmarea->vma_obj); // unlock the anon object before use
- if (new_vmarea->vma_obj == NULL)
+ obj = anon_create();
+ if (obj == NULL)
{
- vmarea_free(new_vmarea);
+ vmarea_free(vma);
return -ENOMEM;
}
- }
+ mobj_unlock(obj);
+ }
else
{
- // If file is non-NULL, use the vnode's mmap operation to get the mobj
- long ret = file->vn_ops->mmap(file, &new_vmarea->vma_obj);
+ long ret = file->vn_ops->mmap(file, &obj);
if (ret < 0)
{
- // on fail, free the new vmarea and return the error
- vmarea_free(new_vmarea);
+ vmarea_free(vma);
return ret;
}
}
+ vma->vma_obj = obj;
- // If MAP_PRIVATE is specified, set up a shadow object
- if (flags & MAP_PRIVATE)
+ // if the flag is private, upgrade the obj to a shadow obj
+ if (flags & MAP_PRIVATE)
{
- mobj_lock(new_vmarea->vma_obj);
- mobj_t *shadow_obj = shadow_create(new_vmarea->vma_obj);
- mobj_unlock(new_vmarea->vma_obj);
- mobj_unlock(shadow_obj); // unlock the shadow object before use
- mobj_put(&new_vmarea->vma_obj); // put the original object
- if (shadow_obj == NULL)
+ mobj_t *shadow_obj = shadow_create(obj);
+ if (shadow_obj == NULL)
{
- vmarea_free(new_vmarea);
+ vmarea_free(vma);
+ mobj_put(&obj);
return -ENOMEM;
}
- new_vmarea->vma_obj = shadow_obj;
- }
+ // unlock from creation
+ mobj_unlock(shadow_obj);
- // If MAP_FIXED is specified and the given range overlaps with any preexisting mappings, remove the preexisting mappings
- if (lopage != 0 && (flags & MAP_FIXED))
- {
- long ret = vmmap_remove(map, lopage, npages);
- if (ret < 0)
- {
- vmarea_free(new_vmarea);
- // remove/put the shadow/annon object if it exists
- if (new_vmarea->vma_obj)
- {
- mobj_put(&new_vmarea->vma_obj); // FIXME: is this correct!
- }
-
- return ret;
- }
+ vma->vma_obj = shadow_obj;
+ // put the og obj
+ mobj_put(&obj);
}
- // Insert the new vmarea into the map
- vmmap_insert(map, new_vmarea);
-
- // set ret val and return 0
- if (new_vma)
+ // now that vma is ready, set it
+ vmmap_insert(map, vma);
+ if (new_vma != NULL)
{
- *new_vma = new_vmarea;
+ *new_vma = vma;
}
+
return 0;
}
@@ -558,13 +567,15 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
// Set the fields of the new vmarea
new_vmarea->vma_start = lopage + npages;
new_vmarea->vma_end = vma->vma_end;
- new_vmarea->vma_off = vma->vma_off + (new_vmarea->vma_start - vma->vma_start);
+ new_vmarea->vma_off += lopage + npages - vma->vma_start;
new_vmarea->vma_prot = vma->vma_prot;
new_vmarea->vma_flags = vma->vma_flags;
- new_vmarea->vma_vmmap = map;
+ // new_vmarea->vma_vmmap = map;
+ mobj_lock(vma->vma_obj);
new_vmarea->vma_obj = vma->vma_obj;
// increment the refcount of the object associated with the vmarea
mobj_ref(new_vmarea->vma_obj);
+ mobj_unlock(vma->vma_obj);
// Shorten the length of the old vmarea
vma->vma_end = lopage;
@@ -574,13 +585,12 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
// call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
pt_unmap_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage),
- PN_TO_ADDR(lopage + npages)
- );
+ curproc->p_pml4,
+ PN_TO_ADDR(lopage),
+ PN_TO_ADDR(lopage + npages));
tlb_flush_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage)
+ PN_TO_ADDR(lopage),
+ npages
);
}
@@ -589,15 +599,15 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
{
// shorten the length of the mapping
vma->vma_end = lopage;
+
// call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
pt_unmap_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage),
- PN_TO_ADDR(lopage + npages)
- );
+ curproc->p_pml4,
+ PN_TO_ADDR(lopage),
+ PN_TO_ADDR(lopage + npages));
tlb_flush_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage)
+ PN_TO_ADDR(lopage),
+ npages
);
}
@@ -605,18 +615,17 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
else if (vma->vma_start >= lopage && vma->vma_end > lopage + npages && vma->vma_start < lopage + npages)
{
// move the beginning of the mapping and shorten its length
- vma->vma_off += (lopage + npages - vma->vma_start);
+ vma->vma_off += lopage + npages - vma->vma_start;
vma->vma_start = lopage + npages;
// call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
pt_unmap_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage),
- PN_TO_ADDR(lopage + npages)
- );
+ curproc->p_pml4,
+ PN_TO_ADDR(lopage),
+ PN_TO_ADDR(lopage + npages));
tlb_flush_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage)
+ PN_TO_ADDR(lopage),
+ npages
);
}
@@ -629,13 +638,12 @@ long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
// call pt_unmap_range() and tlb_flush_range() to clean the pagetables and TLB
pt_unmap_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage),
- PN_TO_ADDR(lopage + npages)
- );
+ curproc->p_pml4,
+ PN_TO_ADDR(lopage),
+ PN_TO_ADDR(lopage + npages));
tlb_flush_range(
- map->vmm_proc->p_pml4,
- PN_TO_ADDR(lopage + npages) - PN_TO_ADDR(lopage)
+ PN_TO_ADDR(lopage),
+ npages
);
}
}
@@ -717,11 +725,11 @@ long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
}
// Read from the pframe and copy it into buf
- void *cursor = (void *)(bytes_read + vaddr);
+ void *cursor = bytes_read + vaddr;
size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_read);
memcpy(
(void *) buf + bytes_read,
- (void *)pf->pf_addr + PAGE_OFFSET(cursor),
+ (void *) pf->pf_addr + PAGE_OFFSET(cursor),
bytes_this_iteration
);
@@ -767,7 +775,7 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
size_t vfn = ADDR_TO_PN(vaddr);
size_t end_vfn = ADDR_TO_PN(vaddr + count);
size_t bytes_written = 0;
- while(vfn < end_vfn)
+ while(vfn <= end_vfn)
{
// Lookup the vmarea for this page number
vmarea_t *vma = vmmap_lookup(map, vfn);
@@ -787,7 +795,7 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
}
// Write to the pframe, copying data from buf
- void *cursor = (void *)(bytes_written + vaddr);
+ void *cursor = bytes_written + vaddr;
size_t bytes_this_iteration = MIN(PAGE_SIZE - PAGE_OFFSET(cursor), count - bytes_written);
memcpy(
(void *)pf->pf_addr + PAGE_OFFSET(cursor),
@@ -795,9 +803,8 @@ long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
bytes_this_iteration
);
- // Dirty the page
+ // Dirty the pframe
pf->pf_dirty = 1;
-
// Unlock the pframe
pframe_release(&pf);