aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Foiani <mfoiani@cs.brown.edu>2024-05-15 00:38:32 -0400
committerMichael Foiani <mfoiani@cs.brown.edu>2024-05-15 00:38:32 -0400
commitbf9a10ff2710ff088a94586df8756741db67bb0c (patch)
tree1392a8c90e5e2a0a508f019baa9a279a0f1039c8
parentc9eb0848a356cd7c32d7f4be389c964f9d568434 (diff)
add autograder tests
-rw-r--r--kernel/fs/s5fs/s5fs.c3
-rw-r--r--kernel/include/test/vmtest.h2
-rw-r--r--kernel/main/kmain.c3
-rw-r--r--kernel/test/vmtest.c342
-rw-r--r--kernel/util/debug.c2
5 files changed, 337 insertions, 15 deletions
diff --git a/kernel/fs/s5fs/s5fs.c b/kernel/fs/s5fs/s5fs.c
index c40c6ff..73ecaa6 100644
--- a/kernel/fs/s5fs/s5fs.c
+++ b/kernel/fs/s5fs/s5fs.c
@@ -827,9 +827,6 @@ static long s5fs_mkdir(vnode_t *dir, const char *name, size_t namelen,
// Link the new directory to the parent
vlock(dir);
-
- dbg(DBG_PRINT, "LINKING DIR TO NAME %s\n\n", name);
-
long link3 = s5_link(VNODE_TO_S5NODE(dir), name, namelen, VNODE_TO_S5NODE(new_vnode));
vunlock(dir);
// Check if the link operation failed
diff --git a/kernel/include/test/vmtest.h b/kernel/include/test/vmtest.h
new file mode 100644
index 0000000..a6ce59d
--- /dev/null
+++ b/kernel/include/test/vmtest.h
@@ -0,0 +1,2 @@
+# pragma once
+long vmtest_main(long argc, void *arg2); \ No newline at end of file
diff --git a/kernel/main/kmain.c b/kernel/main/kmain.c
index c4b64b4..77955d0 100644
--- a/kernel/main/kmain.c
+++ b/kernel/main/kmain.c
@@ -33,6 +33,7 @@
#include "fs/vnode.h"
#include "test/driverstest.h"
+#include "test/vmtest.h"
GDB_DEFINE_HOOK(boot)
@@ -171,6 +172,8 @@ static void *initproc_run(long arg1, void *arg2)
// dbg(DBG_PROC, "%s", "In main thread!\n");
#ifdef __VM__
+ vmtest_main(0, NULL);
+
char *const argv[] = {NULL};
char *const envp[] = {NULL};
kernel_execve("/sbin/init", argv, envp);
diff --git a/kernel/test/vmtest.c b/kernel/test/vmtest.c
index 9ffa4c6..b248eed 100644
--- a/kernel/test/vmtest.c
+++ b/kernel/test/vmtest.c
@@ -1,3 +1,4 @@
+
#include "errno.h"
#include "globals.h"
@@ -13,7 +14,137 @@
#include "mm/slab.h"
#include "mm/kmalloc.h"
#include "vm/vmmap.h"
+#include "mm/mman.h"
+#include "mm/page.h"
+#include "mm/mobj.h"
+#include "vm/shadow.h"
+
+typedef struct mobj_shadow
+{
+ // the mobj parts of this shadow object
+ mobj_t mobj;
+ // a reference to the mobj that is the data source for this shadow object
+ // This should be a reference to a shadow object of some ancestor process.
+ // This is used to traverse the shadow object chain.
+ mobj_t *shadowed;
+ // a reference to the mobj at the bottom of this shadow object's chain
+ // this should NEVER be a shadow object (i.e. it should have some type other
+ // than MOBJ_SHADOW)
+ mobj_t *bottom_mobj;
+} mobj_shadow_t;
+
+#define MOBJ_TO_SO(o) CONTAINER_OF(o, struct mobj_shadow, mobj)
+
+/*
+ Test to make sure that vmmap_write and vmmap_read are able to interact with
+ anonymous objects correctly
+*/
+long test_vmmap_write_read() {
+ vmmap_t *map = curproc->p_vmmap;
+ // First vmmap_map into the address space
+ // offset + using anonymous objects (in reality, most anon objects will not have an offset)
+ vmarea_t* new_vma = NULL;
+ size_t num_pages = 6;
+ size_t offset = 2 * 4096;
+
+ long ret = vmmap_map(map, NULL, 0, num_pages,
+ PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, offset, VMMAP_DIR_HILO, &new_vma);
+ test_assert(ret == 0, "vmmap failed!");
+ test_assert(new_vma != NULL, "vmarea was not returned properly");
+ test_assert(new_vma->vma_off == offset / PAGE_SIZE, "offset was not updated correctly");
+ test_assert(new_vma->vma_obj != NULL, "vma_obj pointer was not set");
+ test_assert(new_vma->vma_prot & PROT_READ, "check protections failed — PROT_READ not set");
+ test_assert(new_vma->vma_prot & PROT_WRITE, "check protections failed — PROT_WRITE not set");
+
+ size_t start_page = new_vma->vma_start;
+ size_t end_page = new_vma->vma_end;
+ test_assert(end_page - start_page == num_pages, "number of pages mapped is incorrect");
+
+ // write to some random location within the vmarea, offset of 20 bytes within the page
+ // fill in the rest of the page with a bunch of A's
+ size_t offset_in_page = 20;
+ char* buf = page_alloc_n(1);
+ memset(buf, 'A', 4096);
+ size_t amount_to_read_write = PAGE_SIZE - offset_in_page;
+
+ dbg(DBG_TEST, "write starting from an offset into the 2nd page and the rest of the bytes\n");
+ // in the second page
+ dbg(DBG_TEST, "[_______xxxxxxxxxxx]\n");
+ char* addr = (char*)PN_TO_ADDR(start_page + 2) + offset_in_page;
+ // write starting from an offset into the 2nd page
+ ret = vmmap_write(map, (void*)addr, buf, PAGE_SIZE - offset_in_page);
+ test_assert(ret == 0, "vmmap_write failed");
+
+ // make sure we read the same in vmmap_read
+ char* read_buf = page_alloc_n(1);
+ ret = vmmap_read(map, (void*)addr, read_buf, PAGE_SIZE - offset_in_page);
+ test_assert(0 == memcmp(read_buf, buf, amount_to_read_write), "data read is not equal");
+ page_free_n(buf, 1);
+ page_free_n(read_buf, 1);
+
+ dbg(DBG_TEST, "write multi-page content starting from an offset within the third page\n");
+ dbg(DBG_TEST, "[______xxxxx][xxxxxxxxxxx][xxx________]\n");
+ // so something like the above
+ // request more data
+ buf = page_alloc_n(2);
+ read_buf = page_alloc_n(2);
+ // offset is at the halfway mark in the page
+ offset_in_page = 4096 / 2;
+ addr = (char*)PN_TO_ADDR(start_page + 3) + offset_in_page;
+ amount_to_read_write = PAGE_SIZE * 2;
+
+ memset(buf, 'B', PAGE_SIZE * 2);
+ ret = vmmap_write(map, (void*)addr, buf, amount_to_read_write);
+ test_assert(ret == 0, "vmmap_write failed");
+
+ ret = vmmap_read(map, addr, read_buf, amount_to_read_write);
+ test_assert(ret == 0, "vmmap_read failed");
+ test_assert(0 == memcmp(read_buf, buf, amount_to_read_write), "data read is not equal");
+
+ page_free_n(buf, 2);
+ page_free_n(read_buf, 2);
+
+ dbg(DBG_TEST, "test a multi-vmarea mapping, this vmarea should be adjacent to the one previously made\n");
+ // each vmarea has 6 pages
+ // [ ][ ]
+ vmarea_t* prior_vma = NULL;
+ ret = vmmap_map(map, NULL, 0, num_pages,
+ PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, offset, VMMAP_DIR_HILO, &prior_vma);
+ test_assert(prior_vma != NULL, "vma was not initialized correctly");
+ test_assert(prior_vma->vma_end == new_vma->vma_start, "end was not calculated properly");
+ test_assert(prior_vma->vma_end - prior_vma->vma_start == num_pages, "number of pages was not calculated correctly");
+ // start reading from somewhere within the second to last page at an offset
+ // read almost 4 pages in total --> two from prev area, two from next vmarea
+ buf = page_alloc_n(4);
+ read_buf = page_alloc_n(4);
+ // we're really just cycling through the alphabet
+ memset(buf, 'C', PAGE_SIZE * 4);
+ // keep the same offset
+ offset_in_page = 4096 / 2;
+ // write all four pages of data
+ size_t amount_to_write = 4 * PAGE_SIZE; // magic numbers go brrrr
+ addr = (char*)PN_TO_ADDR(prior_vma->vma_end - 2) + offset_in_page;
+ ret = vmmap_write(map, (void*)addr, buf, amount_to_write);
+ test_assert(ret == 0, "vmmap_write failed");
+
+ size_t amount_to_read = offset_in_page + (3*PAGE_SIZE);
+ ret = vmmap_read(map, addr, read_buf, amount_to_read);
+ test_assert(ret == 0, "vmmap_read failed");
+ test_assert(0 == memcmp(read_buf, buf, amount_to_read), "data read is not equal");
+ page_free_n(buf, 4);
+ page_free_n(read_buf, 4);
+
+ // clean up for the test
+ vmmap_destroy(&map);
+ curproc->p_vmmap = vmmap_create();
+
+ return 0;
+}
+
+/*
+ Test to make sure that vmmap_map, vmmap_remove, and vmmap_lookup interact correctly
+*/
long test_vmmap() {
vmmap_t *map = curproc->p_vmmap;
@@ -21,7 +152,7 @@ long test_vmmap() {
KASSERT(vmmap_is_range_empty(map, ADDR_TO_PN(USER_MEM_LOW), ADDR_TO_PN(USER_MEM_HIGH - USER_MEM_LOW)));
// Go through the address space, make sure we find nothing
- for (size_t i = USER_MEM_LOW; i < ADDR_TO_PN(USER_MEM_HIGH); i += PAGE_SIZE) {
+ for (size_t i = ADDR_TO_PN(USER_MEM_LOW); i < ADDR_TO_PN(USER_MEM_HIGH); i += PAGE_SIZE) {
KASSERT(!vmmap_lookup(map, i));
}
@@ -31,23 +162,28 @@ long test_vmmap() {
size_t num_pages_per_vmarea = 16;
size_t prev_start = ADDR_TO_PN(USER_MEM_HIGH);
+ long ret;
for (size_t i = 0; i < num_vmareas; i++) {
- ssize_t start = vmmap_find_range(map, num_pages_per_vmarea, VMMAP_DIR_HILO);
+ size_t start = vmmap_find_range(map, num_pages_per_vmarea, VMMAP_DIR_HILO);
test_assert(start + num_pages_per_vmarea == prev_start, "Incorrect return value from vmmap_find_range");
- vmarea_t *vma = kmalloc(sizeof(vmarea_t));
- KASSERT(vma && "Unable to alloc the vmarea");
- memset(vma, 0, sizeof(vmarea_t));
-
- vma->vma_start = start;
- vma->vma_end = start + num_pages_per_vmarea;
- vmmap_insert(map, vma);
+ vmarea_t *vma = NULL;
+ // specifying MAP_FIXED such that we for sure map it starting from this page
+ // avoid creating shadow objects
+ ret = vmmap_map(map, NULL, start, num_pages_per_vmarea, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED, 0, VMMAP_DIR_HILO, &vma);
+ test_assert(ret == 0, "vmmap failed");
+ test_assert(vma != NULL, "vmarea was not created properly in vmmap");
+ test_assert(vma->vma_start == start, "start was not set correctly");
+ test_assert(vma->vma_end == start + num_pages_per_vmarea, "ending page was not set correctly");
+ test_assert(vma->vma_off == 0, "off was not set correctly");
+ test_assert(vmmap_lookup(map, vma->vma_start) != NULL, "could not find inserted vmarea");
prev_start = start;
}
// Now, our address space should look like:
// EMPTY EMPTY EMPTY [ ][ ][ ][ ][ ]
+ dbg(DBG_TEST, "address space looks like: EMPTY EMPTY EMPTY [___][___][___][___][___]\n");
// ^LP
// ^HP
// ^section_start
@@ -55,17 +191,201 @@ long test_vmmap() {
// LP --> the lowest possible userland page number
// section start --> HP - (num_vmareas * num_pages_per_vmarea)
+ // test to make sure that the vmmap is ordered from lowest to highest
+ prev_start = 0;
+ size_t prev_end = 0;
+ dbg(DBG_TEST, "testing the ordering of the vmmap\n");
list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink) {
- list_remove(&vma->vma_plink);
- kfree(vma);
+ if (!prev_start) {
+ prev_start = vma->vma_start;
+ prev_end = vma->vma_end;
+ } else {
+ test_assert(prev_start < vma->vma_start, "overlap with start");
+ test_assert(prev_end <= vma->vma_start, "overlap with end");
+ }
+ }
+
+ size_t LP = ADDR_TO_PN(USER_MEM_LOW);
+ size_t HP = ADDR_TO_PN(USER_MEM_HIGH);
+ size_t section_start = HP - (num_vmareas * num_pages_per_vmarea);
+
+ for (size_t page = HP - (num_vmareas * num_pages_per_vmarea); page < HP; page++) {
+ // Make sure that we can find everything
+ test_assert(vmmap_lookup(map, page) != NULL, "could not find page: %X\n", page);
+ }
+
+ // Let's remove the middle entry entirely.
+ size_t remove_idx = num_vmareas / 2;
+ size_t remove_start = section_start + (remove_idx * num_pages_per_vmarea);
+ size_t remove_end = remove_start + num_pages_per_vmarea;
+
+ dbg(DBG_TEST, "removing middle entry\n");
+ dbg(DBG_TEST, "EMPTY EMPTY EMPTY [___][___]EMPTY[___][___]\n");
+ ret = vmmap_remove(map, remove_start, num_pages_per_vmarea);
+ test_assert(ret == 0, "vmmap remove failed");
+ // Let's make sure we can't find anything in the middle entry.
+ for (size_t i = 0; i < num_pages_per_vmarea; i++) {
+ // Within the current vmarea, we shouldn't find anything.
+ test_assert(vmmap_lookup(map, i + remove_start) == NULL, "Should not find a vmarea");
+
+ // If we go one vmarea beyond, we should find stuff!
+ size_t next_vmarea_start = remove_start + num_pages_per_vmarea;
+ test_assert(vmmap_lookup(map, i + next_vmarea_start) != NULL, "where is the vmarea?");
}
+
+ // Now, let's put our vmmap_remove implementation to the test.
+ // Currently, we have:
+ // EMPTY EMPTY EMPTY [ ][ ]EMPTY[ ][ ]
+
+ dbg(DBG_TEST, "splitting first entry\n");
+ dbg(DBG_TEST, "EMPTY EMPTY EMPTY [_]E[_][___]EMPTY[___][___]\n");
+ // Let's split the first element into pieces, from 4 to 12.
+ // This leaves us with:
+ // EMPTY EMPTY EMPTY [ ]E[ ][ ]EMPTY[ ][ ]
+ size_t split_start = section_start + (num_pages_per_vmarea / 4);
+ size_t split_end = section_start + 3*(num_pages_per_vmarea / 4);
+ test_assert(vmmap_remove(map, split_start, split_end - split_start) == 0, "vmmap_remove was not successful");
+
+ for (size_t i = section_start; i < HP; i++) {
+ if (i >= split_start && i < split_end) {
+ test_assert(vmmap_lookup(map, i) == NULL, "should not have found region for page: %X\n", i);
+ } else if (i >= remove_start && i < remove_end) {
+ test_assert(vmmap_lookup(map, i) == NULL, "should not have found region for page: %X\n", i);
+ } else {
+ test_assert(vmmap_lookup(map, i) != NULL, "should have found a section for page: %X\n", i);
+ }
+ }
+
+ // Now, we have:
+ // case 2, case 4, case 4, case 3
+ // EMPTY EMPTY EMPTY [ ]EMPTY[ ][ ]EMPTY[ ][ ]
+ // **********************
+ // 0 4 12 16
+ // Oof, time to calculate this:
+ dbg(DBG_TEST, "removing large area of vmmap\n");
+ dbg(DBG_TEST, "EMPTY EMPTY EMPTY [____]EMPTY[____][____]EMPTY[____][____]\n");
+ dbg(DBG_TEST, "________________________________**********************__\n");
+ size_t big_split_start = section_start + 7*(num_pages_per_vmarea / 8);
+ size_t big_split_end = HP - (num_pages_per_vmarea / 2);
+ test_assert(vmmap_remove(map, big_split_start, big_split_end - big_split_start) == 0, "vmmap_remove failed");
+
+ for (size_t i = section_start; i < HP; i++) {
+ vmarea_t *lookup_vma = vmmap_lookup(map, i);
+
+ // EMPTY EMPTY EMPTY [ ]EMPTY[ ][ ]EMPTY[ ][ ]
+ // *****************
+ // ^^^^^
+ if (i >= split_start && i < split_end) {
+ test_assert(lookup_vma == NULL, "why does vmarea exist");
+ continue;
+ }
+
+ // EMPTY EMPTY EMPTY [ ]EMPTY[ ][ ]EMPTY[ ][ ]
+ // ****************
+ // ^^^^^^^^^^^^^^^^^
+ if ((i >= big_split_start) && i < big_split_end) {
+ test_assert(lookup_vma == NULL, "why does vmarea exist");
+ continue;
+ }
+ test_assert(lookup_vma != NULL, "why does vmarea not exist");
+ }
+
+ // clean up test and make sure everything looks good
+ vmmap_destroy(&curproc->p_vmmap);
+ curproc->p_vmmap = vmmap_create();
return 0;
}
+long test_basic_shadow_collapse() {
+ vmmap_t *map = curproc->p_vmmap;
+ size_t num_pages = 6;
+ size_t offset = 0;
+ vmarea_t* new_vma = NULL;
+
+ long ret = vmmap_map(map, NULL, 0, num_pages,
+ PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, offset, VMMAP_DIR_HILO, &new_vma);
+ KASSERT(ret == 0);
+ KASSERT(new_vma != NULL);
+ KASSERT(new_vma->vma_obj != NULL);
+ // after this with shadow object this will look like:
+ // shadow_obj -> anon_bottom_mobj
+ test_assert(new_vma->vma_obj->mo_refcount == 1, "refcount of new object is not correct");
+ mobj_lock(new_vma->vma_obj);
+ mobj_t* new_shadow = shadow_create(new_vma->vma_obj);
+ mobj_unlock(new_vma->vma_obj);
+ // shadow_obj --> shadow_obj -> anon_bottom_mobj
+ test_assert(new_vma->vma_obj->mo_refcount == 2, "refcount should have incremented to 2");
+ test_assert(new_shadow->mo_refcount == 1, "refcount of new shadow object was not 1");
+ mobj_put(&new_vma->vma_obj);
+ // update the area to now point to the shadow object, manually doing vmmap_clone here
+ new_vma->vma_obj = new_shadow;
+ KASSERT(MOBJ_TO_SO(new_shadow)->shadowed != NULL);
+ KASSERT(MOBJ_TO_SO(new_shadow)->bottom_mobj != NULL);
+ // store a reference to bottom_mobj
+ mobj_t* bottom_mobj = MOBJ_TO_SO(new_shadow)->bottom_mobj;
+ shadow_collapse(new_shadow);
+ test_assert(MOBJ_TO_SO(new_shadow)->shadowed == bottom_mobj, "shadowed was not updated correctly");
+ test_assert(MOBJ_TO_SO(new_shadow)->bottom_mobj == bottom_mobj, "bottom mobj changed");
+ mobj_unlock(new_shadow);
+ vmmap_destroy(&map);
+ curproc->p_vmmap = vmmap_create();
+
+ return 0;
+}
+
+long test_vmmap_is_range_empty() {
+ vmmap_t *map = curproc->p_vmmap;
+
+ // Create 3 vmareas, with 16 pages each
+ size_t num_vmareas = 3;
+ size_t num_pages_per_vmarea = 16;
+
+ size_t prev_start = ADDR_TO_PN(USER_MEM_HIGH);
+ for (size_t i = 0; i < num_vmareas; i++) {
+ ssize_t start = vmmap_find_range(map, num_pages_per_vmarea, VMMAP_DIR_HILO);
+ test_assert(start + num_pages_per_vmarea == prev_start, "Incorrect return value from vmmap_find_range");
+
+ vmarea_t *vma = kmalloc(sizeof(vmarea_t));
+ KASSERT(vma && "Unable to alloc the vmarea");
+ memset(vma, 0, sizeof(vmarea_t));
+
+ vma->vma_start = start;
+ vma->vma_end = start + num_pages_per_vmarea;
+ vmmap_insert(map, vma);
+
+ prev_start = start;
+ }
+
+ size_t LP = ADDR_TO_PN(USER_MEM_LOW);
+ size_t HP = ADDR_TO_PN(USER_MEM_HIGH);
+ size_t section_start = HP - (num_vmareas * num_pages_per_vmarea);
+
+ dbg(DBG_TEST, "EMPTY EMPTY EMPTY EMPTY EMPTY [___][___][___]\n");
+ // test overlap different cases:
+ test_assert(vmmap_is_range_empty(map,
+ section_start - (num_pages_per_vmarea / 2), num_pages_per_vmarea / 2),
+ "does not catch no overlap case correctly");
+ test_assert(!vmmap_is_range_empty(map, section_start - 2, 4), "two pages should've overlapped");
+ test_assert(!vmmap_is_range_empty(map, section_start, 8), "overlap with half of the pages in vmarea");
+ test_assert(!vmmap_is_range_empty(map, section_start + 8, num_pages_per_vmarea), "overlap in both vmareas");
+ test_assert(!vmmap_is_range_empty(map, HP - 8, 7), "overlap with second vmarea");
+ test_assert(!vmmap_is_range_empty(map, HP - 2, 1), "overlap with end of second vmarea");
+ test_assert(!vmmap_is_range_empty(map, section_start - 2, 4 + num_pages_per_vmarea), "overlap with both");
+
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink) {
+ list_remove(&vma->vma_plink);
+ kfree(vma);
+ }
+ return 0;
+}
+
long vmtest_main(long arg1, void* arg2) {
test_init();
test_vmmap();
+ test_vmmap_is_range_empty();
+ test_vmmap_write_read();
+ test_basic_shadow_collapse();
// Write your own tests here!
diff --git a/kernel/util/debug.c b/kernel/util/debug.c
index 849b60a..0b2faab 100644
--- a/kernel/util/debug.c
+++ b/kernel/util/debug.c
@@ -19,7 +19,7 @@
* always be the first thing in this variable. Note that this setting can be
* changed at runtime by modifying the dbg_modes global variable.
*/
-#define INIT_DBG_MODES "-all"
+#define INIT_DBG_MODES "-all,test,testpass,testfail"
/* Below is a truly terrible poll-driven serial driver that we use for debugging
* purposes - it outputs to COM1, but
* this can be easily changed. It does not use interrupts, and cannot read input