author    nthnluu <nate1299@me.com>  2024-01-28 21:20:27 -0500
committer nthnluu <nate1299@me.com>  2024-01-28 21:20:27 -0500
commit    c63f340d90800895f007de64b7d2d14624263331 (patch)
tree      2c0849fa597dd6da831c8707b6f2603403778d7b /kernel/vm
Created student weenix repository
Diffstat (limited to 'kernel/vm')
-rw-r--r--  kernel/vm/anon.c        65
-rw-r--r--  kernel/vm/brk.c         58
-rw-r--r--  kernel/vm/mmap.c        83
-rw-r--r--  kernel/vm/pagefault.c   53
-rw-r--r--  kernel/vm/shadow.c     173
-rw-r--r--  kernel/vm/vmmap.c      326
-rw-r--r--  kernel/vm/vmmap.gdb     24
7 files changed, 782 insertions, 0 deletions
diff --git a/kernel/vm/anon.c b/kernel/vm/anon.c
new file mode 100644
index 0000000..a998d70
--- /dev/null
+++ b/kernel/vm/anon.c
@@ -0,0 +1,65 @@
+#include "mm/mobj.h"
+#include "mm/page.h"
+#include "mm/pframe.h"
+#include "mm/slab.h"
+
+#include "util/debug.h"
+#include "util/string.h"
+
+/* for debugging/verification purposes */
+int anon_count = 0;
+
+static slab_allocator_t *anon_allocator;
+
+static long anon_fill_pframe(mobj_t *o, pframe_t *pf);
+
+static long anon_flush_pframe(mobj_t *o, pframe_t *pf);
+
+static void anon_destructor(mobj_t *o);
+
+static mobj_ops_t anon_mobj_ops = {.get_pframe = NULL,
+ .fill_pframe = anon_fill_pframe,
+ .flush_pframe = anon_flush_pframe,
+ .destructor = anon_destructor};
+
+/*
+ * Initialize anon_allocator using the slab allocator.
+ */
+void anon_init()
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
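+
+/*
+ * A minimal sketch, mirroring vmmap_init() in kernel/vm/vmmap.c; since
+ * anonymous objects carry no state beyond the mobj itself, sizeof(mobj_t)
+ * is assumed to be the right slab size:
+ *
+ *     anon_allocator = slab_allocator_create("anon", sizeof(mobj_t));
+ *     KASSERT(anon_allocator);
+ */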
+
+/*
+ * The mobj should be locked upon successful return. Use mobj_init and
+ * mobj_lock.
+ */
+mobj_t *anon_create()
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return NULL;
+}
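+
+/*
+ * A hedged sketch of anon_create(); slab_obj_alloc() and the MOBJ_ANON
+ * type constant are assumptions (only MOBJ_SHADOW is named elsewhere in
+ * this stencil):
+ *
+ *     mobj_t *o = slab_obj_alloc(anon_allocator);
+ *     if (!o)
+ *         return NULL;
+ *     mobj_init(o, MOBJ_ANON, &anon_mobj_ops);
+ *     mobj_lock(o);
+ *     anon_count++;
+ *     return o;
+ */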
+
+/*
+ * This function is not complicated -- think about what the pframe should look
+ * like for an anonymous object
+ */
+static long anon_fill_pframe(mobj_t *o, pframe_t *pf)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return 0;
+}
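+
+/*
+ * An anonymous object has no backing store, so a newly created frame is
+ * simply zero-filled. A one-line sketch, assuming the frame's data page
+ * is reachable as pf->pf_addr:
+ *
+ *     memset(pf->pf_addr, 0, PAGE_SIZE);
+ *     return 0;
+ */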
+
+static long anon_flush_pframe(mobj_t *o, pframe_t *pf) { return 0; }
+
+/*
+ * Release all resources associated with an anonymous object.
+ *
+ * Hints:
+ * 1) Call mobj_default_destructor() to free pframes
+ * 2) Free the mobj
+ */
+static void anon_destructor(mobj_t *o)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
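+
+/*
+ * A sketch following the hints above; slab_obj_free() is assumed to be
+ * the counterpart of slab_obj_alloc():
+ *
+ *     mobj_default_destructor(o);
+ *     anon_count--;
+ *     slab_obj_free(anon_allocator, o);
+ */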
diff --git a/kernel/vm/brk.c b/kernel/vm/brk.c
new file mode 100644
index 0000000..5169a42
--- /dev/null
+++ b/kernel/vm/brk.c
@@ -0,0 +1,58 @@
+#include "errno.h"
+#include "globals.h"
+#include "mm/mm.h"
+#include "util/debug.h"
+
+#include "mm/mman.h"
+
+/*
+ * This function implements the brk(2) system call.
+ *
+ * This routine manages the calling process's "break" -- the ending address
+ * of the process's dynamic region (heap)
+ *
+ * Some important details on the range of values 'p_brk' can take:
+ * 1) 'p_brk' should not be set to a value lower than 'p_start_brk', since this
+ *    could overwrite data in another memory region. But 'p_brk' can be equal to
+ *    'p_start_brk', which means the heap is empty or does not yet exist.
+ * 2) Growth of the 'p_brk' cannot overlap with/expand into an existing
+ * mapping. Use vmmap_is_range_empty() to help with this.
+ * 3) 'p_brk' cannot go beyond the region of the address space allocated for use by
+ * userland (USER_MEM_HIGH)
+ *
+ * Before setting 'p_brk' to 'addr', you must account for all scenarios by comparing
+ * the page numbers of addr, 'p_brk' and 'p_start_brk' as the vmarea that represents the heap
+ * has page granularity. Think about the following sub-cases (note that the heap
+ * should always be represented by at most one vmarea):
+ * 1) The heap needs to be created. What permissions and attributes does a process
+ * expect the heap to have?
+ * 2) The heap already exists, so you need to modify its end appropriately.
+ * 3) The heap needs to shrink.
+ *
+ * Beware of page alignment!:
+ * 1) The starting break is not necessarily page aligned. Since the loader sets
+ * 'p_start_brk' to be the end of the bss section, 'p_start_brk' should always be
+ * aligned up to start the dynamic region at the first page after bss_end.
+ * 2) vmareas only have page granularity, so you will need to take this
+ * into account when deciding how to set the mappings if p_brk or p_start_brk
+ *    is not page aligned. The caller of do_brk() would be very disappointed if
+ *    you gave them less than they asked for!
+ *
+ * Some additional details:
+ * 1) You are guaranteed that the process data/bss region is non-empty.
+ * That is, if the starting brk is not page-aligned, its page has
+ * read/write permissions.
+ * 2) If 'addr' is NULL, you should return the current break. We use this to
+ * implement sbrk(0) without writing a separate syscall. Look in
+ * user/libc/syscall.c if you're curious.
+ * 3) Return 0 on success, -errno on failure. The 'ret' argument should be used to
+ * return the updated 'p_brk' on success.
+ *
+ * Error cases do_brk is responsible for generating:
+ * - ENOMEM: attempting to set p_brk beyond its valid range
+ */
+long do_brk(void *addr, void **ret)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return 0;
+}
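+
+/*
+ * A hedged sketch of the up-front checks described above; ADDR_TO_PN and
+ * PAGE_ALIGN_UP are assumed from mm/page.h, and the vmarea bookkeeping
+ * (creating, growing, or shrinking the heap area) is omitted:
+ *
+ *     if (!addr)
+ *     {
+ *         *ret = curproc->p_brk;
+ *         return 0;
+ *     }
+ *     if ((uintptr_t)addr < (uintptr_t)curproc->p_start_brk ||
+ *         (uintptr_t)addr > USER_MEM_HIGH)
+ *         return -ENOMEM;
+ *
+ *     size_t old_end = ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_brk));
+ *     size_t new_end = ADDR_TO_PN(PAGE_ALIGN_UP(addr));
+ *     // when growing, the new pages must not collide with another mapping
+ *     if (new_end > old_end &&
+ *         !vmmap_is_range_empty(curproc->p_vmmap, old_end, new_end - old_end))
+ *         return -ENOMEM;
+ */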
diff --git a/kernel/vm/mmap.c b/kernel/vm/mmap.c
new file mode 100644
index 0000000..7eb2d89
--- /dev/null
+++ b/kernel/vm/mmap.c
@@ -0,0 +1,83 @@
+#include "vm/mmap.h"
+#include "errno.h"
+#include "fs/file.h"
+#include "fs/vfs.h"
+#include "fs/vnode.h"
+#include "globals.h"
+#include "mm/mm.h"
+#include "mm/mman.h"
+#include "mm/tlb.h"
+#include "util/debug.h"
+
+/*
+ * This function implements the mmap(2) syscall: Add a mapping to the current
+ * process's address space. Supports the following flags: MAP_SHARED,
+ * MAP_PRIVATE, MAP_FIXED, and MAP_ANON.
+ *
+ * ret - If provided, on success, *ret must point to the start of the mapped area
+ *
+ * Return 0 on success, or:
+ * - EACCES:
+ * - a file mapping was requested, but fd is not open for reading.
+ * - MAP_SHARED was requested and PROT_WRITE is set, but fd is
+ * not open in read/write (O_RDWR) mode.
+ * - PROT_WRITE is set, but the file has FMODE_APPEND specified.
+ * - EBADF:
+ * - fd is not a valid file descriptor and MAP_ANON was
+ * not set
+ * - EINVAL:
+ * - addr is not page aligned and MAP_FIXED is specified
+ * - addr is out of range of the user address space and MAP_FIXED is specified
+ * - off is not page aligned
+ * - len is <= 0 or off < 0
+ * - flags do not contain MAP_PRIVATE or MAP_SHARED
+ * - ENODEV:
+ *    - The underlying filesystem of the specified file does not
+ *      support memory mapping; in other words, the file's vnode has no mmap
+ *      operation
+ * - Propagate errors from vmmap_map()
+ *
+ * See the errors section of the mmap(2) man page for more details
+ *
+ * Hints:
+ * 1) A lot of error checking.
+ * 2) Call vmmap_map() to create the mapping.
+ * a) Use VMMAP_DIR_HILO as default, which will make other stencil code in
+ * Weenix happy.
+ * 3) Call tlb_flush_range() on the newly-mapped region. This is because the
+ * newly-mapped region could have been used by someone else, and you don't
+ * want to get stale mappings.
+ * 4) Don't forget to set ret if it was provided.
+ *
+ * If you are mapping less than a page, make sure that you are still allocating
+ * a full page.
+ */
+long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off,
+ void **ret)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return -1;
+}
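+
+/*
+ * A sketch of some of the EINVAL checks above (PAGE_ALIGNED() is assumed
+ * from mm/page.h); the fd/protection checks and the calls to vmmap_map()
+ * and tlb_flush_range() follow the same pattern:
+ *
+ *     if (len == 0 || off < 0 || !PAGE_ALIGNED(off))
+ *         return -EINVAL;
+ *     if (!(flags & MAP_PRIVATE) && !(flags & MAP_SHARED))
+ *         return -EINVAL;
+ *     if ((flags & MAP_FIXED) &&
+ *         (!PAGE_ALIGNED(addr) || (uintptr_t)addr < USER_MEM_LOW ||
+ *          (uintptr_t)addr + len > USER_MEM_HIGH))
+ *         return -EINVAL;
+ */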
+
+/*
+ * This function implements the munmap(2) syscall.
+ *
+ * Return 0 on success, or:
+ * - EINVAL:
+ * - addr is not aligned on a page boundary
+ * - the region to unmap is out of range of the user address space
+ * - len is 0
+ * - Propagate errors from vmmap_remove()
+ *
+ * See the errors section of the munmap(2) man page for more details
+ *
+ * Hints:
+ * - Similar to do_mmap():
+ * 1) Perform error checking.
+ * 2) Call vmmap_remove().
+ */
+long do_munmap(void *addr, size_t len)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return -1;
+}
\ No newline at end of file
diff --git a/kernel/vm/pagefault.c b/kernel/vm/pagefault.c
new file mode 100644
index 0000000..11868e5
--- /dev/null
+++ b/kernel/vm/pagefault.c
@@ -0,0 +1,53 @@
+#include "vm/pagefault.h"
+#include "errno.h"
+#include "globals.h"
+#include "mm/mm.h"
+#include "mm/mman.h"
+#include "mm/mobj.h"
+#include "mm/pframe.h"
+#include "mm/tlb.h"
+#include "types.h"
+#include "util/debug.h"
+
+/*
+ * Respond to a user mode pagefault by setting up the desired page.
+ *
+ * vaddr - The virtual address that the user pagefaulted on
+ * cause - A combination of FAULT_ flags indicating the type of operation that
+ * caused the fault (see pagefault.h)
+ *
+ * Implementation details:
+ * 1) Find the vmarea that contains vaddr, if it exists.
+ * 2) Check the vmarea's protections (see the vmarea_t struct) against the 'cause' of
+ * the pagefault. For example, error out if the fault has cause write and we don't
+ * have write permission in the area. Keep in mind:
+ * a) You can assume that FAULT_USER is always specified.
+ * b) If neither FAULT_WRITE nor FAULT_EXEC is specified, you may assume the
+ * fault was due to an attempted read.
+ * 3) Obtain the corresponding pframe from the vmarea's mobj. Be careful about
+ * locking and error checking!
+ * 4) Finally, set up a call to pt_map to insert a new mapping into the
+ * appropriate pagetable:
+ * a) Use pt_virt_to_phys() to obtain the physical address of the actual
+ * data.
+ * b) You should not assume that vaddr is page-aligned, but you should
+ * provide a page-aligned address to the mapping.
+ * c) For pdflags, use PT_PRESENT | PT_WRITE | PT_USER.
+ * d) For ptflags, start with PT_PRESENT | PT_USER. Also supply PT_WRITE if
+ * the user can and wants to write to the page.
+ * 5) Flush the TLB.
+ *
+ * Tips:
+ * 1) This gets called by _pt_fault_handler() in mm/pagetable.c, which
+ * importantly checks that the fault did not occur in kernel mode. Think
+ * about why a kernel mode page fault would be bad in Weenix. Explore
+ * _pt_fault_handler() to get a sense of what's going on.
+ * 2) If you run into any errors, you should segfault by calling
+ * do_exit(EFAULT).
+ */
+void handle_pagefault(uintptr_t vaddr, uintptr_t cause)
+{
+ dbg(DBG_VM, "vaddr = 0x%p (0x%p), cause = %lu\n", (void *)vaddr,
+ PAGE_ALIGN_DOWN(vaddr), cause);
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
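+
+/*
+ * A sketch of the lookup and permission checks in steps 1-2; ADDR_TO_PN
+ * is assumed from mm/page.h:
+ *
+ *     vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr));
+ *     if (!vma)
+ *         do_exit(EFAULT);
+ *     if (cause & FAULT_WRITE)
+ *     {
+ *         if (!(vma->vma_prot & PROT_WRITE))
+ *             do_exit(EFAULT);
+ *     }
+ *     else if (cause & FAULT_EXEC)
+ *     {
+ *         if (!(vma->vma_prot & PROT_EXEC))
+ *             do_exit(EFAULT);
+ *     }
+ *     else if (!(vma->vma_prot & PROT_READ))
+ *     {
+ *         do_exit(EFAULT); // plain read from an unreadable area
+ *     }
+ */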
diff --git a/kernel/vm/shadow.c b/kernel/vm/shadow.c
new file mode 100644
index 0000000..3b6f783
--- /dev/null
+++ b/kernel/vm/shadow.c
@@ -0,0 +1,173 @@
+#include "vm/shadow.h"
+#include "mm/page.h"
+#include "mm/pframe.h"
+#include "mm/slab.h"
+#include "util/debug.h"
+#include "util/string.h"
+
+#define SHADOW_SINGLETON_THRESHOLD 5
+
+typedef struct mobj_shadow
+{
+ // the mobj parts of this shadow object
+ mobj_t mobj;
+ // a reference to the mobj that is the data source for this shadow object
+ // This should be a reference to a shadow object of some ancestor process.
+ // This is used to traverse the shadow object chain.
+ mobj_t *shadowed;
+ // a reference to the mobj at the bottom of this shadow object's chain
+ // this should NEVER be a shadow object (i.e. it should have some type other
+ // than MOBJ_SHADOW)
+ mobj_t *bottom_mobj;
+} mobj_shadow_t;
+
+#define MOBJ_TO_SO(o) CONTAINER_OF(o, mobj_shadow_t, mobj)
+
+static slab_allocator_t *shadow_allocator;
+
+static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
+ pframe_t **pfp);
+static long shadow_fill_pframe(mobj_t *o, pframe_t *pf);
+static long shadow_flush_pframe(mobj_t *o, pframe_t *pf);
+static void shadow_destructor(mobj_t *o);
+
+static mobj_ops_t shadow_mobj_ops = {.get_pframe = shadow_get_pframe,
+ .fill_pframe = shadow_fill_pframe,
+ .flush_pframe = shadow_flush_pframe,
+ .destructor = shadow_destructor};
+
+/*
+ * Initialize shadow_allocator using the slab allocator.
+ */
+void shadow_init()
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
+
+/*
+ * Create a shadow object that shadows the given mobj.
+ *
+ * Return a new, LOCKED shadow object on success, or NULL upon failure.
+ *
+ * Hints:
+ * 1) Create and initialize a mobj_shadow_t based on the given mobj.
+ * 2) Set up the bottom object of the shadow chain, which could have two cases:
+ * a) Either shadowed is a shadow object, and you can use its bottom_mobj
+ * b) Or shadowed is not a shadow object, in which case it is the bottom
+ * object of this chain.
+ *
+ * Make sure to manage the refcounts correctly.
+ */
+mobj_t *shadow_create(mobj_t *shadowed)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return NULL;
+}
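+
+/*
+ * A hedged sketch of shadow_create(); slab_obj_alloc() and a mobj_ref()
+ * refcount-increment helper are assumptions:
+ *
+ *     mobj_shadow_t *so = slab_obj_alloc(shadow_allocator);
+ *     if (!so)
+ *         return NULL;
+ *     mobj_init(&so->mobj, MOBJ_SHADOW, &shadow_mobj_ops);
+ *
+ *     mobj_ref(shadowed);
+ *     so->shadowed = shadowed;
+ *     so->bottom_mobj = (shadowed->mo_type == MOBJ_SHADOW)
+ *                           ? MOBJ_TO_SO(shadowed)->bottom_mobj
+ *                           : shadowed;
+ *     mobj_ref(so->bottom_mobj);
+ *
+ *     mobj_lock(&so->mobj);
+ *     return &so->mobj;
+ */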
+
+/*
+ * Given a shadow object o, collapse its shadow chain as far as you can.
+ *
+ * Hints:
+ * 1) You can only collapse if the shadowed object is a shadow object.
+ *  2) When collapsing, you must manually migrate pframes from o's shadowed
+ *     object to o, skipping any pages of which o already has its own copy.
+ * 3) Be careful with refcounting! In particular, when you put away o's
+ * shadowed object, its refcount should drop to 0, initiating its
+ * destruction (shadow_destructor).
+ * 4) As a reminder, any refcounting done in shadow_collapse() must play nice
+ * with any refcounting done in shadow_destructor().
+ * 5) Pay attention to mobj and pframe locking.
+ */
+void shadow_collapse(mobj_t *o)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
+
+/*
+ * Obtain the desired pframe from the given mobj, traversing its shadow chain if
+ * necessary. This is where copy-on-write logic happens!
+ *
+ * Arguments:
+ * o - The object from which to obtain a pframe
+ * pagenum - Number of the desired page relative to the object
+ * forwrite - Set if the caller wants to write to the pframe's data, clear if
+ * only reading
+ *  pfp - Upon success, *pfp should point to the desired pframe.
+ *
+ * Return 0 on success, or:
+ * - Propagate errors from mobj_default_get_pframe() and mobj_get_pframe()
+ *
+ * Hints:
+ * 1) If forwrite is set, use mobj_default_get_pframe().
+ * 2) If forwrite is clear, check if o already contains the desired frame.
+ * a) If not, iterate through the shadow chain to find the nearest shadow
+ * mobj that has the frame. Do not recurse! If the shadow chain is long,
+ * you will cause a kernel buffer overflow (e.g. from forkbomb).
+ * b) If no shadow objects have the page, call mobj_get_pframe() to get the
+ * page from the bottom object and return what it returns.
+ *
+ * Pay attention to pframe locking.
+ */
+static long shadow_get_pframe(mobj_t *o, size_t pagenum, long forwrite,
+ pframe_t **pfp)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return 0;
+}
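+
+/*
+ * A sketch of the read-path traversal from hint 2, written iteratively
+ * per the warning above. mobj_find_pframe() is the lookup named in
+ * shadow_fill_pframe()'s hints; the locking shown (o arrives locked, per
+ * the usual mobj convention) is an assumption:
+ *
+ *     mobj_t *cur = o;
+ *     while (cur->mo_type == MOBJ_SHADOW)
+ *     {
+ *         if (cur != o)
+ *             mobj_lock(cur);
+ *         mobj_find_pframe(cur, pagenum, pfp);
+ *         if (cur != o)
+ *             mobj_unlock(cur);
+ *         if (*pfp)
+ *             return 0;
+ *         cur = MOBJ_TO_SO(cur)->shadowed;
+ *     }
+ *     // no shadow object has the page; defer to the bottom object
+ *     mobj_lock(cur);
+ *     long ret = mobj_get_pframe(cur, pagenum, 0, pfp);
+ *     mobj_unlock(cur);
+ *     return ret;
+ */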
+
+/*
+ * Use the given mobj's shadow chain to fill the given pframe.
+ *
+ * Return 0 on success, or:
+ * - Propagate errors from mobj_get_pframe()
+ *
+ * Hints:
+ * 1) Explore mobj_default_get_pframe(), which calls mobj_create_pframe(), to
+ * understand what state pf is in when this function is called, and how you
+ * can use it.
+ *  2) As you can see above, shadow_get_pframe() calls
+ *     mobj_default_get_pframe() (when forwrite is set), which creates
+ *     and then fills the pframe (i.e. shadow_fill_pframe() is called).
+ * 3) Traverse the shadow chain for a copy of the frame, starting at the given
+ * mobj's shadowed object. You can use mobj_find_pframe to look for the
+ * page frame. pay attention to locking/unlocking, and be sure not to
+ * recurse when traversing.
+ * 4) If none of the shadow objects have a copy of the frame, use
+ * mobj_get_pframe on the bottom object to get it.
+ * 5) After obtaining the desired frame, simply copy its contents into pf.
+ */
+static long shadow_fill_pframe(mobj_t *o, pframe_t *pf)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return -1;
+}
+
+/*
+ * Flush a shadow object's pframe to disk.
+ *
+ * Return 0 on success.
+ *
+ * Hint:
+ * - Are shadow objects backed to disk? Do you actually need to do anything
+ * here?
+ */
+static long shadow_flush_pframe(mobj_t *o, pframe_t *pf)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return -1;
+}
+
+/*
+ * Clean up all resources associated with mobj o.
+ *
+ * Hints:
+ * - Check out mobj_put() to understand how this function gets called.
+ *
+ * 1) Call mobj_default_destructor() to flush o's pframes.
+ *  2) Put the shadowed and bottom_mobj members of the shadow object.
+ * 3) Free the mobj_shadow_t.
+ */
+static void shadow_destructor(mobj_t *o)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
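+
+/*
+ * A sketch following the hints above; mobj_put() taking a mobj_t ** and
+ * slab_obj_free() are assumptions:
+ *
+ *     mobj_shadow_t *so = MOBJ_TO_SO(o);
+ *     mobj_default_destructor(o);     // flush and free o's pframes
+ *     mobj_put(&so->shadowed);        // drop the chain references
+ *     mobj_put(&so->bottom_mobj);
+ *     slab_obj_free(shadow_allocator, so);
+ */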
diff --git a/kernel/vm/vmmap.c b/kernel/vm/vmmap.c
new file mode 100644
index 0000000..f683ca0
--- /dev/null
+++ b/kernel/vm/vmmap.c
@@ -0,0 +1,326 @@
+#include "globals.h"
+#include "kernel.h"
+#include <errno.h>
+
+#include "vm/anon.h"
+#include "vm/shadow.h"
+
+#include "util/debug.h"
+#include "util/printf.h"
+#include "util/string.h"
+
+#include "fs/file.h"
+#include "fs/vfs_syscall.h"
+#include "fs/vnode.h"
+
+#include "mm/mm.h"
+#include "mm/mman.h"
+#include "mm/slab.h"
+
+static slab_allocator_t *vmmap_allocator;
+static slab_allocator_t *vmarea_allocator;
+
+void vmmap_init(void)
+{
+ vmmap_allocator = slab_allocator_create("vmmap", sizeof(vmmap_t));
+ vmarea_allocator = slab_allocator_create("vmarea", sizeof(vmarea_t));
+ KASSERT(vmmap_allocator && vmarea_allocator);
+}
+
+/*
+ * Allocate and initialize a new vmarea using vmarea_allocator.
+ */
+vmarea_t *vmarea_alloc(void)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return NULL;
+}
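+
+/*
+ * A minimal sketch; list_link_init() for vma_plink is an assumption:
+ *
+ *     vmarea_t *vma = slab_obj_alloc(vmarea_allocator);
+ *     if (!vma)
+ *         return NULL;
+ *     memset(vma, 0, sizeof(vmarea_t));
+ *     list_link_init(&vma->vma_plink);
+ *     return vma;
+ */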
+
+/*
+ * Free the vmarea by removing it from any lists it may be on, putting its
+ * vma_obj if it exists, and freeing the vmarea_t.
+ */
+void vmarea_free(vmarea_t *vma)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
+
+/*
+ * Create and initialize a new vmmap. Initialize all the fields of vmmap_t.
+ */
+vmmap_t *vmmap_create(void)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return NULL;
+}
+
+/*
+ * Destroy the map pointed to by mapp and set *mapp = NULL.
+ * Remember to free each vma in the map's list.
+ */
+void vmmap_destroy(vmmap_t **mapp)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
+
+/*
+ * Add a vmarea to an address space. Assumes (i.e. asserts to some extent) the
+ * vmarea is valid. Iterate through the list of vmareas and insert the new
+ * vmarea so that the list remains sorted by start address.
+ */
+void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+}
+
+/*
+ * Find a contiguous range of free virtual pages of length npages in the given
+ * address space. Returns the starting page number of the range, without
+ * altering the map. Returns -1 if no such range exists.
+ *
+ * Your algorithm should be first fit.
+ * You should assert that dir is VMMAP_DIR_LOHI or VMMAP_DIR_HILO.
+ * If dir is:
+ * - VMMAP_DIR_HILO: find a gap as high in the address space as possible,
+ * starting from USER_MEM_HIGH.
+ * - VMMAP_DIR_LOHI: find a gap as low in the address space as possible,
+ * starting from USER_MEM_LOW.
+ *
+ * Make sure you are converting between page numbers and addresses correctly!
+ */
+ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return -1;
+}
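+
+/*
+ * A naive first-fit sketch of the VMMAP_DIR_LOHI case, built on
+ * vmmap_is_range_empty(); a real implementation would walk the vmarea
+ * list rather than test every candidate page, and VMMAP_DIR_HILO scans
+ * downward from USER_MEM_HIGH instead:
+ *
+ *     for (size_t vfn = ADDR_TO_PN(USER_MEM_LOW);
+ *          vfn + npages <= ADDR_TO_PN(USER_MEM_HIGH); vfn++)
+ *     {
+ *         if (vmmap_is_range_empty(map, vfn, npages))
+ *             return (ssize_t)vfn;
+ *     }
+ *     return -1;
+ */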
+
+/*
+ * Return the vm_area that vfn (a page number) lies in. Scan the address space looking
+ * for a vma whose range covers vfn. If the page is unmapped, return NULL.
+ */
+vmarea_t *vmmap_lookup(vmmap_t *map, size_t vfn)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return NULL;
+}
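+
+/*
+ * A sketch using the same list_iterate() pattern as vmmap_collapse()
+ * below, assuming vma_start/vma_end are page numbers with vma_end
+ * exclusive (as in vmmap_mapping_info()):
+ *
+ *     list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ *     {
+ *         if (vfn >= vma->vma_start && vfn < vma->vma_end)
+ *             return vma;
+ *     }
+ *     return NULL;
+ */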
+
+/*
+ * For each vmarea in the map whose vma_obj is a shadow object, call
+ * shadow_collapse on that object.
+ */
+void vmmap_collapse(vmmap_t *map)
+{
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ if (vma->vma_obj->mo_type == MOBJ_SHADOW)
+ {
+ mobj_lock(vma->vma_obj);
+ shadow_collapse(vma->vma_obj);
+ mobj_unlock(vma->vma_obj);
+ }
+ }
+}
+
+/*
+ * This is where the magic of fork's copy-on-write gets set up.
+ *
+ * Upon successful return, the new vmmap should be a clone of map with all
+ * shadow objects properly set up.
+ *
+ * For each vmarea, clone its members.
+ *  1) If the vmarea is share-mapped, you don't need to do anything special.
+ *  2) If the vmarea is not share-mapped, it's time for shadow objects:
+ *     a) Create two shadow objects, one for map and one for the new vmmap you
+ *        are constructing, both of which shadow the current vma_obj of the
+ *        vmarea being cloned.
+ *     b) After creating the shadow objects, put the original vma_obj.
+ *     c) Insert the shadow objects into their respective vmareas.
+ *
+ * Be sure to clean up in any error case, manage the reference counts correctly,
+ * and to lock/unlock properly.
+ */
+vmmap_t *vmmap_clone(vmmap_t *map)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return NULL;
+}
+
+/*
+ * Insert a mapping into the map starting at lopage for npages pages.
+ *
+ * file - If provided, the vnode of the file to be mapped in
+ * lopage - If provided, the desired starting page of the mapping
+ * prot - See mman.h for possible values
+ * flags - See do_mmap()'s comments for possible values
+ * off - Offset in the file to start mapping at, in bytes
+ * dir - VMMAP_DIR_LOHI or VMMAP_DIR_HILO
+ * new_vma - If provided, on success, must point to the new vmarea_t
+ *
+ * Return 0 on success, or:
+ * - ENOMEM: On vmarea_alloc, anon_create, shadow_create or
+ * vmmap_find_range failure
+ * - Propagate errors from file->vn_ops->mmap and vmmap_remove
+ *
+ * Hints:
+ * - You can assume/assert that all input is valid. It may help to write
+ * this function and do_mmap() somewhat in tandem.
+ * - If file is NULL, create an anon object.
+ * - If file is non-NULL, use the vnode's mmap operation to get the mobj.
+ * Do not assume it is file->vn_obj (mostly relevant for special devices).
+ * - If lopage is 0, use vmmap_find_range() to get a valid range
+ * - If lopage is not 0, the direction flag (dir) is ignored.
+ * - If lopage is nonzero and MAP_FIXED is specified and
+ * the given range overlaps with any preexisting mappings,
+ * remove the preexisting mappings.
+ * - If MAP_PRIVATE is specified, set up a shadow object. Be careful with
+ * refcounts!
+ * - Be careful: off is in bytes (albeit should be page-aligned), but
+ * vma->vma_off is in pages.
+ * - Be careful with the order of operations. Hold off on any irreversible
+ * work until there is no more chance of failure.
+ */
+long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages,
+ int prot, int flags, off_t off, int dir, vmarea_t **new_vma)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return -1;
+}
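+
+/*
+ * A hedged sketch of the mobj-selection step from the hints above; the
+ * range lookup, vmarea setup, MAP_FIXED removal, and MAP_PRIVATE shadow
+ * wrapping are omitted, and the vn_ops->mmap signature shown is an
+ * assumption:
+ *
+ *     mobj_t *mobj;
+ *     if (!file)
+ *     {
+ *         mobj = anon_create(); // returns the new mobj locked
+ *         if (!mobj)
+ *             return -ENOMEM;
+ *     }
+ *     else
+ *     {
+ *         long ret = file->vn_ops->mmap(file, &mobj);
+ *         if (ret < 0)
+ *             return ret;
+ *     }
+ */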
+
+/*
+ * Remove all mappings in the range [lopage, lopage + npages). Iterate over
+ * the map's vmm_list and adjust any vmareas that overlap that range so that
+ * afterward the range is completely empty. You will have to handle the
+ * following cases:
+ *
+ * Key: [ ] = existing vmarea_t
+ * ******* = region to be unmapped
+ *
+ * Case 1: [ ******* ]
+ * The region to be unmapped lies completely inside the vmarea. We need to
+ * split the old vmarea into two vmareas. Be sure to increment the refcount of
+ * the object associated with the vmarea.
+ *
+ * Case 2: [ *******]**
+ * The region overlaps the end of the vmarea. Just shorten the length of
+ * the mapping.
+ *
+ * Case 3: *[***** ]
+ * The region overlaps the beginning of the vmarea. Move the beginning of
+ * the mapping (remember to update vma_off), and shorten its length.
+ *
+ * Case 4: *[*************]**
+ * The region completely contains the vmarea. Remove the vmarea from the
+ * list.
+ *
+ * Return 0 on success, or:
+ * - ENOMEM: Failed to allocate a new vmarea when splitting a vmarea (case 1).
+ *
+ * Hints:
+ * - Whenever you shorten/remove any mappings, be sure to call pt_unmap_range()
+ *   and tlb_flush_range() to clean your pagetables and TLB.
+ */
+long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return -1;
+}
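+
+/*
+ * A sketch of case 2 only (the region overlaps the end of the vmarea);
+ * the pt_unmap_range()/tlb_flush_range() signatures and curproc->p_pml4
+ * are assumptions:
+ *
+ *     if (vma->vma_start < lopage && lopage < vma->vma_end &&
+ *         vma->vma_end <= lopage + npages)
+ *     {
+ *         pt_unmap_range(curproc->p_pml4, (uintptr_t)PN_TO_ADDR(lopage),
+ *                        (uintptr_t)PN_TO_ADDR(vma->vma_end));
+ *         tlb_flush_range((uintptr_t)PN_TO_ADDR(lopage),
+ *                         vma->vma_end - lopage);
+ *         vma->vma_end = lopage;
+ *     }
+ */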
+
+/*
+ * Returns 1 if the given address space has no mappings for the given range,
+ * 0 otherwise.
+ */
+long vmmap_is_range_empty(vmmap_t *map, size_t startvfn, size_t npages)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return 0;
+}
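+
+/*
+ * A sketch using the standard half-open interval overlap test over the
+ * same list_iterate() pattern as above:
+ *
+ *     size_t endvfn = startvfn + npages;
+ *     list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ *     {
+ *         if (vma->vma_start < endvfn && vma->vma_end > startvfn)
+ *             return 0; // overlap found
+ *     }
+ *     return 1;
+ */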
+
+/*
+ * Read into 'buf' from the virtual address space of 'map'. Start at 'vaddr'
+ * for size 'count'. 'vaddr' is not necessarily page-aligned. count is in bytes.
+ *
+ * Hints:
+ * 1) Find the vmareas that correspond to the region to read from.
+ * 2) Find the pframes within those vmareas corresponding to the virtual
+ * addresses you want to read.
+ *  3) Read from those page frames and copy the data into `buf`.
+ *  4) You will not need to check the permissions of the area.
+ * 5) You may assume/assert that all areas exist.
+ *
+ * Return 0 on success, -errno on error (propagate from the routines called).
+ * This routine will be used within copy_from_user().
+ */
+long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return 0;
+}
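+
+/*
+ * A hedged sketch of the page-by-page copy loop; pf->pf_addr,
+ * pframe_release(), and ADDR_TO_PN are assumptions from the pframe and
+ * page interfaces:
+ *
+ *     size_t pos = 0;
+ *     while (pos < count)
+ *     {
+ *         uintptr_t cur = (uintptr_t)vaddr + pos;
+ *         vmarea_t *vma = vmmap_lookup(map, ADDR_TO_PN(cur));
+ *         KASSERT(vma); // hint 5: all areas are assumed to exist
+ *
+ *         pframe_t *pf;
+ *         mobj_lock(vma->vma_obj);
+ *         long ret = mobj_get_pframe(
+ *             vma->vma_obj, ADDR_TO_PN(cur) - vma->vma_start + vma->vma_off,
+ *             0, &pf);
+ *         mobj_unlock(vma->vma_obj);
+ *         if (ret < 0)
+ *             return ret;
+ *
+ *         size_t pgoff = cur & (PAGE_SIZE - 1);
+ *         size_t len = PAGE_SIZE - pgoff;
+ *         if (len > count - pos)
+ *             len = count - pos;
+ *         memcpy((char *)buf + pos, (char *)pf->pf_addr + pgoff, len);
+ *         pframe_release(&pf);
+ *         pos += len;
+ *     }
+ *     return 0;
+ *
+ * vmmap_write() below follows the same loop with forwrite set, the
+ * memcpy reversed, and the written pages marked dirty.
+ */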
+
+/*
+ * Write from 'buf' into the virtual address space of 'map' starting at
+ * 'vaddr' for size 'count'.
+ *
+ * Hints:
+ * 1) Find the vmareas to write to.
+ * 2) Find the correct pframes within those areas that contain the virtual addresses
+ * that you want to write data to.
+ * 3) Write to the pframes, copying data from buf.
+ *  4) You do not need to check permissions of the areas you use.
+ * 5) Assume/assert that all areas exist.
+ * 6) Remember to dirty the pages that you write to.
+ *
+ * Returns 0 on success, -errno on error (propagate from the routines called).
+ * This routine will be used within copy_to_user().
+ */
+long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
+{
+ NOT_YET_IMPLEMENTED("VM: ***none***");
+ return 0;
+}
+
+size_t vmmap_mapping_info(const void *vmmap, char *buf, size_t osize)
+{
+ return vmmap_mapping_info_helper(vmmap, buf, osize, "");
+}
+
+size_t vmmap_mapping_info_helper(const void *vmmap, char *buf, size_t osize,
+ char *prompt)
+{
+ KASSERT(0 < osize);
+ KASSERT(NULL != buf);
+ KASSERT(NULL != vmmap);
+
+ vmmap_t *map = (vmmap_t *)vmmap;
+ ssize_t size = (ssize_t)osize;
+
+ int len =
+ snprintf(buf, (size_t)size, "%s%37s %5s %7s %18s %11s %23s\n", prompt,
+ "VADDR RANGE", "PROT", "FLAGS", "MOBJ", "OFFSET", "VFN RANGE");
+
+ list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
+ {
+ size -= len;
+ buf += len;
+ if (0 >= size)
+ {
+ goto end;
+ }
+
+ len =
+ snprintf(buf, (size_t)size,
+ "%s0x%p-0x%p %c%c%c %7s 0x%p %#.9lx %#.9lx-%#.9lx\n",
+ prompt, (void *)(vma->vma_start << PAGE_SHIFT),
+ (void *)(vma->vma_end << PAGE_SHIFT),
+ (vma->vma_prot & PROT_READ ? 'r' : '-'),
+ (vma->vma_prot & PROT_WRITE ? 'w' : '-'),
+ (vma->vma_prot & PROT_EXEC ? 'x' : '-'),
+ (vma->vma_flags & MAP_SHARED ? " SHARED" : "PRIVATE"),
+ vma->vma_obj, vma->vma_off, vma->vma_start, vma->vma_end);
+ }
+
+end:
+ if (size <= 0)
+ {
+ size = osize;
+ buf[osize - 1] = '\0';
+ }
+ return osize - size;
+}
diff --git a/kernel/vm/vmmap.gdb b/kernel/vm/vmmap.gdb
new file mode 100644
index 0000000..528dd1d
--- /dev/null
+++ b/kernel/vm/vmmap.gdb
@@ -0,0 +1,24 @@
+define vmmap
+ if $argc > 0
+ set $proc = proc_lookup($arg0)
+ if $proc != NULL
+ printf "Process %i (%s):\n", $proc->p_pid, $proc->p_name
+ set $vmmap = $proc->p_vmmap
+ else
+ printf "No process with PID %i exists\n", $arg0
+ set $vmmap = NULL
+ end
+ else
+ printf "Current process %i (%s):\n", curproc->p_pid, curproc->p_name
+ set $vmmap = curproc->p_vmmap
+ end
+
+ if $vmmap != NULL
+ kinfo vmmap_mapping_info $vmmap
+ end
+end
document vmmap
+Without arguments displays current mappings. Takes an optional integer
+argument to specify the PID of a process whose mappings should be
+printed instead.
+end