author | sotech117 <26747948+sotech117@users.noreply.github.com> | 2024-04-25 03:45:32 -0400
---|---|---
committer | GitHub <noreply@github.com> | 2024-04-25 03:45:32 -0400
commit | c9f4da6024393310e254a2cba679b1f1cc67607a |
tree | 6f7a61225676b9578c0e0c46cdd54a10bad9c9e3 |
parent | a17999858ddaada83071d953d920e3c2a2b390c8 |
parent | fb4b8fbec69f50c7386017896f0add4d46314a1d |
Merge branch 'brown-cs1690:master' into master
-rw-r--r-- | init.gdb | 1
-rw-r--r-- | kernel/drivers/blockdev.c | 2
-rw-r--r-- | kernel/fs/s5fs/s5fs.c | 67
-rw-r--r-- | kernel/fs/s5fs/s5fs_subr.c | 25
-rw-r--r-- | kernel/include/fs/s5fs/s5fs_subr.h | 6
-rw-r--r-- | kernel/include/mm/mobj.h | 6
-rw-r--r-- | kernel/mm/mobj.c | 26
-rw-r--r-- | kernel/mm/pagecache.c | 7
-rw-r--r-- | kernel/proc/kmutex.c | 119
-rw-r--r-- | python/weenix/userland_new.py | 41
10 files changed, 205 insertions, 95 deletions
diff --git a/init.gdb b/init.gdb
--- a/init.gdb
+++ b/init.gdb
@@ -33,6 +33,7 @@ end
 handle SIGSEGV nostop noprint nopass
 
 source ./python/weenix/debug_userland.py
+source ./python/weenix/userland_new.py
 
 break dbg_panic_halt
diff --git a/kernel/drivers/blockdev.c b/kernel/drivers/blockdev.c
index 5c8eb82..79f25c2 100644
--- a/kernel/drivers/blockdev.c
+++ b/kernel/drivers/blockdev.c
@@ -79,7 +79,6 @@ long blockdev_fill_pframe(mobj_t *mobj, pframe_t *pf)
     KASSERT(mobj && pf);
     KASSERT(pf->pf_pagenum <= (1UL << (8 * sizeof(blocknum_t))));
     blockdev_t *bd = CONTAINER_OF(mobj, s5fs_t, s5f_mobj)->s5f_bdev;
-    KASSERT(pf->pf_loc);
     return bd->bd_ops->read_block(bd, pf->pf_addr, (blocknum_t)pf->pf_loc, 1);
 }
@@ -90,7 +89,6 @@ long blockdev_flush_pframe(mobj_t *mobj, pframe_t *pf)
     KASSERT(pf->pf_pagenum <= (1UL << (8 * sizeof(blocknum_t))));
     dbg(DBG_S5FS, "writing disk block %lu\n", pf->pf_pagenum);
     blockdev_t *bd = CONTAINER_OF(mobj, s5fs_t, s5f_mobj)->s5f_bdev;
-    KASSERT(pf->pf_loc);
     return bd->bd_ops->write_block(bd, pf->pf_addr, (blocknum_t)pf->pf_loc, 1);
 }
\ No newline at end of file
diff --git a/kernel/fs/s5fs/s5fs.c b/kernel/fs/s5fs/s5fs.c
index fd0c779..2bcec7d 100644
--- a/kernel/fs/s5fs/s5fs.c
+++ b/kernel/fs/s5fs/s5fs.c
@@ -195,11 +195,11 @@ long s5fs_mount(fs_t *fs)
  * - FS_TO_S5FS to obtain the s5fs object
  * - S5_INODE_BLOCK(vn->v_vno) to determine the block number of the block that
  *   contains the inode info
- * - s5_get_disk_block and s5_release_disk_block to handle the disk block
+ * - s5_get_meta_disk_block and s5_release_disk_block to handle the disk block
  * - S5_INODE_OFFSET to find the desired inode within the disk block
  *   containing it (returns the offset that the inode is stored within the block)
  * - You should initialize the s5_node_t's inode field by reading directly from
- *   the inode on disk by using the page frame returned from s5_get_disk_block. Also
+ *   the inode on disk by using the page frame returned from s5_get_meta_disk_block. Also
  *   make sure to initialize the dirtied_inode field.
  * - Using the inode info, you need to initialize the following vnode fields:
  *   vn_len, vn_mode, and vn_ops using the fields found in the s5_inode struct.
@@ -348,23 +348,17 @@ static long s5fs_umount(fs_t *fs)
 
 static void s5fs_sync(fs_t *fs)
 {
-#ifdef FIXME
     s5fs_t *s5fs = FS_TO_S5FS(fs);
-    #ifdef OLD
-    mobj_t *mobj = S5FS_TO_VMOBJ(s5fs);
-    #endif
-    mobj_t *mobj = 0; // XXX FIX ME
-
-    mobj_lock(mobj);
+    mobj_t *mobj = &s5fs->s5f_mobj;
 
     pframe_t *pf;
-    mobj_get_pframe(mobj, S5_SUPER_BLOCK, 1, &pf);
+    s5_get_meta_disk_block(s5fs, S5_SUPER_BLOCK, 1, &pf);
     memcpy(pf->pf_addr, &s5fs->s5f_super, sizeof(s5_super_t));
-    pframe_release(&pf);
+    s5_release_disk_block(&pf);
 
-    mobj_flush(S5FS_TO_VMOBJ(s5fs));
-    mobj_unlock(S5FS_TO_VMOBJ(s5fs));
-#endif
+    mobj_lock(&s5fs->s5f_mobj);
+    mobj_flush(mobj);
+    mobj_unlock(&s5fs->s5f_mobj);
 }
 
 /* Wrapper around s5_read_file. */
@@ -1000,21 +994,6 @@ static void s5fs_truncate_file(vnode_t *file)
     vunlock(file);
 }
 
-#ifdef OLD
-/*
- * Wrapper around mobj_get_pframe. Remember to lock the memory object around
- * the call to mobj_get_pframe. Assert that the get_pframe does not fail.
- */
-inline void s5_get_disk_block(s5fs_t *s5fs, blocknum_t blocknum, long forwrite,
-                              pframe_t **pfp)
-{
-    mobj_lock(S5FS_TO_VMOBJ(s5fs));
-    long ret = mobj_get_pframe(S5FS_TO_VMOBJ(s5fs), blocknum, forwrite, pfp);
-    mobj_unlock(S5FS_TO_VMOBJ(s5fs));
-    KASSERT(!ret && *pfp);
-}
-#endif
-
 /*
  * Wrapper around device's read_block function; first looks up block in file-system cache.
  * If not there, allocates and fills a page frame.
@@ -1028,6 +1007,7 @@ inline void s5_get_meta_disk_block(s5fs_t *s5fs, uint64_t blocknum, long forwrit
     if (*pfp)
     {
         // block is cached
+        (*pfp)->pf_dirty |= forwrite;
         mobj_unlock(&s5fs->s5f_mobj);
         return;
     }
@@ -1038,7 +1018,7 @@ inline void s5_get_meta_disk_block(s5fs_t *s5fs, uint64_t blocknum, long forwrit
     blockdev_t *bd = s5fs->s5f_bdev;
     long ret = bd->bd_ops->read_block(bd, pf->pf_addr, (blocknum_t)pf->pf_loc, 1);
-    pf->pf_dirty |= forwrite; // needed?
+    pf->pf_dirty |= forwrite; // yes, needed
     KASSERT(!ret);
     mobj_unlock(&s5fs->s5f_mobj);
     KASSERT(!ret && *pfp);
@@ -1060,7 +1040,7 @@ static inline void s5_get_file_disk_block(vnode_t *vnode, uint64_t blocknum, uin
     KASSERT(pf->pf_addr);
     blockdev_t *bd = VNODE_TO_S5FS(vnode)->s5f_bdev;
     long ret = bd->bd_ops->read_block(bd, pf->pf_addr, pf->pf_loc, 1);
-    pf->pf_dirty |= forwrite; // needed?
+    pf->pf_dirty |= forwrite; // yes, needed
     KASSERT(!ret);
 }
@@ -1103,36 +1083,13 @@ inline void s5_release_disk_block(pframe_t **pfp) { pframe_release(pfp); }
 static long s5fs_get_pframe(vnode_t *vnode, uint64_t pagenum, long forwrite,
                             pframe_t **pfp)
 {
-#ifdef OLD
-    if (vnode->vn_len <= pagenum * PAGE_SIZE)
-        return -EINVAL;
-    long loc =
-        s5_file_block_to_disk_block(VNODE_TO_S5NODE(vnode), pagenum, forwrite);
-    if (loc < 0)
-        return loc;
-    if (loc)
-    {
-        mobj_find_pframe(&vnode->vn_mobj, pagenum, pfp);
-        if (*pfp)
-        {
-            mobj_free_pframe(&vnode->vn_mobj, pfp);
-        }
-        s5_get_disk_block(VNODE_TO_S5FS(vnode), (blocknum_t)loc, forwrite, pfp);
-        return 0;
-    }
-    else
-    {
-        KASSERT(!forwrite);
-        return mobj_default_get_pframe(&vnode->vn_mobj, pagenum, forwrite, pfp);
-    }
-#endif
-
     if (vnode->vn_len <= pagenum * PAGE_SIZE)
         return -EINVAL;
     mobj_find_pframe(&vnode->vn_mobj, pagenum, pfp);
     if (*pfp)
     {
         // block is cached
+        (*pfp)->pf_dirty |= forwrite;
         return 0;
     }
     int new;
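Note on the s5fs.c hunks above: `s5_get_meta_disk_block` treats the filesystem's own `s5f_mobj` as a cache of metadata blocks keyed by disk block number, and now propagates `forwrite` into `pf_dirty` on both the cached path and the fill-from-disk path. A minimal sketch of that lookup-or-create flow, assuming the `mobj_create_pframe` step that the hunk context elides (this is not the committed code):

```c
/* Sketch only: lookup-or-create of a metadata block in the s5fs mobj.
 * The mobj_create_pframe() call and its exact arguments are assumptions
 * inferred from the surrounding hunks, not shown by this diff. */
inline void s5_get_meta_disk_block(s5fs_t *s5fs, uint64_t blocknum,
                                   long forwrite, pframe_t **pfp)
{
    mobj_lock(&s5fs->s5f_mobj);
    mobj_find_pframe(&s5fs->s5f_mobj, blocknum, pfp);
    if (*pfp)
    {
        /* block is cached: just record the intent to write */
        (*pfp)->pf_dirty |= forwrite;
        mobj_unlock(&s5fs->s5f_mobj);
        return;
    }
    /* not cached: metadata blocks map 1:1 to disk blocks, so pf_loc == blocknum */
    mobj_create_pframe(&s5fs->s5f_mobj, blocknum, blocknum, pfp);
    pframe_t *pf = *pfp;
    blockdev_t *bd = s5fs->s5f_bdev;
    long ret = bd->bd_ops->read_block(bd, pf->pf_addr, (blocknum_t)pf->pf_loc, 1);
    pf->pf_dirty |= forwrite;
    KASSERT(!ret && *pfp);
    mobj_unlock(&s5fs->s5f_mobj);
}
```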
diff --git a/kernel/fs/s5fs/s5fs_subr.c b/kernel/fs/s5fs/s5fs_subr.c
index 0187e74..27b6a92 100644
--- a/kernel/fs/s5fs/s5fs_subr.c
+++ b/kernel/fs/s5fs/s5fs_subr.c
@@ -91,7 +91,6 @@ static inline void s5_release_file_block(pframe_t **pfp)
     pframe_release(pfp);
 }
 
-#ifdef OLD
 /* Given a file and a file block number, return the disk block number of the
  * desired file block.
  *
@@ -100,6 +99,8 @@ static inline void s5_release_file_block(pframe_t **pfp)
  *         the file
  * alloc - If set, allocate the block / indirect block as necessary
  *         If clear, don't allocate sparse blocks
+ * newp  - Return parameter that should be set to 1 if the returned
+ *         block number is new (block has just been allocated)
  *
  * Return a disk block number on success, or:
  *  - 0: The block is sparse, and alloc is clear, OR
@@ -121,20 +122,13 @@ static inline void s5_release_file_block(pframe_t **pfp)
  * 1) file_blocknum < S_NDIRECT_BLOCKS
  * 2) Indirect block is not allocated but alloc is set. Be careful not to
  *    leak a block in an error case!
+ 2a) Make sure you allocate the indirect block on disk and create a
+ corresponding pframe_t on the mobj (Hint: see s5_cache_and_clear_block).
  * 3) Indirect block is allocated. The desired block may be sparse, and you
  *    may have to allocate it.
  * 4) The indirect block has not been allocated and alloc is clear.
  */
 long s5_file_block_to_disk_block(s5_node_t *sn, size_t file_blocknum,
-                                 int alloc)
-{
-    NOT_YET_IMPLEMENTED("S5FS: s5_file_block_to_disk_block");
-    return -1;
-}
-#endif
-
-
-long s5_file_block_to_disk_block(s5_node_t *sn, size_t file_blocknum,
                                  int alloc, int *newp)
 {
     NOT_YET_IMPLEMENTED("S5FS: s5_file_block_to_disk_block");
     return -1;
 }
@@ -211,7 +205,6 @@ ssize_t s5_write_file(s5_node_t *sn, size_t pos, const char *buf, size_t len)
     return -1;
 }
 
-#ifdef OLD
 /* Allocate one block from the filesystem.
  *
  * Return the block number of the newly allocated block, or:
@@ -243,13 +236,6 @@ static long s5_alloc_block(s5fs_t *s5fs)
     NOT_YET_IMPLEMENTED("S5FS: s5_alloc_block");
     return -1;
 }
-#endif
-
-static long s5_alloc_block(s5fs_t *s5fs)
-{
-    NOT_YET_IMPLEMENTED("S5FS: s5_alloc_block");
-    return -1;
-}
 
 /*
  * The exact opposite of s5_alloc_block: add blockno to the free list of the
@@ -282,6 +268,9 @@ static void s5_free_block(s5fs_t *s5fs, blocknum_t blockno)
     else
     {
         s->s5s_free_blocks[s->s5s_nfree++] = blockno;
+        // only delete in this case b/c in first case we're still using that
+        // block as a "meta" block, just to store free block numbers
+        mobj_delete_pframe(&s5fs->s5f_mobj, blockno);
     }
     s5_unlock_super(s5fs);
 }
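The updated comment block for `s5_file_block_to_disk_block` (cases 1-4 plus the new `newp` out-parameter) maps onto a fairly direct outline. The following is a hypothetical sketch under those hints, not the assignment's solution code; type and field names beyond what the hunks show (`s5_inode_t`, `s5_direct_blocks`, `&sn->vnode`) are assumptions:

```c
long s5_file_block_to_disk_block(s5_node_t *sn, size_t file_blocknum,
                                 int alloc, int *newp)
{
    s5_inode_t *inode = &sn->inode;
    *newp = 0;

    /* Case 1: direct block */
    if (file_blocknum < S_NDIRECT_BLOCKS)
    {
        long blockno = inode->s5_direct_blocks[file_blocknum];
        if (blockno || !alloc)
            return blockno;            /* mapped, or sparse with alloc clear */
        blockno = s5_alloc_block(VNODE_TO_S5FS(&sn->vnode));
        if (blockno < 0)
            return blockno;            /* e.g. -ENOSPC */
        inode->s5_direct_blocks[file_blocknum] = (blocknum_t)blockno;
        sn->dirtied_inode = 1;
        *newp = 1;
        return blockno;
    }

    /* Cases 2-4: resolve through the indirect block.
     *  - 2/2a: indirect block missing and alloc set -> allocate it on disk and
     *    cache a zeroed pframe for it (see s5_cache_and_clear_block), taking
     *    care not to leak the block on error.
     *  - 3: indirect block present -> read the entry; allocate if sparse and
     *    alloc is set.
     *  - 4: indirect block missing and alloc clear -> the block is sparse. */
    return 0; /* indirect path omitted in this sketch */
}
```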
diff --git a/kernel/include/fs/s5fs/s5fs_subr.h b/kernel/include/fs/s5fs/s5fs_subr.h
index ff4c570..df2c835 100644
--- a/kernel/include/fs/s5fs/s5fs_subr.h
+++ b/kernel/include/fs/s5fs/s5fs_subr.h
@@ -44,10 +44,4 @@ void s5_remove_blocks(struct s5_node *vnode);
 /* Converts a vnode_t* to the s5fs_t* (s5fs file system) struct */
 #define VNODE_TO_S5FS(vn) ((s5fs_t *)((vn)->vn_fs->fs_i))
 
-#ifdef OLD
-/* Converts an s5fs_t* to its memory object (the memory object of the block device) */
-#define S5FS_TO_VMOBJ(s5fs) (&(s5fs)->s5f_bdev->bd_mobj)
-#endif
-
-
 pframe_t *s5_cache_and_clear_block(mobj_t *mo, long block, long loc);
diff --git a/kernel/include/mm/mobj.h b/kernel/include/mm/mobj.h
index bca1b38..03f0766 100644
--- a/kernel/include/mm/mobj.h
+++ b/kernel/include/mm/mobj.h
@@ -14,11 +14,7 @@ typedef enum
     MOBJ_VNODE = 1,
     MOBJ_SHADOW,
     MOBJ_ANON,
-#ifdef OLD
-    MOBJ_BLOCKDEV,
-#else
     MOBJ_FS,
-#endif
 } mobj_type_t;
 
 typedef struct mobj_ops
@@ -65,6 +61,8 @@ long mobj_flush(mobj_t *o);
 
 long mobj_free_pframe(mobj_t *o, struct pframe **pfp);
 
+void mobj_delete_pframe(mobj_t *o, size_t pagenum);
+
 long mobj_default_get_pframe(mobj_t *o, uint64_t pagenum, long forwrite,
                              struct pframe **pfp);
diff --git a/kernel/mm/mobj.c b/kernel/mm/mobj.c
index 4b9c80f..83addd5 100644
--- a/kernel/mm/mobj.c
+++ b/kernel/mm/mobj.c
@@ -122,9 +122,6 @@ long mobj_get_pframe(mobj_t *o, uint64_t pagenum, long forwrite,
  * Create and initialize a pframe and add it to the mobj's mo_pframes list.
  * Upon successful return, the pframe's pf_mutex is locked.
  */
-#ifdef OLD
-static void mobj_create_pframe(mobj_t *o, uint64_t pagenum, pframe_t **pfp)
-#endif
 void mobj_create_pframe(mobj_t *o, uint64_t pagenum, uint64_t loc, pframe_t **pfp)
 {
     KASSERT(kmutex_owns_mutex(&o->mo_mutex));
@@ -285,6 +282,29 @@ long mobj_free_pframe(mobj_t *o, pframe_t **pfp)
     return 0;
 }
+void mobj_delete_pframe(mobj_t *o, size_t pagenum)
+{
+    pframe_t *pf = NULL;
+    list_iterate(&o->mo_pframes, p, pframe_t, pf_link)
+    {
+        if (p->pf_pagenum == pagenum)
+            pf = p;
+    }
+
+    if (pf)
+    {
+        kmutex_lock(&pf->pf_mutex);
+        list_remove(&pf->pf_link);
+
+        pf->pf_dirty = 0;
+        if (pf->pf_addr)
+        {
+            page_free(pf->pf_addr);
+            pf->pf_addr = NULL;
+        }
+        pframe_free(&pf);
+    }
+}
 
 /*
  * Simply flush the memory object
 */
diff --git a/kernel/mm/pagecache.c b/kernel/mm/pagecache.c
index b1763ba..e3bf73e 100644
--- a/kernel/mm/pagecache.c
+++ b/kernel/mm/pagecache.c
@@ -14,10 +14,3 @@ long pagecache_get_page(pframe_t *pf)
 {
     KASSERT(0 && "page not in pagecache");
     return 0;
 }
-
-#ifdef NO
-void pagecache_newsource(pframe_t pf, blockdev_t *dev, long loc) {
-    pf->pf_srcdev.pf_dev = dev;
-    pf->pf_loc = loc;
-}
-#endif
\ No newline at end of file
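The new `mobj_delete_pframe` (mobj.h/mobj.c above) drops a cached page without writing it back: `pf_dirty` is cleared before the frame is freed, which is exactly what `s5_free_block` wants when a data block returns to the free list. A hypothetical caller-side wrapper, just to show the intent; the wrapper name and the explicit locking are illustrative assumptions, not part of this commit:

```c
/* Illustrative only: discard any cached copy of a block that is being freed,
 * so stale contents are never flushed back to disk. Whether the caller must
 * hold the mobj lock around mobj_delete_pframe is an assumption here. */
static void s5_discard_cached_block(s5fs_t *s5fs, blocknum_t blockno)
{
    mobj_lock(&s5fs->s5f_mobj);
    mobj_delete_pframe(&s5fs->s5f_mobj, blockno); /* no-op if the block is not cached */
    mobj_unlock(&s5fs->s5f_mobj);
}
```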
diff --git a/kernel/proc/kmutex.c b/kernel/proc/kmutex.c
new file mode 100644
index 0000000..7e39d63
--- /dev/null
+++ b/kernel/proc/kmutex.c
@@ -0,0 +1,119 @@
+// SMP.1 + SMP.3
+// spinlock + mask interrupts
+#include "proc/kmutex.h"
+#include "globals.h"
+#include "main/interrupt.h"
+#include <errno.h>
+
+/*
+ * IMPORTANT: Mutexes can _NEVER_ be locked or unlocked from an
+ * interrupt context. Mutexes are _ONLY_ lock or unlocked from a
+ * thread context.
+ */
+
+/*
+ * Checks for the specific deadlock case where:
+ * curthr wants mtx, but the owner of mtx is waiting on a mutex that curthr is
+ * holding
+ */
+#define DEBUG_DEADLOCKS 1
+void detect_deadlocks(kmutex_t *mtx)
+{
+#if DEBUG_DEADLOCKS
+    list_iterate(&curthr->kt_mutexes, held, kmutex_t, km_link)
+    {
+        list_iterate(&held->km_waitq.tq_list, waiter, kthread_t, kt_qlink)
+        {
+            if (waiter == mtx->km_holder)
+            {
+                panic(
+                    "detected deadlock between P%d and P%d (mutexes 0x%p, "
+                    "0x%p)\n",
+                    curproc->p_pid, waiter->kt_proc->p_pid, held, mtx);
+            }
+        }
+    }
+#endif
+}
+
+/*
+ * Initializes the members of mtx
+ */
+void kmutex_init(kmutex_t *mtx)
+{
+    /* PROCS {{{ */
+    mtx->km_holder = NULL;
+    sched_queue_init(&mtx->km_waitq);
+    list_link_init(&mtx->km_link);
+    /* PROCS }}} */
+}
+
+/*
+ * Obtains a mutex, potentially blocking.
+ *
+ * Hints:
+ * You are strongly advised to maintain the kt_mutexes member of curthr and call
+ * detect_deadlocks() to help debugging.
+ */
+void kmutex_lock(kmutex_t *mtx)
+{
+    /* PROCS {{{ */
+
+    dbg(DBG_ERROR, "locked mutex: %p\n", mtx);
+    KASSERT(curthr && "need thread context to lock mutex");
+    KASSERT(!kmutex_owns_mutex(mtx) && "already owner");
+
+    if (mtx->km_holder)
+    {
+        detect_deadlocks(mtx);
+        sched_sleep_on(&mtx->km_waitq);
+        KASSERT(kmutex_owns_mutex(mtx));
+    }
+    else
+    {
+        mtx->km_holder = curthr;
+        list_insert_tail(&curthr->kt_mutexes, &mtx->km_link);
+    }
+    /* PROCS }}} */
+}
+
+/*
+ * Releases a mutex.
+ *
+ * Hints:
+ * Again, you are strongly advised to maintain kt_mutexes.
+ * Use sched_wakeup_on() to hand off the mutex - think carefully about how
+ * these two functions interact to ensure that the mutex's km_holder is
+ * properly set before the new owner is runnable.
+ */
+void kmutex_unlock(kmutex_t *mtx)
+{
+    /* PROCS {{{ */
+    dbg(DBG_ERROR, "unlocked mutex: %p\n", mtx);
+    KASSERT(curthr && (curthr == mtx->km_holder) &&
+            "unlocking a mutex we don\'t own");
+    sched_wakeup_on(&mtx->km_waitq, &mtx->km_holder);
+    KASSERT(!kmutex_owns_mutex(mtx));
+    list_remove(&mtx->km_link);
+    if (mtx->km_holder)
+        list_insert_tail(&mtx->km_holder->kt_mutexes, &mtx->km_link);
+
+    /* PROCS }}} */
+}
+
+/*
+ * Checks if mtx's wait queue is empty.
+ */
+long kmutex_has_waiters(kmutex_t *mtx)
+{
+    return !sched_queue_empty(&mtx->km_waitq);
+    ;
+}
+
+/*
+ * Checks if the current thread owns mtx.
+ */
+inline long kmutex_owns_mutex(kmutex_t *mtx)
+{
+    return curthr && mtx->km_holder == curthr;
+}
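Usage-wise, the kmutex implementation above hands the mutex directly to the first waiter: `kmutex_unlock` passes `&mtx->km_holder` to `sched_wakeup_on`, so a thread returning from `sched_sleep_on` inside `kmutex_lock` already owns the mutex. A small illustrative caller, not part of this commit and with hypothetical names:

```c
static kmutex_t counter_mtx; /* initialized once elsewhere with kmutex_init(&counter_mtx) */
static int counter;

static void counter_increment(void)
{
    /* Never call from interrupt context - kmutex_lock may block. */
    kmutex_lock(&counter_mtx);
    counter++;
    kmutex_unlock(&counter_mtx);
}
```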
diff --git a/python/weenix/userland_new.py b/python/weenix/userland_new.py
new file mode 100644
index 0000000..7d4ff55
--- /dev/null
+++ b/python/weenix/userland_new.py
@@ -0,0 +1,41 @@
+import subprocess
+from os import path
+
+# Define the command and arguments
+command = [
+    "objdump",
+    "--headers",
+    "--section=.text",
+    "user/usr/bin/s5fstest.exec"
+]
+
+
+class NewUserland(gdb.Command):
+    def __init__(self):
+        super(NewUserland, self).__init__("new-userland", gdb.COMMAND_USER)
+
+    def invoke(self, arg, from_tty):
+        directory = 'user/usr/bin/'
+        filename = directory + arg + '.exec'
+        if not path.exists(filename):
+            filename = 'user/bin/' + arg + '.exec'
+        if arg == 'init':
+            filename = 'user/sbin/init.exec'
+
+
+        command = f"objdump --headers --section='.text' {filename} | grep .text | awk '{{print $4}}'"
+
+        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True)
+
+        if result.returncode == 0:
+            print("VMA of the .text section:")
+            text_section = result.stdout.strip()
+
+            gdb.execute(f"add-symbol-file {filename} 0x{text_section}")
+            gdb.execute(f"break main")
+        else:
+            print("Command failed with error:")
+            print(result.stderr)
+
+
+NewUserland()
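Since init.gdb now sources python/weenix/userland_new.py (first hunk), the `new-userland` command is available at the gdb prompt. For a hypothetical run against the s5fstest binary, it resolves user/usr/bin/s5fstest.exec, reads the .text VMA with objdump, and then issues the equivalent of the commands below (the actual address depends on the build):

```
(gdb) new-userland s5fstest
# equivalent to:
(gdb) add-symbol-file user/usr/bin/s5fstest.exec 0x<text-vma>
(gdb) break main
```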