#include "vm/mmap.h" #include "errno.h" #include "fs/file.h" #include "fs/vfs.h" #include "fs/vnode.h" #include "globals.h" #include "mm/mm.h" #include "mm/mman.h" #include "mm/tlb.h" #include "util/debug.h" /* * This function implements the mmap(2) syscall: Add a mapping to the current * process's address space. Supports the following flags: MAP_SHARED, * MAP_PRIVATE, MAP_FIXED, and MAP_ANON. * * ret - If provided, on success, *ret must point to the start of the mapped area * * Return 0 on success, or: * - EACCES: * - a file mapping was requested, but fd is not open for reading. * - MAP_SHARED was requested and PROT_WRITE is set, but fd is * not open in read/write (O_RDWR) mode. * - PROT_WRITE is set, but the file has FMODE_APPEND specified. * - EBADF: * - fd is not a valid file descriptor and MAP_ANON was * not set * - EINVAL: * - addr is not page aligned and MAP_FIXED is specified * - addr is out of range of the user address space and MAP_FIXED is specified * - off is not page aligned * - len is <= 0 or off < 0 * - flags do not contain MAP_PRIVATE or MAP_SHARED * - ENODEV: * - The underlying filesystem of the specified file does not * support memory mapping or in other words, the file's vnode's mmap * operation doesn't exist * - Propagate errors from vmmap_map() * * See the errors section of the mmap(2) man page for more details * * Hints: * 1) A lot of error checking. * 2) Call vmmap_map() to create the mapping. * a) Use VMMAP_DIR_HILO as default, which will make other stencil code in * Weenix happy. * 3) Call tlb_flush_range() on the newly-mapped region. This is because the * newly-mapped region could have been used by someone else, and you don't * want to get stale mappings. * 4) Don't forget to set ret if it was provided. * * If you are mapping less than a page, make sure that you are still allocating * a full page. */ long do_mmap(void *addr, size_t len, int prot, int flags, int fd, off_t off, void **ret) { // NOT_YET_IMPLEMENTED("VM: do_mmap"); // check if addr is page aligned when MAP_FIXED is specified if (PAGE_ALIGNED(addr) == 0 && (flags & MAP_FIXED)) { return -EINVAL; } // check if MAP_FIXED is specified and addr is out of range of the user address space if ((flags & MAP_FIXED) && ((uintptr_t)addr < USER_MEM_LOW || (uintptr_t)addr + len > USER_MEM_HIGH)) { return -EINVAL; } // check if len is not zero (len is an unsigned value, so it is always positive) if (len == 0) { return -EINVAL; } // check if offset is positive and aligned if (off < 0 || PAGE_ALIGNED(off) == 0) { return -EINVAL; } // check if flags do not contain MAP_PRIVATE or MAP_SHARED if ((flags & MAP_PRIVATE) == 0 && (flags & MAP_SHARED) == 0) { return -EINVAL; } // check if fd is not a valid file descriptor and MAP_ANON was not set if (fd < 0 && (flags & MAP_ANON) == 0) { return -EBADF; } // check if a file mapping was requested, but fd is not open for reading // file error checking is done in if statement below file_t *file = NULL; if (fd >= 0 && (flags & MAP_ANON) == 0) { // get the file and check if it is valid file = fget(fd); if (file == NULL) { return -EBADF; } // ENODEV CHECKS // check if the file's vnode's mmap operation doesn't exist if (file->f_vnode->vn_ops == NULL || file->f_vnode->vn_ops->mmap == NULL) { fput(&file); return -ENODEV; } // ACCESS CHECKS // check if thef FMODE_READ flag is not set if ((file->f_mode & FMODE_READ) == 0) { fput(&file); return -EACCES; } // check if append mode is set and PROT_WRITE is set if ((prot & PROT_WRITE) && (file->f_mode & FMODE_APPEND)) { fput(&file); return -EACCES; } // check if MAP_SHARED was requested and PROT_WRITE is set, but fd is not open in read/write (O_RDWR) mode if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && (file->f_mode & FMODE_READ) == 0) { fput(&file); return -EACCES; } // check if PROT_WRITE is set, but the file has FMODE_APPEND specified if ((prot & PROT_WRITE) && (file->f_mode & FMODE_APPEND)) { fput(&file); return -EACCES; } fput(&file); } // Now that error checking is done, we can proceed with the mapping vmarea_t *vma = NULL; long err = vmmap_map( curproc->p_vmmap, file ? file->f_vnode : NULL, ADDR_TO_PN(PAGE_ALIGN_DOWN(addr)), ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)addr + len)) - ADDR_TO_PN(PAGE_ALIGN_DOWN(addr)), prot, flags, off, VMMAP_DIR_HILO, &vma ); // check if vmmap_map() failed if (err < 0) { return err; } // set ret if it was provided void *start = PN_TO_ADDR(vma->vma_start); if (ret) { *ret = start; } // flush the TLB tlb_flush_range( (uintptr_t) start, PAGE_SIZE * (vma->vma_end - vma->vma_start) ); // return 0 on success return 0; } /* * This function implements the munmap(2) syscall. * * Return 0 on success, or: * - EINVAL: * - addr is not aligned on a page boundary * - the region to unmap is out of range of the user address space * - len is 0 * - Propagate errors from vmmap_remove() * * See the errors section of the munmap(2) man page for more details * * Hints: * - Similar to do_mmap(): * 1) Perform error checking. * 2) Call vmmap_remove(). */ long do_munmap(void *addr, size_t len) { // NOT_YET_IMPLEMENTED("VM: do_munmap"); // Check if addr is page aligned if (PAGE_ALIGNED(addr) == 0) { return -EINVAL; } // Check if len is in bounds if (len > USER_MEM_HIGH || len == 0) { return -EINVAL; } // Check if the addr is out of range of the user address space if ((uintptr_t)addr < USER_MEM_LOW || (uintptr_t)addr + len > USER_MEM_HIGH) { return -EINVAL; } // Remove the mapping size_t start = ADDR_TO_PN(addr); size_t end = ADDR_TO_PN(PAGE_ALIGN_UP((uintptr_t)addr + len)); long ret = vmmap_remove( curproc->p_vmmap, start, end - start ); return ret; }