aboutsummaryrefslogtreecommitdiff
path: root/kernel/vm/vmmap.c
blob: f683ca0ff4154b921fc4aefc70d94d379f749310 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
#include "globals.h"
#include "kernel.h"
#include <errno.h>

#include "vm/anon.h"
#include "vm/shadow.h"

#include "util/debug.h"
#include "util/printf.h"
#include "util/string.h"

#include "fs/file.h"
#include "fs/vfs_syscall.h"
#include "fs/vnode.h"

#include "mm/mm.h"
#include "mm/mman.h"
#include "mm/slab.h"

/* Slab caches for vmmap_t and vmarea_t objects; created once in
 * vmmap_init() and used by vmmap_create()/vmarea_alloc(). */
static slab_allocator_t *vmmap_allocator;
static slab_allocator_t *vmarea_allocator;

void vmmap_init(void)
{
    /* Create the slab caches backing all vmarea_t and vmmap_t
     * allocations. The VM subsystem cannot function without them, so
     * failure of either creation is fatal. The two creations are
     * independent of one another. */
    vmarea_allocator = slab_allocator_create("vmarea", sizeof(vmarea_t));
    vmmap_allocator = slab_allocator_create("vmmap", sizeof(vmmap_t));
    KASSERT(vmmap_allocator && vmarea_allocator);
}

/*
 * Allocate and initialize a new vmarea using vmarea_allocator.
 *
 * Should return the new vmarea_t, or NULL if slab allocation fails.
 * (Unimplemented stub: currently panics via NOT_YET_IMPLEMENTED and
 * always returns NULL.)
 */
vmarea_t *vmarea_alloc(void)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return NULL;
}

/*
 * Free the vmarea by removing it from any lists it may be on, putting its
 * vma_obj if it exists, and freeing the vmarea_t.
 *
 * (Unimplemented stub.)
 */
void vmarea_free(vmarea_t *vma)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
}

/*
 * Create and initialize a new vmmap. Initialize all the fields of vmmap_t.
 *
 * Should return the new vmmap_t, or NULL if slab allocation fails.
 * (Unimplemented stub: currently always returns NULL.)
 */
vmmap_t *vmmap_create(void)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return NULL;
}

/*
 * Destroy the map pointed to by mapp and set *mapp = NULL.
 * Remember to free each vma in the map's list.
 *
 * (Unimplemented stub.)
 */
void vmmap_destroy(vmmap_t **mapp)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
}

/*
 * Add a vmarea to an address space. Assumes (i.e. asserts to some extent) the
 * vmarea is valid. Iterate through the list of vmareas, and add it
 * accordingly.
 *
 * NOTE(review): "accordingly" presumably means keeping vmm_list sorted by
 * start page so lookups/range scans can stop early — confirm against the
 * other VM code before implementing.
 *
 * (Unimplemented stub.)
 */
void vmmap_insert(vmmap_t *map, vmarea_t *new_vma)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
}

/*
 * Find a contiguous range of free virtual pages of length npages in the given
 * address space. Returns the starting page number for the range, without
 * altering the map. Return -1 if no such range exists.
 *
 * Your algorithm should be first fit.
 * You should assert that dir is VMMAP_DIR_LOHI OR VMMAP_DIR_HILO.
 * If dir is:
 *    - VMMAP_DIR_HILO: find a gap as high in the address space as possible,
 *                      starting from USER_MEM_HIGH.
 *    - VMMAP_DIR_LOHI: find a gap as low in the address space as possible,
 *                      starting from USER_MEM_LOW.
 *
 * Make sure you are converting between page numbers and addresses correctly!
 *
 * (Unimplemented stub: currently always returns -1.)
 */
ssize_t vmmap_find_range(vmmap_t *map, size_t npages, int dir)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return -1;
}

/*
 * Return the vm_area that vfn (a page number) lies in. Scan the address space
 * looking for a vma whose range covers vfn. If the page is unmapped, return
 * NULL.
 *
 * (Unimplemented stub: currently always returns NULL.)
 */
vmarea_t *vmmap_lookup(vmmap_t *map, size_t vfn)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return NULL;
}

/*
 * Walk every vmarea in the map; for each one whose backing memory object
 * is a shadow object, collapse its shadow chain via shadow_collapse()
 * (called with the mobj locked).
 */
void vmmap_collapse(vmmap_t *map)
{
    list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
    {
        mobj_t *obj = vma->vma_obj;
        if (obj->mo_type == MOBJ_SHADOW)
        {
            mobj_lock(obj);
            shadow_collapse(obj);
            mobj_unlock(obj);
        }
    }
}

/*
 * This is where the magic of fork's copy-on-write gets set up.
 *
 * Upon successful return, the new vmmap should be a clone of map with all
 * shadow objects properly set up.
 *
 * For each vmarea, clone its members.
 *  1) vmarea is share-mapped, you don't need to do anything special.
 *  2) vmarea is not share-mapped, time for shadow objects:
 *     a) Create two shadow objects, one for map and one for the new vmmap you
 *        are constructing, both of which shadow the current vma_obj the vmarea
 *        being cloned.
 *     b) After creating the shadow objects, put the original vma_obj
 *     c) and insert the shadow objects into their respective vma's.
 *
 * Be sure to clean up in any error case, manage the reference counts correctly,
 * and to lock/unlock properly.
 *
 * (Unimplemented stub: currently always returns NULL.)
 */
vmmap_t *vmmap_clone(vmmap_t *map)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return NULL;
}

/*
 *
 * Insert a mapping into the map starting at lopage for npages pages.
 *
 *  file    - If provided, the vnode of the file to be mapped in
 *  lopage  - If provided, the desired start range of the mapping
 *  prot    - See mman.h for possible values
 *  flags   - See do_mmap()'s comments for possible values
 *  off     - Offset in the file to start mapping at, in bytes
 *  dir     - VMMAP_DIR_LOHI or VMMAP_DIR_HILO
 *  new_vma - If provided, on success, must point to the new vmarea_t
 *
 *  Return 0 on success, or:
 *  - ENOMEM: On vmarea_alloc, anon_create, shadow_create or
 *    vmmap_find_range failure
 *  - Propagate errors from file->vn_ops->mmap and vmmap_remove
 *
 * Hints:
 *  - You can assume/assert that all input is valid. It may help to write
 *    this function and do_mmap() somewhat in tandem.
 *  - If file is NULL, create an anon object.
 *  - If file is non-NULL, use the vnode's mmap operation to get the mobj.
 *    Do not assume it is file->vn_obj (mostly relevant for special devices).
 *  - If lopage is 0, use vmmap_find_range() to get a valid range
 *  - If lopage is not 0, the direction flag (dir) is ignored.
 *  - If lopage is nonzero and MAP_FIXED is specified and
 *    the given range overlaps with any preexisting mappings,
 *    remove the preexisting mappings.
 *  - If MAP_PRIVATE is specified, set up a shadow object. Be careful with
 *    refcounts!
 *  - Be careful: off is in bytes (albeit should be page-aligned), but
 *    vma->vma_off is in pages.
 *  - Be careful with the order of operations. Hold off on any irreversible
 *    work until there is no more chance of failure.
 *
 * (Unimplemented stub: note the placeholder return of -1 does not match the
 * documented contract above — the implementation must return 0 or an error
 * per the list above.)
 */
long vmmap_map(vmmap_t *map, vnode_t *file, size_t lopage, size_t npages,
               int prot, int flags, off_t off, int dir, vmarea_t **new_vma)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return -1;
}

/*
 * Iterate over the mapping's vmm_list and make sure that the specified range
 * is completely empty. You will have to handle the following cases:
 *
 * Key:     [             ] = existing vmarea_t
 *              *******     = region to be unmapped
 *
 * Case 1:  [   *******   ]
 * The region to be unmapped lies completely inside the vmarea. We need to
 * split the old vmarea into two vmareas. Be sure to increment the refcount of
 * the object associated with the vmarea.
 *
 * Case 2:  [      *******]**
 * The region overlaps the end of the vmarea. Just shorten the length of
 * the mapping.
 *
 * Case 3: *[*****        ]
 * The region overlaps the beginning of the vmarea. Move the beginning of
 * the mapping (remember to update vma_off), and shorten its length.
 *
 * Case 4: *[*************]**
 * The region completely contains the vmarea. Remove the vmarea from the
 * list.
 *
 * Return 0 on success, or:
 *  - ENOMEM: Failed to allocate a new vmarea when splitting a vmarea (case 1).
 *
 * Hints:
 *  - Whenever you shorten/remove any mappings, be sure to call pt_unmap_range()
 *    and tlb_flush_range() to clean your pagetables and TLB.
 *
 * (Unimplemented stub: currently always returns -1.)
 */
long vmmap_remove(vmmap_t *map, size_t lopage, size_t npages)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return -1;
}

/*
 * Returns 1 if the given address space has no mappings for the given range,
 * 0 otherwise.
 *
 * (Unimplemented stub: currently always returns 0, i.e. "not empty".)
 */
long vmmap_is_range_empty(vmmap_t *map, size_t startvfn, size_t npages)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return 0;
}

/*
 * Read into 'buf' from the virtual address space of 'map'. Start at 'vaddr'
 * for size 'count'. 'vaddr' is not necessarily page-aligned. count is in bytes.
 *
 * Hints:
 *  1) Find the vmareas that correspond to the region to read from.
 *  2) Find the pframes within those vmareas corresponding to the virtual
 *     addresses you want to read.
 *  3) Read from those page frames and copy it into `buf`.
 *  4) You will not need to check the permissions of the area.
 *  5) You may assume/assert that all areas exist.
 *
 * Return 0 on success, -errno on error (propagate from the routines called).
 * This routine will be used within copy_from_user().
 *
 * (Unimplemented stub: currently reports success without copying anything —
 * callers must not rely on it until implemented.)
 */
long vmmap_read(vmmap_t *map, const void *vaddr, void *buf, size_t count)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return 0;
}

/*
 * Write from 'buf' into the virtual address space of 'map' starting at
 * 'vaddr' for size 'count'.
 *
 * Hints:
 *  1) Find the vmareas to write to.
 *  2) Find the correct pframes within those areas that contain the virtual
 *     addresses that you want to write data to.
 *  3) Write to the pframes, copying data from buf.
 *  4) You do not need to check permissions of the areas you use.
 *  5) Assume/assert that all areas exist.
 *  6) Remember to dirty the pages that you write to.
 *
 * Returns 0 on success, -errno on error (propagate from the routines called).
 * This routine will be used within copy_to_user().
 *
 * (Unimplemented stub: currently reports success without writing anything —
 * callers must not rely on it until implemented.)
 */
long vmmap_write(vmmap_t *map, void *vaddr, const void *buf, size_t count)
{
    NOT_YET_IMPLEMENTED("VM: ***none***");
    return 0;
}

/*
 * Render the mapping table for `vmmap` into `buf` (at most `osize` bytes)
 * with no per-line prefix; all formatting is done by the helper below.
 */
size_t vmmap_mapping_info(const void *vmmap, char *buf, size_t osize)
{
    char *no_prefix = "";
    return vmmap_mapping_info_helper(vmmap, buf, osize, no_prefix);
}

/*
 * Format a human-readable table of `vmmap`'s vmareas into `buf`, prefixing
 * every line with `prompt`. At most `osize` bytes (including the NUL
 * terminator) are written.
 *
 * Returns the number of bytes written, or 0 if the output was truncated
 * (historical behavior preserved; the buffer is still NUL-terminated).
 *
 * Fixes over the previous version:
 *  - On truncation, the NUL terminator is written at the end of the
 *    CALLER'S buffer. The old code did `buf[osize - 1] = '\0'` after `buf`
 *    had been advanced past the end of the buffer — an out-of-bounds write.
 *  - The length of the final snprintf (or of the header, when the list is
 *    empty) is now charged against `size`, so the returned byte count no
 *    longer omits the last line written.
 */
size_t vmmap_mapping_info_helper(const void *vmmap, char *buf, size_t osize,
                                 char *prompt)
{
    KASSERT(0 < osize);
    KASSERT(NULL != buf);
    KASSERT(NULL != vmmap);

    vmmap_t *map = (vmmap_t *)vmmap;
    ssize_t size = (ssize_t)osize;
    /* Remember the start of the caller's buffer; `buf` is advanced below. */
    char *start = buf;

    /* Header line. snprintf returns the would-be length, which may exceed
     * the remaining space; the accounting below handles that. */
    int len =
        snprintf(buf, (size_t)size, "%s%37s %5s %7s %18s %11s %23s\n", prompt,
                 "VADDR RANGE", "PROT", "FLAGS", "MOBJ", "OFFSET", "VFN RANGE");

    list_iterate(&map->vmm_list, vma, vmarea_t, vma_plink)
    {
        /* Charge the previous line's length before emitting the next one. */
        size -= len;
        buf += len;
        if (0 >= size)
        {
            goto end;
        }

        len =
            snprintf(buf, (size_t)size,
                     "%s0x%p-0x%p  %c%c%c  %7s 0x%p %#.9lx %#.9lx-%#.9lx\n",
                     prompt, (void *)(vma->vma_start << PAGE_SHIFT),
                     (void *)(vma->vma_end << PAGE_SHIFT),
                     (vma->vma_prot & PROT_READ ? 'r' : '-'),
                     (vma->vma_prot & PROT_WRITE ? 'w' : '-'),
                     (vma->vma_prot & PROT_EXEC ? 'x' : '-'),
                     (vma->vma_flags & MAP_SHARED ? " SHARED" : "PRIVATE"),
                     vma->vma_obj, vma->vma_off, vma->vma_start, vma->vma_end);
    }
    /* Charge the last line emitted (or the header, for an empty list). */
    size -= len;

end:
    if (size <= 0)
    {
        /* Truncated. Each bounded snprintf already NUL-terminated its own
         * write; ensure the very last byte of the caller's buffer is a
         * terminator as well, then report 0 bytes (historical contract). */
        size = osize;
        start[osize - 1] = '\0';
    }
    return osize - size;
}