Annotation of src/sys/uvm/uvm_map.c, Revision 1.204.2.9
1.204.2.9! yamt 1: /* $NetBSD: uvm_map.c,v 1.204.2.8 2008/02/27 08:37:09 yamt Exp $ */
1.1 mrg 2:
1.98 chs 3: /*
1.1 mrg 4: * Copyright (c) 1997 Charles D. Cranor and Washington University.
1.98 chs 5: * Copyright (c) 1991, 1993, The Regents of the University of California.
1.1 mrg 6: *
7: * All rights reserved.
8: *
9: * This code is derived from software contributed to Berkeley by
10: * The Mach Operating System project at Carnegie-Mellon University.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. All advertising materials mentioning features or use of this software
21: * must display the following acknowledgement:
22: * This product includes software developed by Charles D. Cranor,
1.98 chs 23: * Washington University, the University of California, Berkeley and
1.1 mrg 24: * its contributors.
25: * 4. Neither the name of the University nor the names of its contributors
26: * may be used to endorse or promote products derived from this software
27: * without specific prior written permission.
28: *
29: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39: * SUCH DAMAGE.
40: *
41: * @(#)vm_map.c 8.3 (Berkeley) 1/12/94
1.3 mrg 42: * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
1.1 mrg 43: *
44: *
45: * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46: * All rights reserved.
1.98 chs 47: *
1.1 mrg 48: * Permission to use, copy, modify and distribute this software and
49: * its documentation is hereby granted, provided that both the copyright
50: * notice and this permission notice appear in all copies of the
51: * software, derivative works or modified versions, and any portions
52: * thereof, and that both notices appear in supporting documentation.
1.98 chs 53: *
54: * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55: * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
1.1 mrg 56: * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
1.98 chs 57: *
1.1 mrg 58: * Carnegie Mellon requests users of this software to return to
59: *
60: * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61: * School of Computer Science
62: * Carnegie Mellon University
63: * Pittsburgh PA 15213-3890
64: *
65: * any improvements or extensions that they make and grant Carnegie the
66: * rights to redistribute these changes.
67: */
68:
1.114 lukem 69: /*
70: * uvm_map.c: uvm map operations
71: */
72:
73: #include <sys/cdefs.h>
1.204.2.9! yamt 74: __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.204.2.8 2008/02/27 08:37:09 yamt Exp $");
1.114 lukem 75:
1.21 jonathan 76: #include "opt_ddb.h"
1.6 mrg 77: #include "opt_uvmhist.h"
1.169 petrov 78: #include "opt_uvm.h"
1.31 tron 79: #include "opt_sysv.h"
1.1 mrg 80:
81: #include <sys/param.h>
82: #include <sys/systm.h>
83: #include <sys/mman.h>
84: #include <sys/proc.h>
85: #include <sys/malloc.h>
1.25 thorpej 86: #include <sys/pool.h>
1.104 chs 87: #include <sys/kernel.h>
1.112 thorpej 88: #include <sys/mount.h>
1.109 thorpej 89: #include <sys/vnode.h>
1.204.2.6 yamt 90: #include <sys/lockdebug.h>
1.204.2.7 yamt 91: #include <sys/atomic.h>
1.1 mrg 92:
93: #ifdef SYSVSHM
94: #include <sys/shm.h>
95: #endif
96:
97: #include <uvm/uvm.h>
1.151 chs 98: #undef RB_AUGMENT
99: #define RB_AUGMENT(x) uvm_rb_augment(x)
1.21 jonathan 100:
101: #ifdef DDB
102: #include <uvm/uvm_ddb.h>
103: #endif
104:
1.204.2.1 yamt 105: #if defined(UVMMAP_NOCOUNTERS)
1.169 petrov 106:
1.204.2.1 yamt 107: #define UVMMAP_EVCNT_DEFINE(name) /* nothing */
108: #define UVMMAP_EVCNT_INCR(ev) /* nothing */
109: #define UVMMAP_EVCNT_DECR(ev) /* nothing */
110:
111: #else /* defined(UVMMAP_NOCOUNTERS) */
112:
1.204.2.2 yamt 113: #include <sys/evcnt.h>
1.204.2.1 yamt 114: #define UVMMAP_EVCNT_DEFINE(name) \
115: struct evcnt uvmmap_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
116: "uvmmap", #name); \
117: EVCNT_ATTACH_STATIC(uvmmap_evcnt_##name);
118: #define UVMMAP_EVCNT_INCR(ev) uvmmap_evcnt_##ev.ev_count++
119: #define UVMMAP_EVCNT_DECR(ev) uvmmap_evcnt_##ev.ev_count--
120:
121: #endif /* defined(UVMMAP_NOCOUNTERS) */
122:
123: UVMMAP_EVCNT_DEFINE(ubackmerge)
124: UVMMAP_EVCNT_DEFINE(uforwmerge)
125: UVMMAP_EVCNT_DEFINE(ubimerge)
126: UVMMAP_EVCNT_DEFINE(unomerge)
127: UVMMAP_EVCNT_DEFINE(kbackmerge)
128: UVMMAP_EVCNT_DEFINE(kforwmerge)
129: UVMMAP_EVCNT_DEFINE(kbimerge)
130: UVMMAP_EVCNT_DEFINE(knomerge)
131: UVMMAP_EVCNT_DEFINE(map_call)
132: UVMMAP_EVCNT_DEFINE(mlk_call)
133: UVMMAP_EVCNT_DEFINE(mlk_hint)
134:
135: UVMMAP_EVCNT_DEFINE(uke_alloc)
136: UVMMAP_EVCNT_DEFINE(uke_free)
137: UVMMAP_EVCNT_DEFINE(ukh_alloc)
138: UVMMAP_EVCNT_DEFINE(ukh_free)
1.169 petrov 139:
1.87 enami 140: const char vmmapbsy[] = "vmmapbsy";
1.1 mrg 141:
142: /*
1.204.2.7 yamt 143: * cache for vmspace structures.
1.25 thorpej 144: */
145:
1.204.2.7 yamt 146: static struct pool_cache uvm_vmspace_cache;
1.25 thorpej 147:
1.26 thorpej 148: /*
1.204.2.7 yamt 149: * cache for dynamically-allocated map entries.
1.26 thorpej 150: */
151:
1.204.2.7 yamt 152: static struct pool_cache uvm_map_entry_cache;
1.130 thorpej 153:
154: MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
155: MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
1.25 thorpej 156:
1.40 thorpej 157: #ifdef PMAP_GROWKERNEL
158: /*
159: * This global represents the end of the kernel virtual address
160: * space. If we want to exceed this, we must grow the kernel
161: * virtual address space dynamically.
162: *
163: * Note, this variable is locked by kernel_map's lock.
164: */
165: vaddr_t uvm_maxkaddr;
166: #endif
167:
1.25 thorpej 168: /*
1.1 mrg 169: * macros
170: */
171:
172: /*
1.174 yamt 173: * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
174: * for the vm_map.
175: */
176: extern struct vm_map *pager_map; /* XXX */
1.204.2.1 yamt 177: #define VM_MAP_USE_KMAPENT_FLAGS(flags) \
178: (((flags) & VM_MAP_INTRSAFE) != 0)
1.174 yamt 179: #define VM_MAP_USE_KMAPENT(map) \
1.204.2.1 yamt 180: (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)
1.174 yamt 181:
182: /*
1.194 yamt 183: * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
184: */
185:
186: #define UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
187: prot, maxprot, inh, adv, wire) \
188: ((ent)->etype == (type) && \
189: (((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
190: == 0 && \
191: (ent)->object.uvm_obj == (uobj) && \
192: (ent)->protection == (prot) && \
193: (ent)->max_protection == (maxprot) && \
194: (ent)->inheritance == (inh) && \
195: (ent)->advice == (adv) && \
196: (ent)->wired_count == (wire))
197:
198: /*
1.1 mrg 199: * uvm_map_entry_link: insert entry into a map
200: *
201: * => map must be locked
202: */
1.10 mrg 203: #define uvm_map_entry_link(map, after_where, entry) do { \
1.204.2.1 yamt 204: uvm_mapent_check(entry); \
1.10 mrg 205: (map)->nentries++; \
206: (entry)->prev = (after_where); \
207: (entry)->next = (after_where)->next; \
208: (entry)->prev->next = (entry); \
209: (entry)->next->prev = (entry); \
1.144 yamt 210: uvm_rb_insert((map), (entry)); \
1.124 perry 211: } while (/*CONSTCOND*/ 0)
1.10 mrg 212:
1.1 mrg 213: /*
214: * uvm_map_entry_unlink: remove entry from a map
215: *
216: * => map must be locked
217: */
1.10 mrg 218: #define uvm_map_entry_unlink(map, entry) do { \
1.204.2.1 yamt 219: KASSERT((entry) != (map)->first_free); \
220: KASSERT((entry) != (map)->hint); \
221: uvm_mapent_check(entry); \
1.10 mrg 222: (map)->nentries--; \
223: (entry)->next->prev = (entry)->prev; \
224: (entry)->prev->next = (entry)->next; \
1.144 yamt 225: uvm_rb_remove((map), (entry)); \
1.124 perry 226: } while (/*CONSTCOND*/ 0)
1.1 mrg 227:
228: /*
229: * SAVE_HINT: saves the specified entry as the hint for future lookups.
230: *
1.204.2.7 yamt 231: * => map need not be locked.
1.1 mrg 232: */
1.204.2.7 yamt 233: #define SAVE_HINT(map, check, value) do { \
234: atomic_cas_ptr(&(map)->hint, (check), (value)); \
1.124 perry 235: } while (/*CONSTCOND*/ 0)
1.1 mrg 236:
237: /*
1.204.2.1 yamt 238: * clear_hints: ensure that hints don't point to the entry.
239: *
240: * => map must be write-locked.
241: */
242: static void
243: clear_hints(struct vm_map *map, struct vm_map_entry *ent)
244: {
245:
246: SAVE_HINT(map, ent, ent->prev);
247: if (map->first_free == ent) {
248: map->first_free = ent->prev;
249: }
250: }
251:
252: /*
1.1 mrg 253: * VM_MAP_RANGE_CHECK: check and correct range
254: *
255: * => map must at least be read locked
256: */
257:
1.10 mrg 258: #define VM_MAP_RANGE_CHECK(map, start, end) do { \
1.139 enami 259: if (start < vm_map_min(map)) \
260: start = vm_map_min(map); \
261: if (end > vm_map_max(map)) \
262: end = vm_map_max(map); \
263: if (start > end) \
264: start = end; \
1.124 perry 265: } while (/*CONSTCOND*/ 0)
1.1 mrg 266:
267: /*
268: * local prototypes
269: */
270:
1.138 enami 271: static struct vm_map_entry *
272: uvm_mapent_alloc(struct vm_map *, int);
1.174 yamt 273: static struct vm_map_entry *
274: uvm_mapent_alloc_split(struct vm_map *,
275: const struct vm_map_entry *, int,
276: struct uvm_mapent_reservation *);
1.138 enami 277: static void uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
278: static void uvm_mapent_free(struct vm_map_entry *);
1.204.2.1 yamt 279: #if defined(DEBUG)
280: static void _uvm_mapent_check(const struct vm_map_entry *, const char *,
281: int);
282: #define uvm_mapent_check(map) _uvm_mapent_check(map, __FILE__, __LINE__)
283: #else /* defined(DEBUG) */
284: #define uvm_mapent_check(e) /* nothing */
285: #endif /* defined(DEBUG) */
1.174 yamt 286: static struct vm_map_entry *
287: uvm_kmapent_alloc(struct vm_map *, int);
288: static void uvm_kmapent_free(struct vm_map_entry *);
1.204.2.1 yamt 289: static vsize_t uvm_kmapent_overhead(vsize_t);
290:
1.138 enami 291: static void uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
292: static void uvm_map_reference_amap(struct vm_map_entry *, int);
1.140 enami 293: static int uvm_map_space_avail(vaddr_t *, vsize_t, voff_t, vsize_t, int,
294: struct vm_map_entry *);
1.138 enami 295: static void uvm_map_unreference_amap(struct vm_map_entry *, int);
1.1 mrg 296:
1.204.2.1 yamt 297: int _uvm_map_sanity(struct vm_map *);
298: int _uvm_tree_sanity(struct vm_map *);
1.144 yamt 299: static vsize_t uvm_rb_subtree_space(const struct vm_map_entry *);
300:
1.204.2.1 yamt 301: static inline int
1.144 yamt 302: uvm_compare(const struct vm_map_entry *a, const struct vm_map_entry *b)
303: {
304:
305: if (a->start < b->start)
306: return (-1);
307: else if (a->start > b->start)
308: return (1);
1.164 junyoung 309:
1.144 yamt 310: return (0);
311: }
312:
1.204.2.1 yamt 313: static inline void
1.144 yamt 314: uvm_rb_augment(struct vm_map_entry *entry)
315: {
316:
317: entry->space = uvm_rb_subtree_space(entry);
318: }
319:
320: RB_PROTOTYPE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
321:
322: RB_GENERATE(uvm_tree, vm_map_entry, rb_entry, uvm_compare);
323:
1.204.2.1 yamt 324: static inline vsize_t
1.144 yamt 325: uvm_rb_space(const struct vm_map *map, const struct vm_map_entry *entry)
326: {
327: /* XXX map is not used */
328:
329: KASSERT(entry->next != NULL);
330: return entry->next->start - entry->end;
331: }
332:
333: static vsize_t
334: uvm_rb_subtree_space(const struct vm_map_entry *entry)
335: {
336: vaddr_t space, tmp;
337:
338: space = entry->ownspace;
339: if (RB_LEFT(entry, rb_entry)) {
340: tmp = RB_LEFT(entry, rb_entry)->space;
341: if (tmp > space)
342: space = tmp;
343: }
344:
345: if (RB_RIGHT(entry, rb_entry)) {
346: tmp = RB_RIGHT(entry, rb_entry)->space;
347: if (tmp > space)
348: space = tmp;
349: }
350:
351: return (space);
352: }
353:
1.204.2.1 yamt 354: static inline void
1.144 yamt 355: uvm_rb_fixup(struct vm_map *map, struct vm_map_entry *entry)
356: {
357: /* We need to traverse to the very top */
358: do {
359: entry->ownspace = uvm_rb_space(map, entry);
360: entry->space = uvm_rb_subtree_space(entry);
361: } while ((entry = RB_PARENT(entry, rb_entry)) != NULL);
362: }
363:
1.203 thorpej 364: static void
1.144 yamt 365: uvm_rb_insert(struct vm_map *map, struct vm_map_entry *entry)
366: {
367: vaddr_t space = uvm_rb_space(map, entry);
368: struct vm_map_entry *tmp;
369:
370: entry->ownspace = entry->space = space;
371: tmp = RB_INSERT(uvm_tree, &(map)->rbhead, entry);
372: #ifdef DIAGNOSTIC
373: if (tmp != NULL)
374: panic("uvm_rb_insert: duplicate entry?");
375: #endif
376: uvm_rb_fixup(map, entry);
377: if (entry->prev != &map->header)
378: uvm_rb_fixup(map, entry->prev);
379: }
380:
1.203 thorpej 381: static void
1.144 yamt 382: uvm_rb_remove(struct vm_map *map, struct vm_map_entry *entry)
383: {
384: struct vm_map_entry *parent;
385:
386: parent = RB_PARENT(entry, rb_entry);
387: RB_REMOVE(uvm_tree, &(map)->rbhead, entry);
388: if (entry->prev != &map->header)
389: uvm_rb_fixup(map, entry->prev);
390: if (parent)
391: uvm_rb_fixup(map, parent);
392: }
393:
1.204.2.1 yamt 394: #if defined(DEBUG)
395: int uvm_debug_check_map = 0;
1.159 yamt 396: int uvm_debug_check_rbtree = 0;
1.204.2.1 yamt 397: #define uvm_map_check(map, name) \
398: _uvm_map_check((map), (name), __FILE__, __LINE__)
399: static void
400: _uvm_map_check(struct vm_map *map, const char *name,
401: const char *file, int line)
402: {
1.144 yamt 403:
1.204.2.1 yamt 404: if ((uvm_debug_check_map && _uvm_map_sanity(map)) ||
405: (uvm_debug_check_rbtree && _uvm_tree_sanity(map))) {
406: panic("uvm_map_check failed: \"%s\" map=%p (%s:%d)",
407: name, map, file, line);
408: }
409: }
410: #else /* defined(DEBUG) */
411: #define uvm_map_check(map, name) /* nothing */
412: #endif /* defined(DEBUG) */
413:
414: #if defined(DEBUG) || defined(DDB)
1.144 yamt 415: int
1.204.2.1 yamt 416: _uvm_map_sanity(struct vm_map *map)
417: {
1.204.2.3 yamt 418: bool first_free_found = false;
419: bool hint_found = false;
1.204.2.1 yamt 420: const struct vm_map_entry *e;
421:
422: e = &map->header;
423: for (;;) {
424: if (map->first_free == e) {
1.204.2.3 yamt 425: first_free_found = true;
1.204.2.1 yamt 426: } else if (!first_free_found && e->next->start > e->end) {
427: printf("first_free %p should be %p\n",
428: map->first_free, e);
429: return -1;
430: }
431: if (map->hint == e) {
1.204.2.3 yamt 432: hint_found = true;
1.204.2.1 yamt 433: }
434:
435: e = e->next;
436: if (e == &map->header) {
437: break;
438: }
439: }
440: if (!first_free_found) {
441: printf("stale first_free\n");
442: return -1;
443: }
444: if (!hint_found) {
445: printf("stale hint\n");
446: return -1;
447: }
448: return 0;
449: }
450:
451: int
452: _uvm_tree_sanity(struct vm_map *map)
1.144 yamt 453: {
454: struct vm_map_entry *tmp, *trtmp;
455: int n = 0, i = 1;
456:
457: RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
458: if (tmp->ownspace != uvm_rb_space(map, tmp)) {
1.204.2.1 yamt 459: printf("%d/%d ownspace %lx != %lx %s\n",
460: n + 1, map->nentries,
1.144 yamt 461: (ulong)tmp->ownspace, (ulong)uvm_rb_space(map, tmp),
462: tmp->next == &map->header ? "(last)" : "");
463: goto error;
464: }
465: }
466: trtmp = NULL;
467: RB_FOREACH(tmp, uvm_tree, &map->rbhead) {
468: if (tmp->space != uvm_rb_subtree_space(tmp)) {
1.204.2.1 yamt 469: printf("space %lx != %lx\n",
470: (ulong)tmp->space,
1.144 yamt 471: (ulong)uvm_rb_subtree_space(tmp));
472: goto error;
473: }
474: if (trtmp != NULL && trtmp->start >= tmp->start) {
1.204.2.1 yamt 475: printf("corrupt: 0x%lx >= 0x%lx\n",
476: trtmp->start, tmp->start);
1.144 yamt 477: goto error;
478: }
479: n++;
480:
481: trtmp = tmp;
482: }
483:
484: if (n != map->nentries) {
1.204.2.1 yamt 485: printf("nentries: %d vs %d\n", n, map->nentries);
1.144 yamt 486: goto error;
487: }
488:
489: for (tmp = map->header.next; tmp && tmp != &map->header;
490: tmp = tmp->next, i++) {
491: trtmp = RB_FIND(uvm_tree, &map->rbhead, tmp);
492: if (trtmp != tmp) {
1.204.2.1 yamt 493: printf("lookup: %d: %p - %p: %p\n", i, tmp, trtmp,
1.144 yamt 494: RB_PARENT(tmp, rb_entry));
495: goto error;
496: }
497: }
498:
499: return (0);
500: error:
501: return (-1);
502: }
1.204.2.1 yamt 503: #endif /* defined(DEBUG) || defined(DDB) */
1.144 yamt 504:
1.201 dsl 505: #ifdef DIAGNOSTIC
1.203 thorpej 506: static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
1.201 dsl 507: #endif
1.174 yamt 508:
1.1 mrg 509: /*
1.204.2.4 yamt 510: * vm_map_lock: acquire an exclusive (write) lock on a map.
511: *
512: * => Note that "intrsafe" maps use only exclusive, spin locks.
513: *
514: * => The locking protocol provides for guaranteed upgrade from shared ->
515: * exclusive by whichever thread currently has the map marked busy.
516: * See "LOCKING PROTOCOL NOTES" in uvm_map.h. This is horrible; among
517: * other problems, it defeats any fairness guarantees provided by RW
518: * locks.
519: */
520:
521: void
522: vm_map_lock(struct vm_map *map)
523: {
524:
525: if ((map->flags & VM_MAP_INTRSAFE) != 0) {
526: mutex_spin_enter(&map->mutex);
527: return;
528: }
529:
530: for (;;) {
531: rw_enter(&map->lock, RW_WRITER);
532: if (map->busy == NULL)
533: break;
1.204.2.7 yamt 534: if (map->busy == curlwp)
535: break;
1.204.2.4 yamt 536: mutex_enter(&map->misc_lock);
537: rw_exit(&map->lock);
1.204.2.7 yamt 538: if (map->busy != NULL)
539: cv_wait(&map->cv, &map->misc_lock);
1.204.2.4 yamt 540: mutex_exit(&map->misc_lock);
541: }
542:
543: map->timestamp++;
544: }
545:
546: /*
547: * vm_map_lock_try: try to lock a map, failing if it is already locked.
548: */
549:
550: bool
551: vm_map_lock_try(struct vm_map *map)
552: {
553:
554: if ((map->flags & VM_MAP_INTRSAFE) != 0)
555: return mutex_tryenter(&map->mutex);
556: if (!rw_tryenter(&map->lock, RW_WRITER))
557: return false;
558: if (map->busy != NULL) {
559: rw_exit(&map->lock);
560: return false;
561: }
562:
563: map->timestamp++;
564: return true;
565: }
566:
567: /*
568: * vm_map_unlock: release an exclusive lock on a map.
569: */
570:
571: void
572: vm_map_unlock(struct vm_map *map)
573: {
574:
575: if ((map->flags & VM_MAP_INTRSAFE) != 0)
576: mutex_spin_exit(&map->mutex);
577: else {
578: KASSERT(rw_write_held(&map->lock));
1.204.2.7 yamt 579: KASSERT(map->busy == NULL || map->busy == curlwp);
1.204.2.4 yamt 580: rw_exit(&map->lock);
581: }
582: }
583:
584: /*
585: * vm_map_unbusy: mark the map as unbusy, and wake any waiters that
586: * want an exclusive lock.
587: */
588:
589: void
590: vm_map_unbusy(struct vm_map *map)
591: {
592:
593: KASSERT(map->busy == curlwp);
594:
595: /*
596: * Safe to clear 'busy' and 'waiters' with only a read lock held:
597: *
598: * o they can only be set with a write lock held
599: * o writers are blocked out with a read or write hold
600: * o at any time, only one thread owns the set of values
601: */
602: mutex_enter(&map->misc_lock);
1.204.2.7 yamt 603: map->busy = NULL;
1.204.2.4 yamt 604: cv_broadcast(&map->cv);
605: mutex_exit(&map->misc_lock);
606: }
607:
608: /*
1.204.2.7 yamt 609: * vm_map_lock_read: acquire a shared (read) lock on a map.
610: */
611:
612: void
613: vm_map_lock_read(struct vm_map *map)
614: {
615:
616: KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
617:
618: rw_enter(&map->lock, RW_READER);
619: }
620:
621: /*
622: * vm_map_unlock_read: release a shared lock on a map.
623: */
624:
625: void
626: vm_map_unlock_read(struct vm_map *map)
627: {
628:
629: KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
630:
631: rw_exit(&map->lock);
632: }
633:
634: /*
635: * vm_map_busy: mark a map as busy.
636: *
637: * => the caller must hold the map write locked
638: */
639:
640: void
641: vm_map_busy(struct vm_map *map)
642: {
643:
644: KASSERT(rw_write_held(&map->lock));
645: KASSERT(map->busy == NULL);
646:
647: map->busy = curlwp;
648: }
649:
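/*
 * [Editor's illustration] A minimal sketch of how the busy/unbusy
 * hand-off described in the vm_map_lock() comment is meant to be used
 * by a caller that must drop the map lock temporarily (for example, to
 * sleep for memory) without letting other writers in.  This is only a
 * sketch built from the primitives above, not code taken from this file:
 *
 *	vm_map_lock(map);
 *	vm_map_busy(map);	-- requires the write lock
 *	vm_map_unlock(map);
 *	... sleep or allocate without the map lock held ...
 *	vm_map_unbusy(map);	-- wakes threads blocked in vm_map_lock()
 */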
650: /*
651: * vm_map_locked_p: return true if the map is write locked.
652: */
653:
654: bool
655: vm_map_locked_p(struct vm_map *map)
656: {
657:
658: if ((map->flags & VM_MAP_INTRSAFE) != 0) {
659: return mutex_owned(&map->mutex);
660: } else {
661: return rw_write_held(&map->lock);
662: }
663: }
664:
665: /*
1.1 mrg 666: * uvm_mapent_alloc: allocate a map entry
667: */
668:
1.203 thorpej 669: static struct vm_map_entry *
1.138 enami 670: uvm_mapent_alloc(struct vm_map *map, int flags)
1.10 mrg 671: {
1.99 chs 672: struct vm_map_entry *me;
1.127 thorpej 673: int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
1.104 chs 674: UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
1.1 mrg 675:
1.174 yamt 676: if (VM_MAP_USE_KMAPENT(map)) {
677: me = uvm_kmapent_alloc(map, flags);
1.104 chs 678: } else {
1.204.2.7 yamt 679: me = pool_cache_get(&uvm_map_entry_cache, pflags);
1.126 bouyer 680: if (__predict_false(me == NULL))
681: return NULL;
1.104 chs 682: me->flags = 0;
1.10 mrg 683: }
1.1 mrg 684:
1.104 chs 685: UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
686: ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
1.139 enami 687: return (me);
1.1 mrg 688: }
689:
690: /*
1.174 yamt 691: * uvm_mapent_alloc_split: allocate a map entry for clipping.
1.204.2.7 yamt 692: *
693: * => map must be locked by caller if UVM_MAP_QUANTUM is set.
1.174 yamt 694: */
695:
1.203 thorpej 696: static struct vm_map_entry *
1.174 yamt 697: uvm_mapent_alloc_split(struct vm_map *map,
698: const struct vm_map_entry *old_entry, int flags,
699: struct uvm_mapent_reservation *umr)
700: {
701: struct vm_map_entry *me;
702:
703: KASSERT(!VM_MAP_USE_KMAPENT(map) ||
704: (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));
705:
706: if (old_entry->flags & UVM_MAP_QUANTUM) {
1.175 yamt 707: struct vm_map_kernel *vmk = vm_map_to_kernel(map);
1.174 yamt 708:
1.204.2.7 yamt 709: KASSERT(vm_map_locked_p(map));
1.175 yamt 710: me = vmk->vmk_merged_entries;
1.174 yamt 711: KASSERT(me);
1.175 yamt 712: vmk->vmk_merged_entries = me->next;
1.174 yamt 713: KASSERT(me->flags & UVM_MAP_QUANTUM);
714: } else {
715: me = uvm_mapent_alloc(map, flags);
716: }
717:
718: return me;
719: }
720:
721: /*
1.1 mrg 722: * uvm_mapent_free: free map entry
723: */
724:
1.203 thorpej 725: static void
1.138 enami 726: uvm_mapent_free(struct vm_map_entry *me)
1.1 mrg 727: {
1.104 chs 728: UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
729:
1.98 chs 730: UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
1.1 mrg 731: me, me->flags, 0, 0);
1.174 yamt 732: if (me->flags & UVM_MAP_KERNEL) {
733: uvm_kmapent_free(me);
734: } else {
1.204.2.7 yamt 735: pool_cache_put(&uvm_map_entry_cache, me);
1.174 yamt 736: }
737: }
738:
739: /*
1.194 yamt 740: * uvm_mapent_free_merged: free merged map entry
1.174 yamt 741: *
742: * => keep the entry if needed.
1.194 yamt 743: * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
1.204.2.7 yamt 744: * => map should be locked if UVM_MAP_QUANTUM is set.
1.174 yamt 745: */
746:
1.203 thorpej 747: static void
1.181 yamt 748: uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
1.174 yamt 749: {
750:
1.181 yamt 751: KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);
752:
1.174 yamt 753: if (me->flags & UVM_MAP_QUANTUM) {
754: /*
755: * keep this entry for later splitting.
756: */
1.175 yamt 757: struct vm_map_kernel *vmk;
1.174 yamt 758:
1.204.2.7 yamt 759: KASSERT(vm_map_locked_p(map));
1.181 yamt 760: KASSERT(VM_MAP_IS_KERNEL(map));
761: KASSERT(!VM_MAP_USE_KMAPENT(map) ||
762: (me->flags & UVM_MAP_KERNEL));
1.174 yamt 763:
1.175 yamt 764: vmk = vm_map_to_kernel(map);
765: me->next = vmk->vmk_merged_entries;
766: vmk->vmk_merged_entries = me;
1.104 chs 767: } else {
1.174 yamt 768: uvm_mapent_free(me);
1.10 mrg 769: }
1.1 mrg 770: }
771:
772: /*
773: * uvm_mapent_copy: copy a map entry, preserving flags
774: */
775:
1.204.2.1 yamt 776: static inline void
1.138 enami 777: uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
1.10 mrg 778: {
1.139 enami 779:
1.106 chs 780: memcpy(dst, src, ((char *)&src->uvm_map_entry_stop_copy) -
1.139 enami 781: ((char *)src));
1.1 mrg 782: }
783:
784: /*
1.204.2.1 yamt 785: * uvm_mapent_overhead: calculate maximum kva overhead necessary for
786: * map entries.
787: *
788: * => size and flags are the same as uvm_km_suballoc's ones.
789: */
790:
791: vsize_t
792: uvm_mapent_overhead(vsize_t size, int flags)
793: {
794:
795: if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
796: return uvm_kmapent_overhead(size);
797: }
798: return 0;
799: }
800:
801: #if defined(DEBUG)
802: static void
803: _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
804: {
805:
806: if (entry->start >= entry->end) {
807: goto bad;
808: }
809: if (UVM_ET_ISOBJ(entry)) {
810: if (entry->object.uvm_obj == NULL) {
811: goto bad;
812: }
813: } else if (UVM_ET_ISSUBMAP(entry)) {
814: if (entry->object.sub_map == NULL) {
815: goto bad;
816: }
817: } else {
818: if (entry->object.uvm_obj != NULL ||
819: entry->object.sub_map != NULL) {
820: goto bad;
821: }
822: }
823: if (!UVM_ET_ISOBJ(entry)) {
824: if (entry->offset != 0) {
825: goto bad;
826: }
827: }
828:
829: return;
830:
831: bad:
832: panic("%s: bad entry %p (%s:%d)", __func__, entry, file, line);
833: }
834: #endif /* defined(DEBUG) */
835:
836: /*
1.1 mrg 837: * uvm_map_entry_unwire: unwire a map entry
838: *
839: * => map should be locked by caller
840: */
841:
1.204.2.1 yamt 842: static inline void
1.138 enami 843: uvm_map_entry_unwire(struct vm_map *map, struct vm_map_entry *entry)
1.10 mrg 844: {
1.139 enami 845:
1.10 mrg 846: entry->wired_count = 0;
1.57 thorpej 847: uvm_fault_unwire_locked(map, entry->start, entry->end);
1.1 mrg 848: }
849:
1.85 chs 850:
851: /*
852: * wrapper for calling amap_ref()
853: */
1.204.2.1 yamt 854: static inline void
1.138 enami 855: uvm_map_reference_amap(struct vm_map_entry *entry, int flags)
1.85 chs 856: {
1.139 enami 857:
1.99 chs 858: amap_ref(entry->aref.ar_amap, entry->aref.ar_pageoff,
1.139 enami 859: (entry->end - entry->start) >> PAGE_SHIFT, flags);
1.85 chs 860: }
861:
862:
863: /*
1.98 chs 864: * wrapper for calling amap_unref()
1.85 chs 865: */
1.204.2.1 yamt 866: static inline void
1.138 enami 867: uvm_map_unreference_amap(struct vm_map_entry *entry, int flags)
1.85 chs 868: {
1.139 enami 869:
1.99 chs 870: amap_unref(entry->aref.ar_amap, entry->aref.ar_pageoff,
1.139 enami 871: (entry->end - entry->start) >> PAGE_SHIFT, flags);
1.85 chs 872: }
873:
874:
1.1 mrg 875: /*
1.204.2.7 yamt 876: * uvm_map_init: init mapping system at boot time.
1.1 mrg 877: */
878:
1.10 mrg 879: void
1.138 enami 880: uvm_map_init(void)
1.1 mrg 881: {
882: #if defined(UVMHIST)
1.10 mrg 883: static struct uvm_history_ent maphistbuf[100];
884: static struct uvm_history_ent pdhistbuf[100];
1.1 mrg 885: #endif
1.10 mrg 886:
887: /*
888: * first, init logging system.
889: */
1.1 mrg 890:
1.10 mrg 891: UVMHIST_FUNC("uvm_map_init");
892: UVMHIST_INIT_STATIC(maphist, maphistbuf);
893: UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
894: UVMHIST_CALLED(maphist);
895: UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
896:
897: /*
1.174 yamt 898: * initialize the global lock for kernel map entry.
1.10 mrg 899: */
900:
1.204.2.4 yamt 901: mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
1.204.2.7 yamt 902:
903: /*
904: * initialize caches.
905: */
906:
907: pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
908: 0, 0, 0, "vmmpepl", NULL, IPL_NONE, NULL, NULL, NULL);
909: pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
910: 0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
1.1 mrg 911: }
912:
913: /*
914: * clippers
915: */
916:
917: /*
1.204.2.1 yamt 918: * uvm_mapent_splitadj: adjust map entries for splitting, after uvm_mapent_copy.
919: */
920:
921: static void
922: uvm_mapent_splitadj(struct vm_map_entry *entry1, struct vm_map_entry *entry2,
923: vaddr_t splitat)
924: {
925: vaddr_t adj;
926:
927: KASSERT(entry1->start < splitat);
928: KASSERT(splitat < entry1->end);
929:
930: adj = splitat - entry1->start;
931: entry1->end = entry2->start = splitat;
932:
933: if (entry1->aref.ar_amap) {
934: amap_splitref(&entry1->aref, &entry2->aref, adj);
935: }
936: if (UVM_ET_ISSUBMAP(entry1)) {
937: /* ... unlikely to happen, but play it safe */
938: uvm_map_reference(entry1->object.sub_map);
939: } else if (UVM_ET_ISOBJ(entry1)) {
940: KASSERT(entry1->object.uvm_obj != NULL); /* suppress coverity */
941: entry2->offset += adj;
942: if (entry1->object.uvm_obj->pgops &&
943: entry1->object.uvm_obj->pgops->pgo_reference)
944: entry1->object.uvm_obj->pgops->pgo_reference(
945: entry1->object.uvm_obj);
946: }
947: }
948:
949: /*
1.1 mrg 950: * uvm_map_clip_start: ensure that the entry begins at or after
951: * the starting address, if it doesn't we split the entry.
1.98 chs 952: *
1.1 mrg 953: * => caller should use UVM_MAP_CLIP_START macro rather than calling
954: * this directly
955: * => map must be locked by caller
956: */
957:
1.99 chs 958: void
1.138 enami 959: uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
1.174 yamt 960: vaddr_t start, struct uvm_mapent_reservation *umr)
1.1 mrg 961: {
1.99 chs 962: struct vm_map_entry *new_entry;
1.1 mrg 963:
964: /* uvm_map_simplify_entry(map, entry); */ /* XXX */
965:
1.204.2.1 yamt 966: uvm_map_check(map, "clip_start entry");
967: uvm_mapent_check(entry);
1.144 yamt 968:
1.10 mrg 969: /*
970: * Split off the front portion. note that we must insert the new
971: * entry BEFORE this one, so that this entry has the specified
1.1 mrg 972: * starting address.
1.10 mrg 973: */
1.174 yamt 974: new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
1.1 mrg 975: uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1.204.2.1 yamt 976: uvm_mapent_splitadj(new_entry, entry, start);
1.10 mrg 977: uvm_map_entry_link(map, entry->prev, new_entry);
1.85 chs 978:
1.204.2.1 yamt 979: uvm_map_check(map, "clip_start leave");
1.1 mrg 980: }
981:
982: /*
983: * uvm_map_clip_end: ensure that the entry ends at or before
   984: 	 * the ending address, if it doesn't we split the entry
1.98 chs 985: *
1.1 mrg 986: * => caller should use UVM_MAP_CLIP_END macro rather than calling
987: * this directly
988: * => map must be locked by caller
989: */
990:
1.10 mrg 991: void
1.174 yamt 992: uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
993: struct uvm_mapent_reservation *umr)
1.1 mrg 994: {
1.204.2.1 yamt 995: struct vm_map_entry *new_entry;
1.1 mrg 996:
1.204.2.1 yamt 997: uvm_map_check(map, "clip_end entry");
998: uvm_mapent_check(entry);
1.174 yamt 999:
1.1 mrg 1000: /*
1001: * Create a new entry and insert it
1002: * AFTER the specified entry
1003: */
1.174 yamt 1004: new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
1.1 mrg 1005: uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1.204.2.1 yamt 1006: uvm_mapent_splitadj(entry, new_entry, end);
1007: uvm_map_entry_link(map, entry, new_entry);
1.1 mrg 1008:
1.204.2.1 yamt 1009: uvm_map_check(map, "clip_end leave");
1010: }
1.144 yamt 1011:
1.204.2.1 yamt 1012: static void
1013: vm_map_drain(struct vm_map *map, uvm_flag_t flags)
1014: {
1.1 mrg 1015:
1.204.2.1 yamt 1016: if (!VM_MAP_IS_KERNEL(map)) {
1017: return;
1.1 mrg 1018: }
1.144 yamt 1019:
1.204.2.1 yamt 1020: uvm_km_va_drain(map, flags);
1.1 mrg 1021: }
1022:
1023: /*
1024: * M A P - m a i n e n t r y p o i n t
1025: */
1026: /*
1027: * uvm_map: establish a valid mapping in a map
1028: *
1029: * => assume startp is page aligned.
1030: * => assume size is a multiple of PAGE_SIZE.
1031: * => assume sys_mmap provides enough of a "hint" to have us skip
1032: * over text/data/bss area.
1033: * => map must be unlocked (we will lock it)
1034: * => <uobj,uoffset> value meanings (4 cases):
1.139 enami 1035: * [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
1.1 mrg 1036: * [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
1037: * [3] <uobj,uoffset> == normal mapping
1038: * [4] <uobj,UVM_UNKNOWN_OFFSET> == uvm_map finds offset based on VA
1.98 chs 1039: *
1.1 mrg 1040: * case [4] is for kernel mappings where we don't know the offset until
1.8 chuck 1041: * we've found a virtual address. note that kernel object offsets are
1042: * always relative to vm_map_min(kernel_map).
1.81 thorpej 1043: *
1.165 yamt 1044: * => if `align' is non-zero, we align the virtual address to the specified
1045: * alignment.
1046: * this is provided as a mechanism for large pages.
1.81 thorpej 1047: *
1.1 mrg 1048: * => XXXCDC: need way to map in external amap?
1049: */
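/*
 * [Editor's illustration] A hypothetical call for case [3] above (a
 * normal object mapping), assuming the UVM_MAPFLAG() encoding from
 * uvm_extern.h; a sketch only, not taken from this file:
 *
 *	vaddr_t va = 0;
 *	int error = uvm_map(kernel_map, &va, round_page(len), uobj, 0, 0,
 *	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
 *	    UVM_ADV_NORMAL, 0));
 */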
1050:
1.10 mrg 1051: int
1.138 enami 1052: uvm_map(struct vm_map *map, vaddr_t *startp /* IN/OUT */, vsize_t size,
1053: struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags)
1.10 mrg 1054: {
1.174 yamt 1055: struct uvm_map_args args;
1056: struct vm_map_entry *new_entry;
1057: int error;
1058:
1.181 yamt 1059: KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
1.187 yamt 1060: KASSERT((size & PAGE_MASK) == 0);
1.174 yamt 1061:
1062: /*
1063: * for pager_map, allocate the new entry first to avoid sleeping
1064: * for memory while we have the map locked.
1065: *
  1066: 	 * besides, because we allocate entries for in-kernel maps
1067: * a bit differently (cf. uvm_kmapent_alloc/free), we need to
1068: * allocate them before locking the map.
1069: */
1070:
1071: new_entry = NULL;
1.181 yamt 1072: if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
1073: map == pager_map) {
1.174 yamt 1074: new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
1075: if (__predict_false(new_entry == NULL))
1076: return ENOMEM;
1077: if (flags & UVM_FLAG_QUANTUM)
1078: new_entry->flags |= UVM_MAP_QUANTUM;
1079: }
1080: if (map == pager_map)
1081: flags |= UVM_FLAG_NOMERGE;
1082:
1083: error = uvm_map_prepare(map, *startp, size, uobj, uoffset, align,
1084: flags, &args);
1085: if (!error) {
1086: error = uvm_map_enter(map, &args, new_entry);
1087: *startp = args.uma_start;
1.189 yamt 1088: } else if (new_entry) {
1089: uvm_mapent_free(new_entry);
1.174 yamt 1090: }
1091:
1.187 yamt 1092: #if defined(DEBUG)
1093: if (!error && VM_MAP_IS_KERNEL(map)) {
1094: uvm_km_check_empty(*startp, *startp + size,
1095: (map->flags & VM_MAP_INTRSAFE) != 0);
1096: }
1097: #endif /* defined(DEBUG) */
1098:
1.174 yamt 1099: return error;
1100: }
1101:
1102: int
1103: uvm_map_prepare(struct vm_map *map, vaddr_t start, vsize_t size,
1104: struct uvm_object *uobj, voff_t uoffset, vsize_t align, uvm_flag_t flags,
1105: struct uvm_map_args *args)
1106: {
1107: struct vm_map_entry *prev_entry;
1108: vm_prot_t prot = UVM_PROTECTION(flags);
1109: vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1110:
1111: UVMHIST_FUNC("uvm_map_prepare");
1.10 mrg 1112: UVMHIST_CALLED(maphist);
1.1 mrg 1113:
1.174 yamt 1114: UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1115: map, start, size, flags);
1.10 mrg 1116: UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1.107 chs 1117:
1118: /*
1119: * detect a popular device driver bug.
1120: */
1121:
1.139 enami 1122: KASSERT(doing_shutdown || curlwp != NULL ||
1.129 christos 1123: (map->flags & VM_MAP_INTRSAFE));
1.1 mrg 1124:
1.10 mrg 1125: /*
1.144 yamt 1126: * zero-sized mapping doesn't make any sense.
1127: */
1128: KASSERT(size > 0);
1129:
1.180 yamt 1130: KASSERT((~flags & (UVM_FLAG_NOWAIT | UVM_FLAG_WAITVA)) != 0);
1131:
1.204.2.1 yamt 1132: uvm_map_check(map, "map entry");
1.144 yamt 1133:
1134: /*
1.106 chs 1135: * check sanity of protection code
1.10 mrg 1136: */
1.1 mrg 1137:
1.10 mrg 1138: if ((prot & maxprot) != prot) {
1.98 chs 1139: UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x",
1.10 mrg 1140: prot, maxprot,0,0);
1.94 chs 1141: return EACCES;
1.10 mrg 1142: }
1.1 mrg 1143:
1.10 mrg 1144: /*
1.106 chs 1145: * figure out where to put new VM range
1.10 mrg 1146: */
1.1 mrg 1147:
1.180 yamt 1148: retry:
1.204.2.3 yamt 1149: if (vm_map_lock_try(map) == false) {
1.204.2.7 yamt 1150: if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
1151: (map->flags & VM_MAP_INTRSAFE) == 0) {
1.94 chs 1152: return EAGAIN;
1.106 chs 1153: }
1.10 mrg 1154: vm_map_lock(map); /* could sleep here */
1155: }
1.204.2.1 yamt 1156: prev_entry = uvm_map_findspace(map, start, size, &start,
1157: uobj, uoffset, align, flags);
1158: if (prev_entry == NULL) {
1.180 yamt 1159: unsigned int timestamp;
1160:
1161: timestamp = map->timestamp;
1162: UVMHIST_LOG(maphist,"waiting va timestamp=0x%x",
1163: timestamp,0,0,0);
1164: map->flags |= VM_MAP_WANTVA;
1.10 mrg 1165: vm_map_unlock(map);
1.180 yamt 1166:
1167: /*
1.204.2.1 yamt 1168: * try to reclaim kva and wait until someone does unmap.
1.204.2.4 yamt 1169: * fragile locking here, so we awaken every second to
1170: * recheck the condition.
1.180 yamt 1171: */
1172:
1.204.2.1 yamt 1173: vm_map_drain(map, flags);
1174:
1.204.2.4 yamt 1175: mutex_enter(&map->misc_lock);
1.180 yamt 1176: while ((map->flags & VM_MAP_WANTVA) != 0 &&
1177: map->timestamp == timestamp) {
1.204.2.1 yamt 1178: if ((flags & UVM_FLAG_WAITVA) == 0) {
1.204.2.4 yamt 1179: mutex_exit(&map->misc_lock);
1.204.2.1 yamt 1180: UVMHIST_LOG(maphist,
1181: "<- uvm_map_findspace failed!", 0,0,0,0);
1182: return ENOMEM;
1183: } else {
1.204.2.4 yamt 1184: cv_timedwait(&map->cv, &map->misc_lock, hz);
1.204.2.1 yamt 1185: }
1.180 yamt 1186: }
1.204.2.4 yamt 1187: mutex_exit(&map->misc_lock);
1.180 yamt 1188: goto retry;
1.10 mrg 1189: }
1.1 mrg 1190:
1.40 thorpej 1191: #ifdef PMAP_GROWKERNEL
1.152 simonb 1192: /*
1193: * If the kernel pmap can't map the requested space,
1194: * then allocate more resources for it.
1195: */
1.174 yamt 1196: if (map == kernel_map && uvm_maxkaddr < (start + size))
1197: uvm_maxkaddr = pmap_growkernel(start + size);
1.10 mrg 1198: #endif
1199:
1.204.2.1 yamt 1200: UVMMAP_EVCNT_INCR(map_call);
1.10 mrg 1201:
1202: /*
1203: * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
1.98 chs 1204: * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET. in
1205: * either case we want to zero it before storing it in the map entry
1.10 mrg 1206: * (because it looks strange and confusing when debugging...)
1.98 chs 1207: *
1208: * if uobj is not null
1.10 mrg 1209: * if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
1210: * and we do not need to change uoffset.
1211: * if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
1212: * now (based on the starting address of the map). this case is
1213: * for kernel object mappings where we don't know the offset until
1214: * the virtual address is found (with uvm_map_findspace). the
1215: * offset is the distance we are from the start of the map.
1216: */
1217:
1218: if (uobj == NULL) {
1219: uoffset = 0;
1220: } else {
1221: if (uoffset == UVM_UNKNOWN_OFFSET) {
1.85 chs 1222: KASSERT(UVM_OBJ_IS_KERN_OBJECT(uobj));
1.174 yamt 1223: uoffset = start - vm_map_min(kernel_map);
1.10 mrg 1224: }
1225: }
1226:
1.174 yamt 1227: args->uma_flags = flags;
1228: args->uma_prev = prev_entry;
1229: args->uma_start = start;
1230: args->uma_size = size;
1231: args->uma_uobj = uobj;
1232: args->uma_uoffset = uoffset;
1233:
1234: return 0;
1235: }
1236:
1237: int
1238: uvm_map_enter(struct vm_map *map, const struct uvm_map_args *args,
1239: struct vm_map_entry *new_entry)
1240: {
1241: struct vm_map_entry *prev_entry = args->uma_prev;
1242: struct vm_map_entry *dead = NULL;
1243:
1244: const uvm_flag_t flags = args->uma_flags;
1245: const vm_prot_t prot = UVM_PROTECTION(flags);
1246: const vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
1247: const vm_inherit_t inherit = UVM_INHERIT(flags);
1248: const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
1249: AMAP_EXTEND_NOWAIT : 0;
1250: const int advice = UVM_ADVICE(flags);
1251: const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
1252: UVM_MAP_QUANTUM : 0;
1253:
1254: vaddr_t start = args->uma_start;
1255: vsize_t size = args->uma_size;
1256: struct uvm_object *uobj = args->uma_uobj;
1257: voff_t uoffset = args->uma_uoffset;
1258:
1259: const int kmap = (vm_map_pmap(map) == pmap_kernel());
1260: int merged = 0;
1261: int error;
1.176 yamt 1262: int newetype;
1.174 yamt 1263:
1264: UVMHIST_FUNC("uvm_map_enter");
1265: UVMHIST_CALLED(maphist);
1266:
1267: UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)",
1268: map, start, size, flags);
1269: UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1270:
1.204.2.1 yamt 1271: KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
1272:
1.174 yamt 1273: if (flags & UVM_FLAG_QUANTUM) {
1274: KASSERT(new_entry);
1275: KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
1276: }
1277:
1.176 yamt 1278: if (uobj)
1279: newetype = UVM_ET_OBJ;
1280: else
1281: newetype = 0;
1282:
1283: if (flags & UVM_FLAG_COPYONW) {
1284: newetype |= UVM_ET_COPYONWRITE;
1285: if ((flags & UVM_FLAG_OVERLAY) == 0)
1286: newetype |= UVM_ET_NEEDSCOPY;
1287: }
1288:
1.10 mrg 1289: /*
1.106 chs 1290: * try and insert in map by extending previous entry, if possible.
1.10 mrg 1291: * XXX: we don't try and pull back the next entry. might be useful
1292: * for a stack, but we are currently allocating our stack in advance.
1293: */
1294:
1.121 atatat 1295: if (flags & UVM_FLAG_NOMERGE)
1296: goto nomerge;
1297:
1.194 yamt 1298: if (prev_entry->end == start &&
1.121 atatat 1299: prev_entry != &map->header &&
1.194 yamt 1300: UVM_ET_ISCOMPATIBLE(prev_entry, newetype, uobj, meflagval,
1301: prot, maxprot, inherit, advice, 0)) {
1.161 matt 1302:
1.10 mrg 1303: if (uobj && prev_entry->offset +
1304: (prev_entry->end - prev_entry->start) != uoffset)
1.121 atatat 1305: goto forwardmerge;
1.10 mrg 1306:
1307: /*
1.98 chs 1308: * can't extend a shared amap. note: no need to lock amap to
1.34 chuck 1309: * look at refs since we don't care about its exact value.
1.10 mrg 1310: * if it is one (i.e. we have only reference) it will stay there
1311: */
1.85 chs 1312:
1.10 mrg 1313: if (prev_entry->aref.ar_amap &&
1.34 chuck 1314: amap_refs(prev_entry->aref.ar_amap) != 1) {
1.121 atatat 1315: goto forwardmerge;
1.10 mrg 1316: }
1.85 chs 1317:
1.119 chs 1318: if (prev_entry->aref.ar_amap) {
1.139 enami 1319: error = amap_extend(prev_entry, size,
1.126 bouyer 1320: amapwaitflag | AMAP_EXTEND_FORWARDS);
1.174 yamt 1321: if (error)
1.191 yamt 1322: goto nomerge;
1.119 chs 1323: }
1.10 mrg 1324:
1.121 atatat 1325: if (kmap)
1.204.2.1 yamt 1326: UVMMAP_EVCNT_INCR(kbackmerge);
1.121 atatat 1327: else
1.204.2.1 yamt 1328: UVMMAP_EVCNT_INCR(ubackmerge);
1.10 mrg 1329: UVMHIST_LOG(maphist," starting back merge", 0, 0, 0, 0);
1330:
1331: /*
1332: * drop our reference to uobj since we are extending a reference
1333: * that we already have (the ref count can not drop to zero).
1334: */
1.119 chs 1335:
1.10 mrg 1336: if (uobj && uobj->pgops->pgo_detach)
1337: uobj->pgops->pgo_detach(uobj);
1338:
1339: prev_entry->end += size;
1.145 yamt 1340: uvm_rb_fixup(map, prev_entry);
1341:
1.204.2.1 yamt 1342: uvm_map_check(map, "map backmerged");
1.10 mrg 1343:
1344: UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
1.121 atatat 1345: merged++;
1.106 chs 1346: }
1.10 mrg 1347:
1.121 atatat 1348: forwardmerge:
1.194 yamt 1349: if (prev_entry->next->start == (start + size) &&
1.121 atatat 1350: prev_entry->next != &map->header &&
1.194 yamt 1351: UVM_ET_ISCOMPATIBLE(prev_entry->next, newetype, uobj, meflagval,
1352: prot, maxprot, inherit, advice, 0)) {
1.161 matt 1353:
1.121 atatat 1354: if (uobj && prev_entry->next->offset != uoffset + size)
1355: goto nomerge;
1356:
1357: /*
1358: * can't extend a shared amap. note: no need to lock amap to
1359: * look at refs since we don't care about its exact value.
1.122 atatat 1360: * if it is one (i.e. we have only reference) it will stay there.
1361: *
1362: * note that we also can't merge two amaps, so if we
1363: * merged with the previous entry which has an amap,
1364: * and the next entry also has an amap, we give up.
1365: *
1.125 atatat 1366: * Interesting cases:
1367: * amap, new, amap -> give up second merge (single fwd extend)
1368: * amap, new, none -> double forward extend (extend again here)
1369: * none, new, amap -> double backward extend (done here)
1370: * uobj, new, amap -> single backward extend (done here)
1371: *
1.122 atatat 1372: * XXX should we attempt to deal with someone refilling
1373: * the deallocated region between two entries that are
1374: * backed by the same amap (ie, arefs is 2, "prev" and
1375: * "next" refer to it, and adding this allocation will
1376: * close the hole, thus restoring arefs to 1 and
1377: * deallocating the "next" vm_map_entry)? -- @@@
1.121 atatat 1378: */
1379:
1380: if (prev_entry->next->aref.ar_amap &&
1.122 atatat 1381: (amap_refs(prev_entry->next->aref.ar_amap) != 1 ||
1382: (merged && prev_entry->aref.ar_amap))) {
1.121 atatat 1383: goto nomerge;
1384: }
1385:
1.122 atatat 1386: if (merged) {
1.123 atatat 1387: /*
1388: * Try to extend the amap of the previous entry to
1389: * cover the next entry as well. If it doesn't work
1390: * just skip on, don't actually give up, since we've
1391: * already completed the back merge.
1392: */
1.125 atatat 1393: if (prev_entry->aref.ar_amap) {
1394: if (amap_extend(prev_entry,
1395: prev_entry->next->end -
1396: prev_entry->next->start,
1.126 bouyer 1397: amapwaitflag | AMAP_EXTEND_FORWARDS))
1.142 enami 1398: goto nomerge;
1.125 atatat 1399: }
1400:
1401: /*
1402: * Try to extend the amap of the *next* entry
1403: * back to cover the new allocation *and* the
1404: * previous entry as well (the previous merge
1405: * didn't have an amap already otherwise we
1406: * wouldn't be checking here for an amap). If
1407: * it doesn't work just skip on, again, don't
1408: * actually give up, since we've already
1409: * completed the back merge.
1410: */
1411: else if (prev_entry->next->aref.ar_amap) {
1412: if (amap_extend(prev_entry->next,
1413: prev_entry->end -
1.141 atatat 1414: prev_entry->start,
1.126 bouyer 1415: amapwaitflag | AMAP_EXTEND_BACKWARDS))
1.142 enami 1416: goto nomerge;
1.125 atatat 1417: }
1418: } else {
1419: /*
1420: * Pull the next entry's amap backwards to cover this
1421: * new allocation.
1422: */
1423: if (prev_entry->next->aref.ar_amap) {
1424: error = amap_extend(prev_entry->next, size,
1.126 bouyer 1425: amapwaitflag | AMAP_EXTEND_BACKWARDS);
1.174 yamt 1426: if (error)
1.191 yamt 1427: goto nomerge;
1.125 atatat 1428: }
1.122 atatat 1429: }
1430:
1.121 atatat 1431: if (merged) {
1432: if (kmap) {
1.204.2.1 yamt 1433: UVMMAP_EVCNT_DECR(kbackmerge);
1434: UVMMAP_EVCNT_INCR(kbimerge);
1.121 atatat 1435: } else {
1.204.2.1 yamt 1436: UVMMAP_EVCNT_DECR(ubackmerge);
1437: UVMMAP_EVCNT_INCR(ubimerge);
1.121 atatat 1438: }
1.122 atatat 1439: } else {
1.121 atatat 1440: if (kmap)
1.204.2.1 yamt 1441: UVMMAP_EVCNT_INCR(kforwmerge);
1.121 atatat 1442: else
1.204.2.1 yamt 1443: UVMMAP_EVCNT_INCR(uforwmerge);
1.121 atatat 1444: }
1445: UVMHIST_LOG(maphist," starting forward merge", 0, 0, 0, 0);
1.10 mrg 1446:
1.121 atatat 1447: /*
1448: * drop our reference to uobj since we are extending a reference
1449: * that we already have (the ref count can not drop to zero).
1450: * (if merged, we've already detached)
1451: */
1452: if (uobj && uobj->pgops->pgo_detach && !merged)
1453: uobj->pgops->pgo_detach(uobj);
1.1 mrg 1454:
1.121 atatat 1455: if (merged) {
1.174 yamt 1456: dead = prev_entry->next;
1.121 atatat 1457: prev_entry->end = dead->end;
1458: uvm_map_entry_unlink(map, dead);
1.125 atatat 1459: if (dead->aref.ar_amap != NULL) {
1460: prev_entry->aref = dead->aref;
1461: dead->aref.ar_amap = NULL;
1462: }
1.121 atatat 1463: } else {
1464: prev_entry->next->start -= size;
1.145 yamt 1465: if (prev_entry != &map->header)
1466: uvm_rb_fixup(map, prev_entry);
1.121 atatat 1467: if (uobj)
1468: prev_entry->next->offset = uoffset;
1469: }
1.145 yamt 1470:
1.204.2.1 yamt 1471: uvm_map_check(map, "map forwardmerged");
1.1 mrg 1472:
1.121 atatat 1473: UVMHIST_LOG(maphist,"<- done forwardmerge", 0, 0, 0, 0);
1474: merged++;
1.106 chs 1475: }
1.121 atatat 1476:
1477: nomerge:
1478: if (!merged) {
1479: UVMHIST_LOG(maphist," allocating new map entry", 0, 0, 0, 0);
1480: if (kmap)
1.204.2.1 yamt 1481: UVMMAP_EVCNT_INCR(knomerge);
1.121 atatat 1482: else
1.204.2.1 yamt 1483: UVMMAP_EVCNT_INCR(unomerge);
1.106 chs 1484:
1.10 mrg 1485: /*
1.121 atatat 1486: * allocate new entry and link it in.
1.10 mrg 1487: */
1.106 chs 1488:
1.121 atatat 1489: if (new_entry == NULL) {
1.126 bouyer 1490: new_entry = uvm_mapent_alloc(map,
1.127 thorpej 1491: (flags & UVM_FLAG_NOWAIT));
1.126 bouyer 1492: if (__predict_false(new_entry == NULL)) {
1.174 yamt 1493: error = ENOMEM;
1494: goto done;
1.126 bouyer 1495: }
1.121 atatat 1496: }
1.174 yamt 1497: new_entry->start = start;
1.121 atatat 1498: new_entry->end = new_entry->start + size;
1499: new_entry->object.uvm_obj = uobj;
1500: new_entry->offset = uoffset;
1501:
1.176 yamt 1502: new_entry->etype = newetype;
1.121 atatat 1503:
1.161 matt 1504: if (flags & UVM_FLAG_NOMERGE) {
1505: new_entry->flags |= UVM_MAP_NOMERGE;
1506: }
1.121 atatat 1507:
1508: new_entry->protection = prot;
1509: new_entry->max_protection = maxprot;
1510: new_entry->inheritance = inherit;
1511: new_entry->wired_count = 0;
1512: new_entry->advice = advice;
1513: if (flags & UVM_FLAG_OVERLAY) {
1514:
1515: /*
1516: * to_add: for BSS we overallocate a little since we
1517: * are likely to extend
1518: */
1519:
1520: vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1521: UVM_AMAP_CHUNK << PAGE_SHIFT : 0;
1.126 bouyer 1522: struct vm_amap *amap = amap_alloc(size, to_add,
1.204.2.2 yamt 1523: (flags & UVM_FLAG_NOWAIT));
1.126 bouyer 1524: if (__predict_false(amap == NULL)) {
1.174 yamt 1525: error = ENOMEM;
1526: goto done;
1.126 bouyer 1527: }
1.121 atatat 1528: new_entry->aref.ar_pageoff = 0;
1529: new_entry->aref.ar_amap = amap;
1530: } else {
1531: new_entry->aref.ar_pageoff = 0;
1532: new_entry->aref.ar_amap = NULL;
1533: }
1534: uvm_map_entry_link(map, prev_entry, new_entry);
1.1 mrg 1535:
1.121 atatat 1536: /*
1537: * Update the free space hint
1538: */
1.10 mrg 1539:
1.121 atatat 1540: if ((map->first_free == prev_entry) &&
1541: (prev_entry->end >= new_entry->start))
1542: map->first_free = new_entry;
1.174 yamt 1543:
1544: new_entry = NULL;
1.121 atatat 1545: }
1.10 mrg 1546:
1.146 yamt 1547: map->size += size;
1548:
1.10 mrg 1549: UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1.174 yamt 1550:
1551: error = 0;
1552: done:
1.204.2.7 yamt 1553: if ((flags & UVM_FLAG_QUANTUM) == 0) {
1554: /*
1555: * vmk_merged_entries is locked by the map's lock.
1556: */
1557: vm_map_unlock(map);
1558: }
1559: if (new_entry && error == 0) {
1560: KDASSERT(merged);
1561: uvm_mapent_free_merged(map, new_entry);
1562: new_entry = NULL;
1.174 yamt 1563: }
1564: if (dead) {
1565: KDASSERT(merged);
1.181 yamt 1566: uvm_mapent_free_merged(map, dead);
1.174 yamt 1567: }
1.204.2.7 yamt 1568: if ((flags & UVM_FLAG_QUANTUM) != 0) {
1569: vm_map_unlock(map);
1570: }
1571: if (new_entry != NULL) {
1572: uvm_mapent_free(new_entry);
1573: }
1.174 yamt 1574: return error;
1.1 mrg 1575: }
1576:
1577: /*
1.204.2.7 yamt 1578: * uvm_map_lookup_entry_bytree: lookup an entry in tree
1579: */
1580:
1581: static bool
1582: uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address,
1583: struct vm_map_entry **entry /* OUT */)
1584: {
1585: struct vm_map_entry *prev = &map->header;
1586: struct vm_map_entry *cur = RB_ROOT(&map->rbhead);
1587:
1588: while (cur) {
1589: if (address >= cur->start) {
1590: if (address < cur->end) {
1591: *entry = cur;
1592: return true;
1593: }
1594: prev = cur;
1595: cur = RB_RIGHT(cur, rb_entry);
1596: } else
1597: cur = RB_LEFT(cur, rb_entry);
1598: }
1599: *entry = prev;
1600: return false;
1601: }
1602:
1603: /*
1.1 mrg 1604: * uvm_map_lookup_entry: find map entry at or before an address
1605: *
1606: * => map must at least be read-locked by caller
1607: * => entry is returned in "entry"
1608: * => return value is true if address is in the returned entry
1609: */
1610:
1.204.2.3 yamt 1611: bool
1.138 enami 1612: uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
1613: struct vm_map_entry **entry /* OUT */)
1.1 mrg 1614: {
1.99 chs 1615: struct vm_map_entry *cur;
1.204.2.3 yamt 1616: bool use_tree = false;
1.1 mrg 1617: UVMHIST_FUNC("uvm_map_lookup_entry");
1618: UVMHIST_CALLED(maphist);
1619:
1620: UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
1.10 mrg 1621: map, address, entry, 0);
1.1 mrg 1622:
1623: /*
1.10 mrg 1624: * start looking either from the head of the
1625: * list, or from the hint.
1.1 mrg 1626: */
1627:
1628: cur = map->hint;
1629:
1630: if (cur == &map->header)
1631: cur = cur->next;
1632:
1.204.2.1 yamt 1633: UVMMAP_EVCNT_INCR(mlk_call);
1.1 mrg 1634: if (address >= cur->start) {
1.99 chs 1635:
1.139 enami 1636: /*
1.10 mrg 1637: * go from hint to end of list.
1.1 mrg 1638: *
1.10 mrg 1639: * but first, make a quick check to see if
1640: * we are already looking at the entry we
1641: * want (which is usually the case).
1642: * note also that we don't need to save the hint
1643: * here... it is the same hint (unless we are
1644: * at the header, in which case the hint didn't
1645: * buy us anything anyway).
1.1 mrg 1646: */
1.99 chs 1647:
1.144 yamt 1648: if (cur != &map->header && cur->end > address) {
1.204.2.1 yamt 1649: UVMMAP_EVCNT_INCR(mlk_hint);
1.1 mrg 1650: *entry = cur;
1651: UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
1.10 mrg 1652: cur, 0, 0, 0);
1.204.2.1 yamt 1653: uvm_mapent_check(*entry);
1.204.2.3 yamt 1654: return (true);
1.1 mrg 1655: }
1.144 yamt 1656:
1657: if (map->nentries > 30)
1.204.2.3 yamt 1658: use_tree = true;
1.10 mrg 1659: } else {
1.99 chs 1660:
1.139 enami 1661: /*
1.144 yamt 1662: * invalid hint. use tree.
1.1 mrg 1663: */
1.204.2.3 yamt 1664: use_tree = true;
1.144 yamt 1665: }
1666:
1.204.2.1 yamt 1667: uvm_map_check(map, __func__);
1.144 yamt 1668:
1669: if (use_tree) {
1670: /*
1671: * Simple lookup in the tree. Happens when the hint is
  1672: 		 * invalid, or nentries reaches a threshold.
1673: */
1.204.2.7 yamt 1674: if (uvm_map_lookup_entry_bytree(map, address, entry)) {
1675: goto got;
1676: } else {
1677: goto failed;
1.144 yamt 1678: }
1.1 mrg 1679: }
1680:
1681: /*
1.10 mrg 1682: * search linearly
1.1 mrg 1683: */
1684:
1.144 yamt 1685: while (cur != &map->header) {
1.1 mrg 1686: if (cur->end > address) {
1687: if (address >= cur->start) {
1.139 enami 1688: /*
1.10 mrg 1689: * save this lookup for future
1690: * hints, and return
1.1 mrg 1691: */
1692:
1693: *entry = cur;
1.144 yamt 1694: got:
1695: SAVE_HINT(map, map->hint, *entry);
1.1 mrg 1696: UVMHIST_LOG(maphist,"<- search got it (0x%x)",
1.10 mrg 1697: cur, 0, 0, 0);
1.144 yamt 1698: KDASSERT((*entry)->start <= address);
1699: KDASSERT(address < (*entry)->end);
1.204.2.1 yamt 1700: uvm_mapent_check(*entry);
1.204.2.3 yamt 1701: return (true);
1.1 mrg 1702: }
1703: break;
1704: }
1705: cur = cur->next;
1706: }
1707: *entry = cur->prev;
1.144 yamt 1708: failed:
1.82 thorpej 1709: SAVE_HINT(map, map->hint, *entry);
1.1 mrg 1710: UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1.147 yamt 1711: KDASSERT((*entry) == &map->header || (*entry)->end <= address);
1.144 yamt 1712: KDASSERT((*entry)->next == &map->header ||
1713: address < (*entry)->next->start);
1.204.2.3 yamt 1714: return (false);
1.1 mrg 1715: }
1716:
1717: /*
1.140 enami 1718: * See if the range between start and start + length fits in the gap
  1719:  * between entry->end and entry->next->start.  Returns 1 if it fits, 0 if it
  1720:  * doesn't fit, and -1 if the address wraps around.
1721: */
1.203 thorpej 1722: static int
1.140 enami 1723: uvm_map_space_avail(vaddr_t *start, vsize_t length, voff_t uoffset,
1724: vsize_t align, int topdown, struct vm_map_entry *entry)
1725: {
1726: vaddr_t end;
1727:
1728: #ifdef PMAP_PREFER
1729: /*
1730: * push start address forward as needed to avoid VAC alias problems.
1731: * we only do this if a valid offset is specified.
1732: */
1733:
1734: if (uoffset != UVM_UNKNOWN_OFFSET)
1.182 atatat 1735: PMAP_PREFER(uoffset, start, length, topdown);
1.140 enami 1736: #endif
1737: if (align != 0) {
1738: if ((*start & (align - 1)) != 0) {
1739: if (topdown)
1740: *start &= ~(align - 1);
1741: else
1742: *start = roundup(*start, align);
1743: }
1744: /*
1745: * XXX Should we PMAP_PREFER() here again?
1.182 atatat 1746: 		 * eh... I think we're okay
1.140 enami 1747: */
1748: }
1749:
1750: /*
1751: * Find the end of the proposed new region. Be sure we didn't
1752: * wrap around the address; if so, we lose. Otherwise, if the
1753: * proposed new region fits before the next entry, we win.
1754: */
1755:
1756: end = *start + length;
1757: if (end < *start)
1758: return (-1);
1759:
1760: if (entry->next->start >= end && *start >= entry->end)
1761: return (1);
1762:
1763: return (0);
1764: }
1765:
1766: /*
1.1 mrg 1767: * uvm_map_findspace: find "length" sized space in "map".
1768: *
1.167 junyoung 1769: * => "hint" is a hint about where we want it, unless UVM_FLAG_FIXED is
1770: * set in "flags" (in which case we insist on using "hint").
1.1 mrg 1771: * => "result" is VA returned
1772: * => uobj/uoffset are to be used to handle VAC alignment, if required
1.167 junyoung 1773: * => if "align" is non-zero, we attempt to align to that value.
1.1 mrg 1774: * => caller must at least have read-locked map
1775: * => returns NULL on failure, or pointer to prev. map entry if success
1776: * => note this is a cross between the old vm_map_findspace and vm_map_find
1777: */
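/*
 * Rough caller sketch (an assumption for illustration, not original source;
 * this mirrors how the mapping path locates free space before linking a new
 * entry):
 *
 *	struct vm_map_entry *prev_entry;
 *
 *	prev_entry = uvm_map_findspace(map, start, size, &start,
 *	    uobj, uoffset, align, flags);
 *	if (prev_entry == NULL)
 *		return ENOMEM;	 /* no room; caller unlocks and bails */
 */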
1778:
1.99 chs 1779: struct vm_map_entry *
1.138 enami 1780: uvm_map_findspace(struct vm_map *map, vaddr_t hint, vsize_t length,
1781: vaddr_t *result /* OUT */, struct uvm_object *uobj, voff_t uoffset,
1782: vsize_t align, int flags)
1.1 mrg 1783: {
1.140 enami 1784: struct vm_map_entry *entry;
1.144 yamt 1785: struct vm_map_entry *child, *prev, *tmp;
1.140 enami 1786: vaddr_t orig_hint;
1.131 atatat 1787: const int topdown = map->flags & VM_MAP_TOPDOWN;
1.1 mrg 1788: UVMHIST_FUNC("uvm_map_findspace");
1789: UVMHIST_CALLED(maphist);
1790:
1.98 chs 1791: UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)",
1.140 enami 1792: map, hint, length, flags);
1.85 chs 1793: KASSERT((align & (align - 1)) == 0);
1794: KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
1.81 thorpej 1795:
1.204.2.1 yamt 1796: uvm_map_check(map, "map_findspace entry");
1.144 yamt 1797:
1.81 thorpej 1798: /*
1799: * remember the original hint. if we are aligning, then we
1800: * may have to try again with no alignment constraint if
1801: * we fail the first time.
1802: */
1.85 chs 1803:
1.81 thorpej 1804: orig_hint = hint;
1.184 chs 1805: if (hint < vm_map_min(map)) { /* check ranges ... */
1.81 thorpej 1806: if (flags & UVM_FLAG_FIXED) {
1.1 mrg 1807: UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
1.139 enami 1808: return (NULL);
1.1 mrg 1809: }
1.184 chs 1810: hint = vm_map_min(map);
1.1 mrg 1811: }
1.184 chs 1812: if (hint > vm_map_max(map)) {
1.1 mrg 1813: UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
1.184 chs 1814: hint, vm_map_min(map), vm_map_max(map), 0);
1.139 enami 1815: return (NULL);
1.1 mrg 1816: }
1817:
1818: /*
1819: * Look for the first possible address; if there's already
1820: * something at this address, we have to start after it.
1821: */
1822:
1.131 atatat 1823: /*
1824: * @@@: there are four, no, eight cases to consider.
1825: *
1826: * 0: found, fixed, bottom up -> fail
1827: * 1: found, fixed, top down -> fail
1.140 enami 1828: * 2: found, not fixed, bottom up -> start after entry->end,
1829: * loop up
1830: * 3: found, not fixed, top down -> start before entry->start,
1831: * loop down
1832: * 4: not found, fixed, bottom up -> check entry->next->start, fail
1833: * 5: not found, fixed, top down -> check entry->next->start, fail
1834: * 6: not found, not fixed, bottom up -> check entry->next->start,
1835: * loop up
1836: * 7: not found, not fixed, top down -> check entry->next->start,
1837: * loop down
1.131 atatat 1838: *
1839: * as you can see, it reduces to roughly five cases, and that
1840: * adding top down mapping only adds one unique case (without
1841: * it, there would be four cases).
1842: */
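	/*
	 * Illustrative summary of the code below (not original annotation
	 * text): in the "found, not fixed" cases the candidate address for
	 * a gap is entry->next->start - length on topdown maps and
	 * entry->end on bottom-up maps; the search (tree first, then the
	 * listsearch loop) keeps proposing such gaps, moving to entry->prev
	 * or entry->next as appropriate, until uvm_map_space_avail()
	 * accepts one or the header is reached.
	 */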
1843:
1.184 chs 1844: if ((flags & UVM_FLAG_FIXED) == 0 && hint == vm_map_min(map)) {
1.140 enami 1845: entry = map->first_free;
1.1 mrg 1846: } else {
1.140 enami 1847: if (uvm_map_lookup_entry(map, hint, &entry)) {
1.1 mrg 1848: /* "hint" address already in use ... */
1.81 thorpej 1849: if (flags & UVM_FLAG_FIXED) {
1.140 enami 1850: UVMHIST_LOG(maphist, "<- fixed & VA in use",
1.10 mrg 1851: 0, 0, 0, 0);
1.139 enami 1852: return (NULL);
1.1 mrg 1853: }
1.140 enami 1854: if (topdown)
1855: /* Start from lower gap. */
1856: entry = entry->prev;
1857: } else if (flags & UVM_FLAG_FIXED) {
1858: if (entry->next->start >= hint + length &&
1859: hint + length > hint)
1860: goto found;
1861:
1862: /* "hint" address is gap but too small */
1863: UVMHIST_LOG(maphist, "<- fixed mapping failed",
1864: 0, 0, 0, 0);
1865: return (NULL); /* only one shot at it ... */
1866: } else {
1867: /*
1868: * See if given hint fits in this gap.
1869: */
1870: switch (uvm_map_space_avail(&hint, length,
1871: uoffset, align, topdown, entry)) {
1872: case 1:
1873: goto found;
1874: case -1:
1875: goto wraparound;
1876: }
1877:
1.148 yamt 1878: if (topdown) {
1.140 enami 1879: /*
1880: * Still there is a chance to fit
1881: * if hint > entry->end.
1882: */
1.148 yamt 1883: } else {
1.168 junyoung 1884: /* Start from higher gap. */
1.148 yamt 1885: entry = entry->next;
1886: if (entry == &map->header)
1887: goto notfound;
1.140 enami 1888: goto nextgap;
1.148 yamt 1889: }
1.1 mrg 1890: }
1891: }
1892:
1893: /*
1.144 yamt 1894: 	 * Note that all UVM_FLAG_FIXED cases have already been handled above.
1895: */
1896: KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1897:
1898: /* Try to find the space in the red-black tree */
1899:
1900: /* Check slot before any entry */
1901: hint = topdown ? entry->next->start - length : entry->end;
1902: switch (uvm_map_space_avail(&hint, length, uoffset, align,
1903: topdown, entry)) {
1904: case 1:
1905: goto found;
1906: case -1:
1907: goto wraparound;
1908: }
1909:
1910: nextgap:
1.148 yamt 1911: KDASSERT((flags & UVM_FLAG_FIXED) == 0);
1.144 yamt 1912: /* If there is not enough space in the whole tree, we fail */
1913: tmp = RB_ROOT(&map->rbhead);
1914: if (tmp == NULL || tmp->space < length)
1915: goto notfound;
1916:
1917: prev = NULL; /* previous candidate */
1918:
1919: /* Find an entry close to hint that has enough space */
1920: for (; tmp;) {
1921: KASSERT(tmp->next->start == tmp->end + tmp->ownspace);
1922: if (topdown) {
1923: if (tmp->next->start < hint + length &&
1924: (prev == NULL || tmp->end > prev->end)) {
1925: if (tmp->ownspace >= length)
1926: prev = tmp;
1927: else if ((child = RB_LEFT(tmp, rb_entry))
1928: != NULL && child->space >= length)
1929: prev = tmp;
1930: }
1931: } else {
1932: if (tmp->end >= hint &&
1933: (prev == NULL || tmp->end < prev->end)) {
1934: if (tmp->ownspace >= length)
1935: prev = tmp;
1936: else if ((child = RB_RIGHT(tmp, rb_entry))
1937: != NULL && child->space >= length)
1938: prev = tmp;
1939: }
1940: }
1941: if (tmp->next->start < hint + length)
1942: child = RB_RIGHT(tmp, rb_entry);
1943: else if (tmp->end > hint)
1944: child = RB_LEFT(tmp, rb_entry);
1945: else {
1946: if (tmp->ownspace >= length)
1947: break;
1948: if (topdown)
1949: child = RB_LEFT(tmp, rb_entry);
1950: else
1951: child = RB_RIGHT(tmp, rb_entry);
1952: }
1953: if (child == NULL || child->space < length)
1954: break;
1955: tmp = child;
1956: }
1957:
1.148 yamt 1958: if (tmp != NULL && tmp->start < hint && hint < tmp->next->start) {
1.164 junyoung 1959: /*
1.144 yamt 1960: 		 * Check if the entry that we found satisfies the
1961: * space requirement
1962: */
1.148 yamt 1963: if (topdown) {
1.149 yamt 1964: if (hint > tmp->next->start - length)
1965: hint = tmp->next->start - length;
1.148 yamt 1966: } else {
1.149 yamt 1967: if (hint < tmp->end)
1968: hint = tmp->end;
1.148 yamt 1969: }
1970: switch (uvm_map_space_avail(&hint, length, uoffset, align,
1971: topdown, tmp)) {
1972: case 1:
1.144 yamt 1973: entry = tmp;
1974: goto found;
1.148 yamt 1975: case -1:
1976: goto wraparound;
1.144 yamt 1977: }
1978: if (tmp->ownspace >= length)
1979: goto listsearch;
1980: }
1981: if (prev == NULL)
1982: goto notfound;
1983:
1.148 yamt 1984: if (topdown) {
1.150 yamt 1985: KASSERT(orig_hint >= prev->next->start - length ||
1.148 yamt 1986: prev->next->start - length > prev->next->start);
1987: hint = prev->next->start - length;
1988: } else {
1.150 yamt 1989: KASSERT(orig_hint <= prev->end);
1.148 yamt 1990: hint = prev->end;
1991: }
1992: switch (uvm_map_space_avail(&hint, length, uoffset, align,
1993: topdown, prev)) {
1994: case 1:
1.144 yamt 1995: entry = prev;
1996: goto found;
1.148 yamt 1997: case -1:
1998: goto wraparound;
1.144 yamt 1999: }
2000: if (prev->ownspace >= length)
2001: goto listsearch;
1.164 junyoung 2002:
1.144 yamt 2003: if (topdown)
2004: tmp = RB_LEFT(prev, rb_entry);
2005: else
2006: tmp = RB_RIGHT(prev, rb_entry);
2007: for (;;) {
2008: KASSERT(tmp && tmp->space >= length);
2009: if (topdown)
2010: child = RB_RIGHT(tmp, rb_entry);
2011: else
2012: child = RB_LEFT(tmp, rb_entry);
2013: if (child && child->space >= length) {
2014: tmp = child;
2015: continue;
2016: }
2017: if (tmp->ownspace >= length)
2018: break;
2019: if (topdown)
2020: tmp = RB_LEFT(tmp, rb_entry);
2021: else
2022: tmp = RB_RIGHT(tmp, rb_entry);
2023: }
1.164 junyoung 2024:
1.148 yamt 2025: if (topdown) {
1.150 yamt 2026: KASSERT(orig_hint >= tmp->next->start - length ||
1.148 yamt 2027: tmp->next->start - length > tmp->next->start);
2028: hint = tmp->next->start - length;
2029: } else {
1.150 yamt 2030: KASSERT(orig_hint <= tmp->end);
1.148 yamt 2031: hint = tmp->end;
2032: }
1.144 yamt 2033: switch (uvm_map_space_avail(&hint, length, uoffset, align,
2034: topdown, tmp)) {
2035: case 1:
2036: entry = tmp;
2037: goto found;
1.148 yamt 2038: case -1:
2039: goto wraparound;
1.144 yamt 2040: }
2041:
1.164 junyoung 2042: /*
1.144 yamt 2043: * The tree fails to find an entry because of offset or alignment
2044: * restrictions. Search the list instead.
2045: */
2046: listsearch:
2047: /*
1.1 mrg 2048: * Look through the rest of the map, trying to fit a new region in
2049: * the gap between existing regions, or after the very last region.
1.140 enami 2050: * note: entry->end = base VA of current gap,
2051: * entry->next->start = VA of end of current gap
1.1 mrg 2052: */
1.99 chs 2053:
1.140 enami 2054: for (;;) {
2055: /* Update hint for current gap. */
2056: hint = topdown ? entry->next->start - length : entry->end;
2057:
2058: /* See if it fits. */
2059: switch (uvm_map_space_avail(&hint, length, uoffset, align,
2060: topdown, entry)) {
2061: case 1:
2062: goto found;
2063: case -1:
2064: goto wraparound;
2065: }
2066:
2067: /* Advance to next/previous gap */
2068: if (topdown) {
2069: if (entry == &map->header) {
2070: UVMHIST_LOG(maphist, "<- failed (off start)",
2071: 0,0,0,0);
2072: goto notfound;
1.134 matt 2073: }
1.140 enami 2074: entry = entry->prev;
2075: } else {
2076: entry = entry->next;
2077: if (entry == &map->header) {
2078: UVMHIST_LOG(maphist, "<- failed (off end)",
1.81 thorpej 2079: 0,0,0,0);
1.140 enami 2080: goto notfound;
1.81 thorpej 2081: }
1.1 mrg 2082: }
2083: }
1.140 enami 2084:
2085: found:
1.82 thorpej 2086: SAVE_HINT(map, map->hint, entry);
1.1 mrg 2087: *result = hint;
2088: UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0);
1.148 yamt 2089: KASSERT( topdown || hint >= orig_hint);
2090: KASSERT(!topdown || hint <= orig_hint);
1.144 yamt 2091: KASSERT(entry->end <= hint);
2092: KASSERT(hint + length <= entry->next->start);
1.1 mrg 2093: return (entry);
1.140 enami 2094:
2095: wraparound:
2096: UVMHIST_LOG(maphist, "<- failed (wrap around)", 0,0,0,0);
2097:
1.165 yamt 2098: return (NULL);
2099:
1.140 enami 2100: notfound:
1.165 yamt 2101: UVMHIST_LOG(maphist, "<- failed (notfound)", 0,0,0,0);
2102:
1.140 enami 2103: return (NULL);
1.1 mrg 2104: }
2105:
2106: /*
2107: * U N M A P - m a i n h e l p e r f u n c t i o n s
2108: */
2109:
2110: /*
2111: * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
2112: *
1.98 chs 2113: * => caller must check alignment and size
1.1 mrg 2114: * => map must be locked by caller
 2115:  * => we return a list of map entries that we've removed from the map
2116: * in "entry_list"
2117: */
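/*
 * Typical caller shape, shown as a hedged sketch (loosely modeled on the
 * uvm_unmap() wrapper; not part of the annotated source):
 *
 *	struct vm_map_entry *dead_entries;
 *
 *	vm_map_lock(map);
 *	uvm_unmap_remove(map, start, end, &dead_entries, NULL, 0);
 *	vm_map_unlock(map);
 *	if (dead_entries != NULL)
 *		uvm_unmap_detach(dead_entries, 0);
 */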
2118:
1.94 chs 2119: void
1.138 enami 2120: uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
1.174 yamt 2121: struct vm_map_entry **entry_list /* OUT */,
1.187 yamt 2122: struct uvm_mapent_reservation *umr, int flags)
1.10 mrg 2123: {
1.99 chs 2124: struct vm_map_entry *entry, *first_entry, *next;
1.24 eeh 2125: vaddr_t len;
1.99 chs 2126: UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
1.10 mrg 2127:
2128: UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
2129: map, start, end, 0);
2130: VM_MAP_RANGE_CHECK(map, start, end);
2131:
1.204.2.1 yamt 2132: uvm_map_check(map, "unmap_remove entry");
1.144 yamt 2133:
1.10 mrg 2134: /*
2135: * find first entry
2136: */
1.99 chs 2137:
1.204.2.3 yamt 2138: if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
1.29 chuck 2139: /* clip and go... */
1.10 mrg 2140: entry = first_entry;
1.174 yamt 2141: UVM_MAP_CLIP_START(map, entry, start, umr);
1.10 mrg 2142: /* critical! prevents stale hint */
1.82 thorpej 2143: SAVE_HINT(map, entry, entry->prev);
1.10 mrg 2144: } else {
2145: entry = first_entry->next;
2146: }
2147:
2148: /*
2149: * Save the free space hint
2150: */
2151:
1.204.2.1 yamt 2152: if (map->first_free != &map->header && map->first_free->start >= start)
1.10 mrg 2153: map->first_free = entry->prev;
2154:
2155: /*
2156: * note: we now re-use first_entry for a different task. we remove
2157: * a number of map entries from the map and save them in a linked
2158: * list headed by "first_entry". once we remove them from the map
2159: * the caller should unlock the map and drop the references to the
2160: * backing objects [c.f. uvm_unmap_detach]. the object is to
1.100 wiz 2161: * separate unmapping from reference dropping. why?
1.10 mrg 2162: * [1] the map has to be locked for unmapping
2163: * [2] the map need not be locked for reference dropping
2164: * [3] dropping references may trigger pager I/O, and if we hit
2165: * a pager that does synchronous I/O we may have to wait for it.
2166: * [4] we would like all waiting for I/O to occur with maps unlocked
1.98 chs 2167: * so that we don't block other threads.
1.10 mrg 2168: */
1.99 chs 2169:
1.10 mrg 2170: first_entry = NULL;
1.106 chs 2171: *entry_list = NULL;
1.10 mrg 2172:
2173: /*
1.98 chs 2174: * break up the area into map entry sized regions and unmap. note
1.10 mrg 2175: * that all mappings have to be removed before we can even consider
2176: * dropping references to amaps or VM objects (otherwise we could end
2177: * up with a mapping to a page on the free list which would be very bad)
2178: */
2179:
2180: while ((entry != &map->header) && (entry->start < end)) {
1.174 yamt 2181: KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
2182:
2183: UVM_MAP_CLIP_END(map, entry, end, umr);
1.10 mrg 2184: next = entry->next;
2185: len = entry->end - entry->start;
1.81 thorpej 2186:
1.10 mrg 2187: /*
2188: * unwire before removing addresses from the pmap; otherwise
2189: * unwiring will put the entries back into the pmap (XXX).
2190: */
1.1 mrg 2191:
1.106 chs 2192: if (VM_MAPENT_ISWIRED(entry)) {
1.10 mrg 2193: uvm_map_entry_unwire(map, entry);
1.106 chs 2194: }
1.187 yamt 2195: if (flags & UVM_FLAG_VAONLY) {
2196:
2197: /* nothing */
2198:
2199: } else if ((map->flags & VM_MAP_PAGEABLE) == 0) {
1.10 mrg 2200:
1.106 chs 2201: /*
2202: * if the map is non-pageable, any pages mapped there
2203: * must be wired and entered with pmap_kenter_pa(),
2204: * and we should free any such pages immediately.
2205: * this is mostly used for kmem_map and mb_map.
2206: */
1.99 chs 2207:
1.174 yamt 2208: if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2209: uvm_km_pgremove_intrsafe(entry->start,
2210: entry->end);
2211: pmap_kremove(entry->start, len);
2212: }
1.106 chs 2213: } else if (UVM_ET_ISOBJ(entry) &&
2214: UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj)) {
1.85 chs 2215: KASSERT(vm_map_pmap(map) == pmap_kernel());
1.1 mrg 2216:
1.10 mrg 2217: /*
2218: * note: kernel object mappings are currently used in
2219: * two ways:
2220: * [1] "normal" mappings of pages in the kernel object
2221: * [2] uvm_km_valloc'd allocations in which we
2222: * pmap_enter in some non-kernel-object page
2223: * (e.g. vmapbuf).
2224: *
2225: * for case [1], we need to remove the mapping from
2226: * the pmap and then remove the page from the kernel
2227: * object (because, once pages in a kernel object are
2228: * unmapped they are no longer needed, unlike, say,
2229: * a vnode where you might want the data to persist
2230: * until flushed out of a queue).
2231: *
2232: * for case [2], we need to remove the mapping from
2233: * the pmap. there shouldn't be any pages at the
2234: * specified offset in the kernel object [but it
2235: * doesn't hurt to call uvm_km_pgremove just to be
2236: * safe?]
2237: *
1.98 chs 2238: * uvm_km_pgremove currently does the following:
2239: * for pages in the kernel object in range:
1.43 thorpej 2240: * - drops the swap slot
1.10 mrg 2241: * - uvm_pagefree the page
2242: */
2243:
2244: /*
1.43 thorpej 2245: * remove mappings from pmap and drop the pages
2246: * from the object. offsets are always relative
2247: * to vm_map_min(kernel_map).
1.10 mrg 2248: */
1.99 chs 2249:
1.106 chs 2250: pmap_remove(pmap_kernel(), entry->start,
2251: entry->start + len);
1.187 yamt 2252: uvm_km_pgremove(entry->start, entry->end);
1.10 mrg 2253:
2254: /*
2255: * null out kernel_object reference, we've just
2256: * dropped it
2257: */
1.99 chs 2258:
1.10 mrg 2259: entry->etype &= ~UVM_ET_OBJ;
1.106 chs 2260: entry->object.uvm_obj = NULL;
2261: } else if (UVM_ET_ISOBJ(entry) || entry->aref.ar_amap) {
1.99 chs 2262:
1.29 chuck 2263: /*
1.139 enami 2264: * remove mappings the standard way.
2265: */
1.99 chs 2266:
1.29 chuck 2267: pmap_remove(map->pmap, entry->start, entry->end);
1.10 mrg 2268: }
2269:
1.177 yamt 2270: #if defined(DEBUG)
2271: if ((entry->flags & UVM_MAP_KMAPENT) == 0) {
2272:
2273: /*
2274: * check if there's remaining mapping,
2275: * which is a bug in caller.
2276: */
2277:
2278: vaddr_t va;
2279: for (va = entry->start; va < entry->end;
2280: va += PAGE_SIZE) {
2281: if (pmap_extract(vm_map_pmap(map), va, NULL)) {
2282: panic("uvm_unmap_remove: has mapping");
2283: }
2284: }
1.187 yamt 2285:
2286: if (VM_MAP_IS_KERNEL(map)) {
2287: uvm_km_check_empty(entry->start, entry->end,
2288: (map->flags & VM_MAP_INTRSAFE) != 0);
2289: }
1.177 yamt 2290: }
2291: #endif /* defined(DEBUG) */
2292:
1.10 mrg 2293: /*
1.98 chs 2294: * remove entry from map and put it on our list of entries
1.106 chs 2295: * that we've nuked. then go to next entry.
1.10 mrg 2296: */
1.99 chs 2297:
1.10 mrg 2298: UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0);
1.82 thorpej 2299:
2300: /* critical! prevents stale hint */
2301: SAVE_HINT(map, entry, entry->prev);
2302:
1.10 mrg 2303: uvm_map_entry_unlink(map, entry);
1.146 yamt 2304: KASSERT(map->size >= len);
1.10 mrg 2305: map->size -= len;
1.131 atatat 2306: entry->prev = NULL;
1.10 mrg 2307: entry->next = first_entry;
2308: first_entry = entry;
1.106 chs 2309: entry = next;
1.10 mrg 2310: }
1.120 chs 2311: if ((map->flags & VM_MAP_DYING) == 0) {
2312: pmap_update(vm_map_pmap(map));
2313: }
1.10 mrg 2314:
1.204.2.1 yamt 2315: uvm_map_check(map, "unmap_remove leave");
1.144 yamt 2316:
1.10 mrg 2317: /*
2318: * now we've cleaned up the map and are ready for the caller to drop
1.98 chs 2319: * references to the mapped objects.
1.10 mrg 2320: */
2321:
2322: *entry_list = first_entry;
2323: UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1.180 yamt 2324:
2325: if (map->flags & VM_MAP_WANTVA) {
1.204.2.4 yamt 2326: mutex_enter(&map->misc_lock);
1.180 yamt 2327: map->flags &= ~VM_MAP_WANTVA;
1.204.2.4 yamt 2328: cv_broadcast(&map->cv);
2329: mutex_exit(&map->misc_lock);
1.180 yamt 2330: }
1.1 mrg 2331: }
2332:
2333: /*
2334: * uvm_unmap_detach: drop references in a chain of map entries
2335: *
2336: * => we will free the map entries as we traverse the list.
2337: */
2338:
1.10 mrg 2339: void
1.138 enami 2340: uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
1.1 mrg 2341: {
1.99 chs 2342: struct vm_map_entry *next_entry;
1.10 mrg 2343: UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
1.1 mrg 2344:
1.10 mrg 2345: while (first_entry) {
1.85 chs 2346: KASSERT(!VM_MAPENT_ISWIRED(first_entry));
1.10 mrg 2347: UVMHIST_LOG(maphist,
1.98 chs 2348: " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d",
2349: first_entry, first_entry->aref.ar_amap,
1.29 chuck 2350: first_entry->object.uvm_obj,
2351: UVM_ET_ISSUBMAP(first_entry));
1.1 mrg 2352:
1.10 mrg 2353: /*
2354: * drop reference to amap, if we've got one
2355: */
2356:
2357: if (first_entry->aref.ar_amap)
1.85 chs 2358: uvm_map_unreference_amap(first_entry, flags);
1.10 mrg 2359:
2360: /*
2361: * drop reference to our backing object, if we've got one
2362: */
1.85 chs 2363:
1.120 chs 2364: KASSERT(!UVM_ET_ISSUBMAP(first_entry));
2365: if (UVM_ET_ISOBJ(first_entry) &&
2366: first_entry->object.uvm_obj->pgops->pgo_detach) {
2367: (*first_entry->object.uvm_obj->pgops->pgo_detach)
2368: (first_entry->object.uvm_obj);
1.10 mrg 2369: }
2370: next_entry = first_entry->next;
2371: uvm_mapent_free(first_entry);
2372: first_entry = next_entry;
2373: }
2374: UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
1.1 mrg 2375: }
2376:
2377: /*
2378: * E X T R A C T I O N F U N C T I O N S
2379: */
2380:
1.98 chs 2381: /*
1.1 mrg 2382: * uvm_map_reserve: reserve space in a vm_map for future use.
2383: *
1.98 chs 2384: * => we reserve space in a map by putting a dummy map entry in the
1.1 mrg 2385: * map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
2386: * => map should be unlocked (we will write lock it)
2387: * => we return true if we were able to reserve space
2388: * => XXXCDC: should be inline?
2389: */
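/*
 * Minimal usage sketch (illustrative; compare the call made from
 * uvm_map_extract() below): reserve "len" bytes of VA in dstmap, with
 * "start" serving only as a pmap_prefer() hint:
 *
 *	vaddr_t dstaddr = vm_map_min(dstmap);
 *
 *	if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
 *		return ENOMEM;
 */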
2390:
1.10 mrg 2391: int
1.138 enami 2392: uvm_map_reserve(struct vm_map *map, vsize_t size,
2393: vaddr_t offset /* hint for pmap_prefer */,
1.204.2.5 yamt 2394: vsize_t align /* alignment */,
1.204.2.1 yamt 2395: vaddr_t *raddr /* IN:hint, OUT: reserved VA */,
2396: uvm_flag_t flags /* UVM_FLAG_FIXED or 0 */)
1.1 mrg 2397: {
1.98 chs 2398: UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
1.85 chs 2399:
1.10 mrg 2400: UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
1.139 enami 2401: map,size,offset,raddr);
1.85 chs 2402:
1.10 mrg 2403: size = round_page(size);
1.85 chs 2404:
1.10 mrg 2405: /*
2406: * reserve some virtual space.
2407: */
1.85 chs 2408:
1.204.2.5 yamt 2409: if (uvm_map(map, raddr, size, NULL, offset, align,
1.10 mrg 2410: UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
1.204.2.1 yamt 2411: UVM_ADV_RANDOM, UVM_FLAG_NOMERGE|flags)) != 0) {
1.10 mrg 2412: UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
1.204.2.3 yamt 2413: return (false);
1.98 chs 2414: }
1.85 chs 2415:
1.10 mrg 2416: UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
1.204.2.3 yamt 2417: return (true);
1.1 mrg 2418: }
2419:
2420: /*
1.98 chs 2421: * uvm_map_replace: replace a reserved (blank) area of memory with
1.1 mrg 2422: * real mappings.
2423: *
1.98 chs 2424: * => caller must WRITE-LOCK the map
1.204.2.3 yamt 2425: * => we return true if replacement was a success
1.1 mrg 2426:  * => we expect the newents chain to have nnewents entries on it and
2427: * we expect newents->prev to point to the last entry on the list
2428: * => note newents is allowed to be NULL
2429: */
2430:
1.10 mrg 2431: int
1.138 enami 2432: uvm_map_replace(struct vm_map *map, vaddr_t start, vaddr_t end,
2433: struct vm_map_entry *newents, int nnewents)
1.10 mrg 2434: {
1.99 chs 2435: struct vm_map_entry *oldent, *last;
1.1 mrg 2436:
1.204.2.1 yamt 2437: uvm_map_check(map, "map_replace entry");
1.144 yamt 2438:
1.10 mrg 2439: /*
2440: * first find the blank map entry at the specified address
2441: */
1.85 chs 2442:
1.10 mrg 2443: if (!uvm_map_lookup_entry(map, start, &oldent)) {
1.204.2.3 yamt 2444: return (false);
1.10 mrg 2445: }
1.85 chs 2446:
1.10 mrg 2447: /*
2448: * check to make sure we have a proper blank entry
2449: */
1.1 mrg 2450:
1.204.2.1 yamt 2451: if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
2452: UVM_MAP_CLIP_END(map, oldent, end, NULL);
2453: }
1.98 chs 2454: if (oldent->start != start || oldent->end != end ||
1.10 mrg 2455: oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
1.204.2.3 yamt 2456: return (false);
1.10 mrg 2457: }
1.1 mrg 2458:
2459: #ifdef DIAGNOSTIC
1.99 chs 2460:
1.10 mrg 2461: /*
2462: * sanity check the newents chain
2463: */
1.99 chs 2464:
1.10 mrg 2465: {
1.99 chs 2466: struct vm_map_entry *tmpent = newents;
1.10 mrg 2467: int nent = 0;
1.24 eeh 2468: vaddr_t cur = start;
1.10 mrg 2469:
2470: while (tmpent) {
2471: nent++;
2472: if (tmpent->start < cur)
2473: panic("uvm_map_replace1");
2474: if (tmpent->start > tmpent->end || tmpent->end > end) {
2475: printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
2476: tmpent->start, tmpent->end, end);
2477: panic("uvm_map_replace2");
2478: }
2479: cur = tmpent->end;
2480: if (tmpent->next) {
2481: if (tmpent->next->prev != tmpent)
2482: panic("uvm_map_replace3");
2483: } else {
2484: if (newents->prev != tmpent)
2485: panic("uvm_map_replace4");
2486: }
2487: tmpent = tmpent->next;
2488: }
2489: if (nent != nnewents)
2490: panic("uvm_map_replace5");
2491: }
2492: #endif
2493:
2494: /*
2495: * map entry is a valid blank! replace it. (this does all the
2496: * work of map entry link/unlink...).
2497: */
2498:
2499: if (newents) {
1.99 chs 2500: last = newents->prev;
1.10 mrg 2501:
2502: /* critical: flush stale hints out of map */
1.82 thorpej 2503: SAVE_HINT(map, map->hint, newents);
1.10 mrg 2504: if (map->first_free == oldent)
2505: map->first_free = last;
2506:
2507: last->next = oldent->next;
2508: last->next->prev = last;
1.144 yamt 2509:
2510: /* Fix RB tree */
2511: uvm_rb_remove(map, oldent);
2512:
1.10 mrg 2513: newents->prev = oldent->prev;
2514: newents->prev->next = newents;
2515: map->nentries = map->nentries + (nnewents - 1);
2516:
1.144 yamt 2517: /* Fixup the RB tree */
2518: {
2519: int i;
2520: struct vm_map_entry *tmp;
2521:
2522: tmp = newents;
2523: for (i = 0; i < nnewents && tmp; i++) {
2524: uvm_rb_insert(map, tmp);
2525: tmp = tmp->next;
2526: }
2527: }
1.10 mrg 2528: } else {
2529: /* NULL list of new entries: just remove the old one */
1.204.2.1 yamt 2530: clear_hints(map, oldent);
1.10 mrg 2531: uvm_map_entry_unlink(map, oldent);
2532: }
2533:
1.204.2.1 yamt 2534: uvm_map_check(map, "map_replace leave");
1.10 mrg 2535:
2536: /*
1.204.2.1 yamt 2537: * now we can free the old blank entry and return.
1.10 mrg 2538: */
1.1 mrg 2539:
1.10 mrg 2540: uvm_mapent_free(oldent);
1.204.2.3 yamt 2541: return (true);
1.1 mrg 2542: }
2543:
2544: /*
2545: * uvm_map_extract: extract a mapping from a map and put it somewhere
2546: * (maybe removing the old mapping)
2547: *
2548: * => maps should be unlocked (we will write lock them)
2549: * => returns 0 on success, error code otherwise
2550: * => start must be page aligned
2551: * => len must be page sized
2552: * => flags:
2553: * UVM_EXTRACT_REMOVE: remove mappings from srcmap
2554: * UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
2555: * UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
2556: * UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
2557: * >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
2558: * >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
2559: * be used from within the kernel in a kernel level map <<<
2560: */
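/*
 * Hedged usage sketch (an illustration only, loosely modeled on how
 * uvm_io() has temporarily borrowed another map's space into kernel_map;
 * the names here are assumptions, not part of this file):
 *
 *	vaddr_t kva;
 *	int error;
 *
 *	error = uvm_map_extract(srcmap, sva, chunksz, kernel_map, &kva,
 *	    UVM_EXTRACT_QREF | UVM_EXTRACT_CONTIG | UVM_EXTRACT_FIXPROT);
 *	if (error)
 *		return error;
 *	// ... access [kva, kva + chunksz), then unmap the temporary
 *	// mapping again, respecting the QREF teardown rule noted above.
 */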
2561:
1.10 mrg 2562: int
1.138 enami 2563: uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
2564: struct vm_map *dstmap, vaddr_t *dstaddrp, int flags)
1.10 mrg 2565: {
1.163 mycroft 2566: vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge;
1.99 chs 2567: struct vm_map_entry *chain, *endchain, *entry, *orig_entry, *newentry,
2568: *deadentry, *oldentry;
1.24 eeh 2569: vsize_t elen;
1.10 mrg 2570: int nchain, error, copy_ok;
2571: UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
1.85 chs 2572:
1.10 mrg 2573: UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
2574: len,0);
2575: UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
2576:
1.204.2.1 yamt 2577: uvm_map_check(srcmap, "map_extract src enter");
2578: uvm_map_check(dstmap, "map_extract dst enter");
1.144 yamt 2579:
1.10 mrg 2580: /*
2581: * step 0: sanity check: start must be on a page boundary, length
2582: * must be page sized. can't ask for CONTIG/QREF if you asked for
2583: * REMOVE.
2584: */
2585:
1.85 chs 2586: KASSERT((start & PAGE_MASK) == 0 && (len & PAGE_MASK) == 0);
2587: KASSERT((flags & UVM_EXTRACT_REMOVE) == 0 ||
2588: (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF)) == 0);
1.10 mrg 2589:
2590: /*
2591: * step 1: reserve space in the target map for the extracted area
2592: */
2593:
1.204.2.1 yamt 2594: if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2595: dstaddr = vm_map_min(dstmap);
2596: if (!uvm_map_reserve(dstmap, len, start, 0, &dstaddr, 0))
2597: return (ENOMEM);
2598: *dstaddrp = dstaddr; /* pass address back to caller */
2599: UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0);
2600: } else {
2601: dstaddr = *dstaddrp;
2602: }
1.10 mrg 2603:
2604: /*
1.98 chs 2605: * step 2: setup for the extraction process loop by init'ing the
1.10 mrg 2606: * map entry chain, locking src map, and looking up the first useful
2607: * entry in the map.
2608: */
1.1 mrg 2609:
1.10 mrg 2610: end = start + len;
2611: newend = dstaddr + len;
2612: chain = endchain = NULL;
2613: nchain = 0;
2614: vm_map_lock(srcmap);
2615:
2616: if (uvm_map_lookup_entry(srcmap, start, &entry)) {
2617:
2618: /* "start" is within an entry */
2619: if (flags & UVM_EXTRACT_QREF) {
1.85 chs 2620:
1.10 mrg 2621: /*
2622: * for quick references we don't clip the entry, so
2623: * the entry may map space "before" the starting
2624: * virtual address... this is the "fudge" factor
2625: * (which can be non-zero only the first time
2626: * through the "while" loop in step 3).
2627: */
1.85 chs 2628:
1.10 mrg 2629: fudge = start - entry->start;
2630: } else {
1.85 chs 2631:
1.10 mrg 2632: /*
2633: * normal reference: we clip the map to fit (thus
2634: * fudge is zero)
2635: */
1.85 chs 2636:
1.174 yamt 2637: UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
1.82 thorpej 2638: SAVE_HINT(srcmap, srcmap->hint, entry->prev);
1.10 mrg 2639: fudge = 0;
2640: }
1.85 chs 2641: } else {
1.1 mrg 2642:
1.10 mrg 2643: /* "start" is not within an entry ... skip to next entry */
2644: if (flags & UVM_EXTRACT_CONTIG) {
2645: error = EINVAL;
2646: goto bad; /* definite hole here ... */
2647: }
1.1 mrg 2648:
1.10 mrg 2649: entry = entry->next;
2650: fudge = 0;
2651: }
1.85 chs 2652:
1.10 mrg 2653: /* save values from srcmap for step 6 */
2654: orig_entry = entry;
2655: orig_fudge = fudge;
1.1 mrg 2656:
1.10 mrg 2657: /*
2658: * step 3: now start looping through the map entries, extracting
2659: * as we go.
2660: */
1.1 mrg 2661:
1.10 mrg 2662: while (entry->start < end && entry != &srcmap->header) {
1.85 chs 2663:
1.10 mrg 2664: /* if we are not doing a quick reference, clip it */
2665: if ((flags & UVM_EXTRACT_QREF) == 0)
1.174 yamt 2666: UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
1.10 mrg 2667:
2668: /* clear needs_copy (allow chunking) */
2669: if (UVM_ET_ISNEEDSCOPY(entry)) {
1.204.2.1 yamt 2670: amap_copy(srcmap, entry,
2671: AMAP_COPY_NOWAIT|AMAP_COPY_NOMERGE, start, end);
1.10 mrg 2672: if (UVM_ET_ISNEEDSCOPY(entry)) { /* failed? */
2673: error = ENOMEM;
2674: goto bad;
2675: }
1.85 chs 2676:
1.10 mrg 2677: /* amap_copy could clip (during chunk)! update fudge */
2678: if (fudge) {
1.163 mycroft 2679: fudge = start - entry->start;
1.10 mrg 2680: orig_fudge = fudge;
2681: }
2682: }
1.1 mrg 2683:
1.10 mrg 2684: /* calculate the offset of this from "start" */
2685: oldoffset = (entry->start + fudge) - start;
1.1 mrg 2686:
1.10 mrg 2687: /* allocate a new map entry */
1.126 bouyer 2688: newentry = uvm_mapent_alloc(dstmap, 0);
1.10 mrg 2689: if (newentry == NULL) {
2690: error = ENOMEM;
2691: goto bad;
2692: }
2693:
2694: /* set up new map entry */
2695: newentry->next = NULL;
2696: newentry->prev = endchain;
2697: newentry->start = dstaddr + oldoffset;
2698: newentry->end =
2699: newentry->start + (entry->end - (entry->start + fudge));
1.37 chs 2700: if (newentry->end > newend || newentry->end < newentry->start)
1.10 mrg 2701: newentry->end = newend;
2702: newentry->object.uvm_obj = entry->object.uvm_obj;
2703: if (newentry->object.uvm_obj) {
2704: if (newentry->object.uvm_obj->pgops->pgo_reference)
2705: newentry->object.uvm_obj->pgops->
2706: pgo_reference(newentry->object.uvm_obj);
2707: newentry->offset = entry->offset + fudge;
2708: } else {
2709: newentry->offset = 0;
2710: }
2711: newentry->etype = entry->etype;
1.98 chs 2712: newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
2713: entry->max_protection : entry->protection;
1.10 mrg 2714: newentry->max_protection = entry->max_protection;
2715: newentry->inheritance = entry->inheritance;
2716: newentry->wired_count = 0;
2717: newentry->aref.ar_amap = entry->aref.ar_amap;
2718: if (newentry->aref.ar_amap) {
1.34 chuck 2719: newentry->aref.ar_pageoff =
2720: entry->aref.ar_pageoff + (fudge >> PAGE_SHIFT);
1.85 chs 2721: uvm_map_reference_amap(newentry, AMAP_SHARED |
1.10 mrg 2722: ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
2723: } else {
1.34 chuck 2724: newentry->aref.ar_pageoff = 0;
1.10 mrg 2725: }
2726: newentry->advice = entry->advice;
1.204.2.6 yamt 2727: if ((flags & UVM_EXTRACT_QREF) != 0) {
2728: newentry->flags |= UVM_MAP_NOMERGE;
2729: }
1.10 mrg 2730:
2731: /* now link it on the chain */
2732: nchain++;
2733: if (endchain == NULL) {
2734: chain = endchain = newentry;
2735: } else {
2736: endchain->next = newentry;
2737: endchain = newentry;
2738: }
2739:
2740: /* end of 'while' loop! */
1.98 chs 2741: if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
1.10 mrg 2742: (entry->next == &srcmap->header ||
2743: entry->next->start != entry->end)) {
2744: error = EINVAL;
2745: goto bad;
2746: }
2747: entry = entry->next;
2748: fudge = 0;
2749: }
2750:
2751: /*
2752: * step 4: close off chain (in format expected by uvm_map_replace)
2753: */
2754:
2755: if (chain)
2756: chain->prev = endchain;
2757:
2758: /*
2759: * step 5: attempt to lock the dest map so we can pmap_copy.
1.98 chs 2760: * note usage of copy_ok:
1.10 mrg 2761: * 1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
2762: * 0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
2763: */
1.85 chs 2764:
1.204.2.3 yamt 2765: if (srcmap == dstmap || vm_map_lock_try(dstmap) == true) {
1.10 mrg 2766: copy_ok = 1;
2767: if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2768: nchain)) {
2769: if (srcmap != dstmap)
2770: vm_map_unlock(dstmap);
2771: error = EIO;
2772: goto bad;
2773: }
2774: } else {
2775: copy_ok = 0;
 2776: 		/* replace deferred until step 7 */
2777: }
2778:
2779: /*
2780: * step 6: traverse the srcmap a second time to do the following:
2781: * - if we got a lock on the dstmap do pmap_copy
2782: * - if UVM_EXTRACT_REMOVE remove the entries
2783: * we make use of orig_entry and orig_fudge (saved in step 2)
2784: */
2785:
2786: if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
2787:
2788: /* purge possible stale hints from srcmap */
2789: if (flags & UVM_EXTRACT_REMOVE) {
1.82 thorpej 2790: SAVE_HINT(srcmap, srcmap->hint, orig_entry->prev);
1.204.2.1 yamt 2791: if (srcmap->first_free != &srcmap->header &&
2792: srcmap->first_free->start >= start)
1.10 mrg 2793: srcmap->first_free = orig_entry->prev;
2794: }
2795:
2796: entry = orig_entry;
2797: fudge = orig_fudge;
2798: deadentry = NULL; /* for UVM_EXTRACT_REMOVE */
2799:
2800: while (entry->start < end && entry != &srcmap->header) {
2801: if (copy_ok) {
1.74 thorpej 2802: oldoffset = (entry->start + fudge) - start;
1.90 chs 2803: elen = MIN(end, entry->end) -
1.74 thorpej 2804: (entry->start + fudge);
2805: pmap_copy(dstmap->pmap, srcmap->pmap,
2806: dstaddr + oldoffset, elen,
2807: entry->start + fudge);
1.10 mrg 2808: }
2809:
1.74 thorpej 2810: /* we advance "entry" in the following if statement */
1.10 mrg 2811: if (flags & UVM_EXTRACT_REMOVE) {
1.98 chs 2812: pmap_remove(srcmap->pmap, entry->start,
1.20 chuck 2813: entry->end);
1.139 enami 2814: oldentry = entry; /* save entry */
2815: entry = entry->next; /* advance */
1.20 chuck 2816: uvm_map_entry_unlink(srcmap, oldentry);
2817: /* add to dead list */
2818: oldentry->next = deadentry;
2819: deadentry = oldentry;
1.139 enami 2820: } else {
2821: entry = entry->next; /* advance */
1.10 mrg 2822: }
2823:
2824: /* end of 'while' loop */
2825: fudge = 0;
2826: }
1.105 chris 2827: pmap_update(srcmap->pmap);
1.10 mrg 2828:
2829: /*
2830: * unlock dstmap. we will dispose of deadentry in
2831: * step 7 if needed
2832: */
1.85 chs 2833:
1.10 mrg 2834: if (copy_ok && srcmap != dstmap)
2835: vm_map_unlock(dstmap);
2836:
1.99 chs 2837: } else {
2838: deadentry = NULL;
1.10 mrg 2839: }
2840:
2841: /*
2842: * step 7: we are done with the source map, unlock. if copy_ok
2843: * is 0 then we have not replaced the dummy mapping in dstmap yet
2844: * and we need to do so now.
2845: */
2846:
2847: vm_map_unlock(srcmap);
2848: if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
2849: uvm_unmap_detach(deadentry, 0); /* dispose of old entries */
2850:
2851: /* now do the replacement if we didn't do it in step 5 */
2852: if (copy_ok == 0) {
2853: vm_map_lock(dstmap);
2854: error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
2855: nchain);
2856: vm_map_unlock(dstmap);
2857:
1.204.2.3 yamt 2858: if (error == false) {
1.10 mrg 2859: error = EIO;
2860: goto bad2;
2861: }
2862: }
1.144 yamt 2863:
1.204.2.1 yamt 2864: uvm_map_check(srcmap, "map_extract src leave");
2865: uvm_map_check(dstmap, "map_extract dst leave");
1.144 yamt 2866:
1.139 enami 2867: return (0);
1.10 mrg 2868:
2869: /*
2870: * bad: failure recovery
2871: */
2872: bad:
2873: vm_map_unlock(srcmap);
2874: bad2: /* src already unlocked */
2875: if (chain)
2876: uvm_unmap_detach(chain,
2877: (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
1.144 yamt 2878:
1.204.2.1 yamt 2879: uvm_map_check(srcmap, "map_extract src err leave");
2880: uvm_map_check(dstmap, "map_extract dst err leave");
1.144 yamt 2881:
1.204.2.1 yamt 2882: if ((flags & UVM_EXTRACT_RESERVED) == 0) {
2883: uvm_unmap(dstmap, dstaddr, dstaddr+len); /* ??? */
2884: }
1.139 enami 2885: return (error);
1.10 mrg 2886: }
2887:
2888: /* end of extraction functions */
1.1 mrg 2889:
2890: /*
2891: * uvm_map_submap: punch down part of a map into a submap
2892: *
2893: * => only the kernel_map is allowed to be submapped
2894: * => the purpose of submapping is to break up the locking granularity
2895: * of a larger map
2896: * => the range specified must have been mapped previously with a uvm_map()
2897: * call [with uobj==NULL] to create a blank map entry in the main map.
2898: * [And it had better still be blank!]
2899: * => maps which contain submaps should never be copied or forked.
1.98 chs 2900: * => to remove a submap, use uvm_unmap() on the main map
1.1 mrg 2901: * and then uvm_map_deallocate() the submap.
2902: * => main map must be unlocked.
2903: * => submap must have been init'd and have a zero reference count.
2904: * [need not be locked as we don't actually reference it]
2905: */
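/*
 * Hedged setup sketch (illustrative; roughly the sequence a helper such as
 * uvm_km_suballoc() has followed when carving a submap out of kernel_map):
 *
 *	1. uvm_map() the range in the main map with uobj == NULL so that a
 *	   blank entry exists there;
 *	2. create and initialize the submap covering [start, end); and
 *	3. call uvm_map_submap(map, start, end, submap) to punch it in.
 */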
1.85 chs 2906:
1.10 mrg 2907: int
1.138 enami 2908: uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
2909: struct vm_map *submap)
1.10 mrg 2910: {
1.99 chs 2911: struct vm_map_entry *entry;
1.174 yamt 2912: struct uvm_mapent_reservation umr;
1.94 chs 2913: int error;
1.1 mrg 2914:
1.174 yamt 2915: uvm_mapent_reserve(map, &umr, 2, 0);
2916:
1.10 mrg 2917: vm_map_lock(map);
1.85 chs 2918: VM_MAP_RANGE_CHECK(map, start, end);
1.1 mrg 2919:
1.10 mrg 2920: if (uvm_map_lookup_entry(map, start, &entry)) {
1.174 yamt 2921: UVM_MAP_CLIP_START(map, entry, start, &umr);
2922: UVM_MAP_CLIP_END(map, entry, end, &umr); /* to be safe */
1.94 chs 2923: } else {
1.10 mrg 2924: entry = NULL;
2925: }
1.1 mrg 2926:
1.98 chs 2927: if (entry != NULL &&
1.10 mrg 2928: entry->start == start && entry->end == end &&
2929: entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
2930: !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
1.29 chuck 2931: entry->etype |= UVM_ET_SUBMAP;
1.10 mrg 2932: entry->object.sub_map = submap;
2933: entry->offset = 0;
2934: uvm_map_reference(submap);
1.94 chs 2935: error = 0;
1.10 mrg 2936: } else {
1.94 chs 2937: error = EINVAL;
1.10 mrg 2938: }
2939: vm_map_unlock(map);
1.174 yamt 2940:
2941: uvm_mapent_unreserve(map, &umr);
2942:
1.94 chs 2943: return error;
1.1 mrg 2944: }
2945:
1.175 yamt 2946: /*
2947: * uvm_map_setup_kernel: init in-kernel map
2948: *
2949: * => map must not be in service yet.
2950: */
2951:
2952: void
2953: uvm_map_setup_kernel(struct vm_map_kernel *map,
1.199 christos 2954: vaddr_t vmin, vaddr_t vmax, int flags)
1.175 yamt 2955: {
2956:
1.199 christos 2957: uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
1.204.2.4 yamt 2958: callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
1.175 yamt 2959: LIST_INIT(&map->vmk_kentry_free);
2960: map->vmk_merged_entries = NULL;
2961: }
2962:
1.1 mrg 2963:
2964: /*
2965: * uvm_map_protect: change map protection
2966: *
2967: * => set_max means set max_protection.
2968: * => map must be unlocked.
2969: */
2970:
1.139 enami 2971: #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
1.36 mycroft 2972: ~VM_PROT_WRITE : VM_PROT_ALL)
1.1 mrg 2973:
1.10 mrg 2974: int
1.138 enami 2975: uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
1.204.2.3 yamt 2976: vm_prot_t new_prot, bool set_max)
1.10 mrg 2977: {
1.99 chs 2978: struct vm_map_entry *current, *entry;
1.94 chs 2979: int error = 0;
1.10 mrg 2980: UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
2981: UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
1.85 chs 2982: map, start, end, new_prot);
2983:
1.10 mrg 2984: vm_map_lock(map);
2985: VM_MAP_RANGE_CHECK(map, start, end);
2986: if (uvm_map_lookup_entry(map, start, &entry)) {
1.174 yamt 2987: UVM_MAP_CLIP_START(map, entry, start, NULL);
1.10 mrg 2988: } else {
2989: entry = entry->next;
2990: }
2991:
1.1 mrg 2992: /*
1.10 mrg 2993: * make a first pass to check for protection violations.
1.1 mrg 2994: */
2995:
1.10 mrg 2996: current = entry;
2997: while ((current != &map->header) && (current->start < end)) {
1.65 thorpej 2998: if (UVM_ET_ISSUBMAP(current)) {
1.94 chs 2999: error = EINVAL;
1.65 thorpej 3000: goto out;
3001: }
1.10 mrg 3002: if ((new_prot & current->max_protection) != new_prot) {
1.94 chs 3003: error = EACCES;
1.65 thorpej 3004: goto out;
1.112 thorpej 3005: }
3006: /*
3007: * Don't allow VM_PROT_EXECUTE to be set on entries that
3008: * point to vnodes that are associated with a NOEXEC file
3009: * system.
3010: */
3011: if (UVM_ET_ISOBJ(current) &&
3012: UVM_OBJ_IS_VNODE(current->object.uvm_obj)) {
3013: struct vnode *vp =
3014: (struct vnode *) current->object.uvm_obj;
3015:
3016: if ((new_prot & VM_PROT_EXECUTE) != 0 &&
3017: (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
3018: error = EACCES;
3019: goto out;
3020: }
1.10 mrg 3021: }
1.204.2.1 yamt 3022:
1.65 thorpej 3023: current = current->next;
1.10 mrg 3024: }
3025:
3026: /* go back and fix up protections (no need to clip this time). */
3027:
3028: current = entry;
3029: while ((current != &map->header) && (current->start < end)) {
3030: vm_prot_t old_prot;
1.85 chs 3031:
1.174 yamt 3032: UVM_MAP_CLIP_END(map, current, end, NULL);
1.10 mrg 3033: old_prot = current->protection;
3034: if (set_max)
3035: current->protection =
3036: (current->max_protection = new_prot) & old_prot;
3037: else
3038: current->protection = new_prot;
3039:
3040: /*
1.98 chs 3041: * update physical map if necessary. worry about copy-on-write
1.10 mrg 3042: * here -- CHECK THIS XXX
3043: */
3044:
3045: if (current->protection != old_prot) {
1.29 chuck 3046: /* update pmap! */
3047: pmap_protect(map->pmap, current->start, current->end,
3048: current->protection & MASK(entry));
1.109 thorpej 3049:
3050: /*
3051: * If this entry points at a vnode, and the
3052: * protection includes VM_PROT_EXECUTE, mark
1.111 thorpej 3053: * the vnode as VEXECMAP.
1.109 thorpej 3054: */
3055: if (UVM_ET_ISOBJ(current)) {
3056: struct uvm_object *uobj =
3057: current->object.uvm_obj;
3058:
3059: if (UVM_OBJ_IS_VNODE(uobj) &&
1.204.2.5 yamt 3060: (current->protection & VM_PROT_EXECUTE)) {
1.204.2.7 yamt 3061: mutex_enter(&uobj->vmobjlock);
1.110 thorpej 3062: vn_markexec((struct vnode *) uobj);
1.204.2.7 yamt 3063: mutex_exit(&uobj->vmobjlock);
1.204.2.5 yamt 3064: }
1.109 thorpej 3065: }
1.65 thorpej 3066: }
1.10 mrg 3067:
1.65 thorpej 3068: /*
3069: * If the map is configured to lock any future mappings,
3070: * wire this entry now if the old protection was VM_PROT_NONE
3071: * and the new protection is not VM_PROT_NONE.
3072: */
3073:
3074: if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
3075: VM_MAPENT_ISWIRED(entry) == 0 &&
3076: old_prot == VM_PROT_NONE &&
3077: new_prot != VM_PROT_NONE) {
3078: if (uvm_map_pageable(map, entry->start,
1.204.2.3 yamt 3079: entry->end, false,
1.94 chs 3080: UVM_LK_ENTER|UVM_LK_EXIT) != 0) {
1.99 chs 3081:
1.65 thorpej 3082: /*
3083: * If locking the entry fails, remember the
3084: * error if it's the first one. Note we
3085: * still continue setting the protection in
1.94 chs 3086: * the map, but will return the error
3087: * condition regardless.
1.65 thorpej 3088: *
3089: * XXX Ignore what the actual error is,
3090: * XXX just call it a resource shortage
3091: * XXX so that it doesn't get confused
3092: * XXX what uvm_map_protect() itself would
3093: * XXX normally return.
3094: */
1.99 chs 3095:
1.94 chs 3096: error = ENOMEM;
1.65 thorpej 3097: }
1.10 mrg 3098: }
3099: current = current->next;
3100: }
1.105 chris 3101: pmap_update(map->pmap);
1.85 chs 3102:
1.65 thorpej 3103: out:
1.10 mrg 3104: vm_map_unlock(map);
1.174 yamt 3105:
1.94 chs 3106: UVMHIST_LOG(maphist, "<- done, error=%d",error,0,0,0);
3107: return error;
1.1 mrg 3108: }
3109:
3110: #undef MASK
3111:
1.98 chs 3112: /*
1.1 mrg 3113: * uvm_map_inherit: set inheritance code for range of addrs in map.
3114: *
3115: * => map must be unlocked
3116: * => note that the inherit code is used during a "fork". see fork
3117: * code for details.
3118: */
3119:
1.10 mrg 3120: int
1.138 enami 3121: uvm_map_inherit(struct vm_map *map, vaddr_t start, vaddr_t end,
3122: vm_inherit_t new_inheritance)
1.10 mrg 3123: {
1.99 chs 3124: struct vm_map_entry *entry, *temp_entry;
1.10 mrg 3125: UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
3126: UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
3127: map, start, end, new_inheritance);
3128:
3129: switch (new_inheritance) {
1.80 wiz 3130: case MAP_INHERIT_NONE:
3131: case MAP_INHERIT_COPY:
3132: case MAP_INHERIT_SHARE:
1.10 mrg 3133: break;
3134: default:
3135: UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
1.94 chs 3136: return EINVAL;
1.10 mrg 3137: }
1.1 mrg 3138:
1.10 mrg 3139: vm_map_lock(map);
3140: VM_MAP_RANGE_CHECK(map, start, end);
3141: if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3142: entry = temp_entry;
1.174 yamt 3143: UVM_MAP_CLIP_START(map, entry, start, NULL);
1.10 mrg 3144: } else {
3145: entry = temp_entry->next;
3146: }
3147: while ((entry != &map->header) && (entry->start < end)) {
1.174 yamt 3148: UVM_MAP_CLIP_END(map, entry, end, NULL);
1.10 mrg 3149: entry->inheritance = new_inheritance;
3150: entry = entry->next;
3151: }
3152: vm_map_unlock(map);
3153: UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
1.94 chs 3154: return 0;
1.41 mrg 3155: }
3156:
1.98 chs 3157: /*
1.41 mrg 3158: * uvm_map_advice: set advice code for range of addrs in map.
3159: *
3160: * => map must be unlocked
3161: */
3162:
3163: int
1.138 enami 3164: uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
1.41 mrg 3165: {
1.99 chs 3166: struct vm_map_entry *entry, *temp_entry;
1.41 mrg 3167: UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
3168: UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)",
3169: map, start, end, new_advice);
3170:
3171: vm_map_lock(map);
3172: VM_MAP_RANGE_CHECK(map, start, end);
3173: if (uvm_map_lookup_entry(map, start, &temp_entry)) {
3174: entry = temp_entry;
1.174 yamt 3175: UVM_MAP_CLIP_START(map, entry, start, NULL);
1.41 mrg 3176: } else {
3177: entry = temp_entry->next;
3178: }
1.61 thorpej 3179:
3180: /*
3181: * XXXJRT: disallow holes?
3182: */
3183:
1.41 mrg 3184: while ((entry != &map->header) && (entry->start < end)) {
1.174 yamt 3185: UVM_MAP_CLIP_END(map, entry, end, NULL);
1.41 mrg 3186:
3187: switch (new_advice) {
3188: case MADV_NORMAL:
3189: case MADV_RANDOM:
3190: case MADV_SEQUENTIAL:
3191: /* nothing special here */
3192: break;
3193:
3194: default:
1.50 mrg 3195: vm_map_unlock(map);
1.41 mrg 3196: UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
1.94 chs 3197: return EINVAL;
1.41 mrg 3198: }
3199: entry->advice = new_advice;
3200: entry = entry->next;
3201: }
3202:
3203: vm_map_unlock(map);
3204: UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
1.94 chs 3205: return 0;
1.1 mrg 3206: }
3207:
3208: /*
3209: * uvm_map_pageable: sets the pageability of a range in a map.
3210: *
1.56 thorpej 3211: * => wires map entries. should not be used for transient page locking.
3212: * for that, use uvm_fault_wire()/uvm_fault_unwire() (see uvm_vslock()).
1.204.2.1 yamt 3213: * => regions specified as not pageable require lock-down (wired) memory
1.1 mrg 3214: * and page tables.
1.59 thorpej 3215: * => map must never be read-locked
1.204.2.3 yamt 3216: * => if islocked is true, map is already write-locked
1.59 thorpej 3217: * => we always unlock the map, since we must downgrade to a read-lock
3218: * to call uvm_fault_wire()
1.1 mrg 3219: * => XXXCDC: check this and try and clean it up.
3220: */
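/*
 * Hedged example (illustration only; roughly the shape of the
 * mlock(2)/munlock(2) paths, whose real call sites live elsewhere):
 * wiring a range is uvm_map_pageable(map, start, end, false, 0), and the
 * matching unwire is uvm_map_pageable(map, start, end, true, 0); the
 * UVM_LK_ENTER/UVM_LK_EXIT lockflags are only for callers that already
 * hold, or want to keep holding, the write lock.
 */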
3221:
1.19 kleink 3222: int
1.138 enami 3223: uvm_map_pageable(struct vm_map *map, vaddr_t start, vaddr_t end,
1.204.2.3 yamt 3224: bool new_pageable, int lockflags)
1.1 mrg 3225: {
1.99 chs 3226: struct vm_map_entry *entry, *start_entry, *failed_entry;
1.10 mrg 3227: int rv;
1.60 thorpej 3228: #ifdef DIAGNOSTIC
3229: u_int timestamp_save;
3230: #endif
1.10 mrg 3231: UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
3232: UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
1.85 chs 3233: map, start, end, new_pageable);
3234: KASSERT(map->flags & VM_MAP_PAGEABLE);
1.45 thorpej 3235:
1.64 thorpej 3236: if ((lockflags & UVM_LK_ENTER) == 0)
1.59 thorpej 3237: vm_map_lock(map);
1.10 mrg 3238: VM_MAP_RANGE_CHECK(map, start, end);
3239:
1.98 chs 3240: /*
1.10 mrg 3241: * only one pageability change may take place at one time, since
3242: * uvm_fault_wire assumes it will be called only once for each
3243: * wiring/unwiring. therefore, we have to make sure we're actually
3244: * changing the pageability for the entire region. we do so before
1.98 chs 3245: * making any changes.
1.10 mrg 3246: */
3247:
1.204.2.3 yamt 3248: if (uvm_map_lookup_entry(map, start, &start_entry) == false) {
1.64 thorpej 3249: if ((lockflags & UVM_LK_EXIT) == 0)
3250: vm_map_unlock(map);
1.85 chs 3251:
1.94 chs 3252: UVMHIST_LOG(maphist,"<- done (fault)",0,0,0,0);
3253: return EFAULT;
1.10 mrg 3254: }
3255: entry = start_entry;
3256:
1.98 chs 3257: /*
1.100 wiz 3258: * handle wiring and unwiring separately.
1.10 mrg 3259: */
1.1 mrg 3260:
1.56 thorpej 3261: if (new_pageable) { /* unwire */
1.174 yamt 3262: UVM_MAP_CLIP_START(map, entry, start, NULL);
1.85 chs 3263:
1.10 mrg 3264: /*
3265: * unwiring. first ensure that the range to be unwired is
1.98 chs 3266: * really wired down and that there are no holes.
1.10 mrg 3267: */
1.85 chs 3268:
1.10 mrg 3269: while ((entry != &map->header) && (entry->start < end)) {
3270: if (entry->wired_count == 0 ||
3271: (entry->end < end &&
1.55 thorpej 3272: (entry->next == &map->header ||
3273: entry->next->start > entry->end))) {
1.64 thorpej 3274: if ((lockflags & UVM_LK_EXIT) == 0)
3275: vm_map_unlock(map);
1.94 chs 3276: UVMHIST_LOG(maphist, "<- done (INVAL)",0,0,0,0);
3277: return EINVAL;
1.10 mrg 3278: }
3279: entry = entry->next;
3280: }
3281:
1.98 chs 3282: /*
1.56 thorpej 3283: * POSIX 1003.1b - a single munlock call unlocks a region,
3284: * regardless of the number of mlock calls made on that
3285: * region.
1.10 mrg 3286: */
1.85 chs 3287:
1.10 mrg 3288: entry = start_entry;
3289: while ((entry != &map->header) && (entry->start < end)) {
1.174 yamt 3290: UVM_MAP_CLIP_END(map, entry, end, NULL);
1.56 thorpej 3291: if (VM_MAPENT_ISWIRED(entry))
1.10 mrg 3292: uvm_map_entry_unwire(map, entry);
3293: entry = entry->next;
3294: }
1.64 thorpej 3295: if ((lockflags & UVM_LK_EXIT) == 0)
3296: vm_map_unlock(map);
1.10 mrg 3297: UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
1.94 chs 3298: return 0;
1.10 mrg 3299: }
3300:
3301: /*
3302: * wire case: in two passes [XXXCDC: ugly block of code here]
3303: *
3304: * 1: holding the write lock, we create any anonymous maps that need
3305: * to be created. then we clip each map entry to the region to
1.98 chs 3306: * be wired and increment its wiring count.
1.10 mrg 3307: *
3308: * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
1.56 thorpej 3309: * in the pages for any newly wired area (wired_count == 1).
1.10 mrg 3310: *
3311: * downgrading to a read lock for uvm_fault_wire avoids a possible
3312: * deadlock with another thread that may have faulted on one of
3313: * the pages to be wired (it would mark the page busy, blocking
3314: * us, then in turn block on the map lock that we hold). because
3315: * of problems in the recursive lock package, we cannot upgrade
3316: * to a write lock in vm_map_lookup. thus, any actions that
3317: * require the write lock must be done beforehand. because we
3318: * keep the read lock on the map, the copy-on-write status of the
3319: * entries we modify here cannot change.
3320: */
3321:
3322: while ((entry != &map->header) && (entry->start < end)) {
1.55 thorpej 3323: if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
1.85 chs 3324:
3325: /*
1.10 mrg 3326: * perform actions of vm_map_lookup that need the
3327: * write lock on the map: create an anonymous map
3328: * for a copy-on-write region, or an anonymous map
1.29 chuck 3329: * for a zero-fill region. (XXXCDC: submap case
3330: * ok?)
1.10 mrg 3331: */
1.85 chs 3332:
1.29 chuck 3333: if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
1.98 chs 3334: if (UVM_ET_ISNEEDSCOPY(entry) &&
1.117 chs 3335: ((entry->max_protection & VM_PROT_WRITE) ||
1.54 thorpej 3336: (entry->object.uvm_obj == NULL))) {
1.204.2.1 yamt 3337: amap_copy(map, entry, 0, start, end);
1.10 mrg 3338: /* XXXCDC: wait OK? */
3339: }
3340: }
1.55 thorpej 3341: }
1.174 yamt 3342: UVM_MAP_CLIP_START(map, entry, start, NULL);
3343: UVM_MAP_CLIP_END(map, entry, end, NULL);
1.10 mrg 3344: entry->wired_count++;
3345:
3346: /*
1.98 chs 3347: * Check for holes
1.10 mrg 3348: */
1.85 chs 3349:
1.54 thorpej 3350: if (entry->protection == VM_PROT_NONE ||
3351: (entry->end < end &&
3352: (entry->next == &map->header ||
3353: entry->next->start > entry->end))) {
1.85 chs 3354:
1.10 mrg 3355: /*
3356: * found one. amap creation actions do not need to
1.98 chs 3357: * be undone, but the wired counts need to be restored.
1.10 mrg 3358: */
1.85 chs 3359:
1.10 mrg 3360: while (entry != &map->header && entry->end > start) {
3361: entry->wired_count--;
3362: entry = entry->prev;
3363: }
1.64 thorpej 3364: if ((lockflags & UVM_LK_EXIT) == 0)
3365: vm_map_unlock(map);
1.10 mrg 3366: UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
1.94 chs 3367: return EINVAL;
1.10 mrg 3368: }
3369: entry = entry->next;
3370: }
3371:
3372: /*
3373: * Pass 2.
3374: */
1.51 thorpej 3375:
1.60 thorpej 3376: #ifdef DIAGNOSTIC
3377: timestamp_save = map->timestamp;
3378: #endif
3379: vm_map_busy(map);
1.204.2.7 yamt 3380: vm_map_unlock(map);
1.10 mrg 3381:
3382: rv = 0;
3383: entry = start_entry;
3384: while (entry != &map->header && entry->start < end) {
1.51 thorpej 3385: if (entry->wired_count == 1) {
1.44 thorpej 3386: rv = uvm_fault_wire(map, entry->start, entry->end,
1.204.2.1 yamt 3387: entry->max_protection, 1);
1.10 mrg 3388: if (rv) {
1.94 chs 3389:
1.51 thorpej 3390: /*
3391: * wiring failed. break out of the loop.
3392: * we'll clean up the map below, once we
3393: * have a write lock again.
3394: */
1.94 chs 3395:
1.51 thorpej 3396: break;
1.10 mrg 3397: }
3398: }
3399: entry = entry->next;
3400: }
3401:
1.139 enami 3402: if (rv) { /* failed? */
1.85 chs 3403:
1.52 thorpej 3404: /*
3405: * Get back to an exclusive (write) lock.
3406: */
1.85 chs 3407:
1.204.2.7 yamt 3408: vm_map_lock(map);
1.60 thorpej 3409: vm_map_unbusy(map);
3410:
3411: #ifdef DIAGNOSTIC
1.204.2.9! yamt 3412: if (timestamp_save + 1 != map->timestamp)
1.60 thorpej 3413: panic("uvm_map_pageable: stale map");
3414: #endif
1.10 mrg 3415:
1.51 thorpej 3416: /*
3417: * first drop the wiring count on all the entries
3418: * which haven't actually been wired yet.
3419: */
1.85 chs 3420:
1.54 thorpej 3421: failed_entry = entry;
3422: while (entry != &map->header && entry->start < end) {
1.51 thorpej 3423: entry->wired_count--;
1.54 thorpej 3424: entry = entry->next;
3425: }
1.51 thorpej 3426:
3427: /*
1.54 thorpej 3428: * now, unwire all the entries that were successfully
3429: * wired above.
1.51 thorpej 3430: */
1.85 chs 3431:
1.54 thorpej 3432: entry = start_entry;
3433: while (entry != failed_entry) {
3434: entry->wired_count--;
1.55 thorpej 3435: if (VM_MAPENT_ISWIRED(entry) == 0)
1.54 thorpej 3436: uvm_map_entry_unwire(map, entry);
3437: entry = entry->next;
3438: }
1.64 thorpej 3439: if ((lockflags & UVM_LK_EXIT) == 0)
3440: vm_map_unlock(map);
1.10 mrg 3441: UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
1.139 enami 3442: return (rv);
1.10 mrg 3443: }
1.51 thorpej 3444:
1.64 thorpej 3445: if ((lockflags & UVM_LK_EXIT) == 0) {
3446: vm_map_unbusy(map);
3447: } else {
1.85 chs 3448:
1.64 thorpej 3449: /*
3450: * Get back to an exclusive (write) lock.
3451: */
1.85 chs 3452:
1.204.2.7 yamt 3453: vm_map_lock(map);
1.64 thorpej 3454: vm_map_unbusy(map);
3455: }
3456:
1.10 mrg 3457: UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
1.94 chs 3458: return 0;
1.1 mrg 3459: }
3460:
3461: /*
1.54 thorpej 3462: * uvm_map_pageable_all: special case of uvm_map_pageable - affects
3463: * all mapped regions.
3464: *
3465: * => map must not be locked.
3466: * => if no flags are specified, all regions are unwired.
3467: * => XXXJRT: has some of the same problems as uvm_map_pageable() above.
3468: */
3469:
3470: int
1.138 enami 3471: uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
1.54 thorpej 3472: {
1.99 chs 3473: struct vm_map_entry *entry, *failed_entry;
1.54 thorpej 3474: vsize_t size;
3475: int rv;
1.60 thorpej 3476: #ifdef DIAGNOSTIC
3477: u_int timestamp_save;
3478: #endif
1.54 thorpej 3479: UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
3480: UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0);
3481:
1.85 chs 3482: KASSERT(map->flags & VM_MAP_PAGEABLE);
1.54 thorpej 3483:
3484: vm_map_lock(map);
3485:
3486: /*
3487: * handle wiring and unwiring separately.
3488: */
3489:
3490: if (flags == 0) { /* unwire */
1.99 chs 3491:
1.54 thorpej 3492: /*
1.56 thorpej 3493: * POSIX 1003.1b -- munlockall unlocks all regions,
3494: * regardless of how many times mlockall has been called.
1.54 thorpej 3495: */
1.99 chs 3496:
1.54 thorpej 3497: for (entry = map->header.next; entry != &map->header;
3498: entry = entry->next) {
1.56 thorpej 3499: if (VM_MAPENT_ISWIRED(entry))
3500: uvm_map_entry_unwire(map, entry);
1.54 thorpej 3501: }
1.204.2.4 yamt 3502: map->flags &= ~VM_MAP_WIREFUTURE;
1.54 thorpej 3503: vm_map_unlock(map);
3504: UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
1.94 chs 3505: return 0;
1.54 thorpej 3506: }
3507:
3508: if (flags & MCL_FUTURE) {
1.99 chs 3509:
1.54 thorpej 3510: /*
3511: * must wire all future mappings; remember this.
3512: */
1.99 chs 3513:
1.204.2.4 yamt 3514: map->flags |= VM_MAP_WIREFUTURE;
1.54 thorpej 3515: }
3516:
3517: if ((flags & MCL_CURRENT) == 0) {
1.99 chs 3518:
1.54 thorpej 3519: /*
3520: * no more work to do!
3521: */
1.99 chs 3522:
1.54 thorpej 3523: UVMHIST_LOG(maphist,"<- done (OK no wire)",0,0,0,0);
3524: vm_map_unlock(map);
1.94 chs 3525: return 0;
1.54 thorpej 3526: }
3527:
3528: /*
3529: * wire case: in three passes [XXXCDC: ugly block of code here]
3530: *
3531: * 1: holding the write lock, count all pages mapped by non-wired
3532: * entries. if this would cause us to go over our limit, we fail.
3533: *
3534: * 2: still holding the write lock, we create any anonymous maps that
3535:  *    need to be created.  then we increment each entry's wiring count.
3536: *
3537: * 3: we downgrade to a read lock, and call uvm_fault_wire to fault
1.56 thorpej 3538: * in the pages for any newly wired area (wired_count == 1).
1.54 thorpej 3539: *
3540: * downgrading to a read lock for uvm_fault_wire avoids a possible
3541: * deadlock with another thread that may have faulted on one of
3542: * the pages to be wired (it would mark the page busy, blocking
3543: * us, then in turn block on the map lock that we hold). because
3544: * of problems in the recursive lock package, we cannot upgrade
3545: * to a write lock in vm_map_lookup. thus, any actions that
3546: * require the write lock must be done beforehand. because we
3547: * keep the read lock on the map, the copy-on-write status of the
3548: * entries we modify here cannot change.
3549: */
3550:
3551: for (size = 0, entry = map->header.next; entry != &map->header;
3552: entry = entry->next) {
3553: if (entry->protection != VM_PROT_NONE &&
1.55 thorpej 3554: VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
1.54 thorpej 3555: size += entry->end - entry->start;
3556: }
3557: }
3558:
3559: if (atop(size) + uvmexp.wired > uvmexp.wiredmax) {
3560: vm_map_unlock(map);
1.94 chs 3561: return ENOMEM;
1.54 thorpej 3562: }
3563:
3564: if (limit != 0 &&
3565: (size + ptoa(pmap_wired_count(vm_map_pmap(map))) > limit)) {
3566: vm_map_unlock(map);
1.94 chs 3567: return ENOMEM;
1.54 thorpej 3568: }
3569:
3570: /*
3571: * Pass 2.
3572: */
3573:
3574: for (entry = map->header.next; entry != &map->header;
3575: entry = entry->next) {
3576: if (entry->protection == VM_PROT_NONE)
3577: continue;
1.55 thorpej 3578: if (VM_MAPENT_ISWIRED(entry) == 0) { /* not already wired? */
1.99 chs 3579:
1.54 thorpej 3580: /*
3581: * perform actions of vm_map_lookup that need the
3582: * write lock on the map: create an anonymous map
3583: * for a copy-on-write region, or an anonymous map
3584: * for a zero-fill region. (XXXCDC: submap case
3585: * ok?)
3586: */
1.99 chs 3587:
1.54 thorpej 3588: if (!UVM_ET_ISSUBMAP(entry)) { /* not submap */
1.98 chs 3589: if (UVM_ET_ISNEEDSCOPY(entry) &&
1.117 chs 3590: ((entry->max_protection & VM_PROT_WRITE) ||
1.54 thorpej 3591: (entry->object.uvm_obj == NULL))) {
1.204.2.1 yamt 3592: amap_copy(map, entry, 0, entry->start,
3593: entry->end);
1.54 thorpej 3594: /* XXXCDC: wait OK? */
3595: }
3596: }
1.55 thorpej 3597: }
1.54 thorpej 3598: entry->wired_count++;
3599: }
3600:
3601: /*
3602: * Pass 3.
3603: */
3604:
1.60 thorpej 3605: #ifdef DIAGNOSTIC
3606: timestamp_save = map->timestamp;
3607: #endif
3608: vm_map_busy(map);
1.204.2.7 yamt 3609: vm_map_unlock(map);
1.54 thorpej 3610:
1.94 chs 3611: rv = 0;
1.54 thorpej 3612: for (entry = map->header.next; entry != &map->header;
3613: entry = entry->next) {
3614: if (entry->wired_count == 1) {
3615: rv = uvm_fault_wire(map, entry->start, entry->end,
1.204.2.1 yamt 3616: entry->max_protection, 1);
1.54 thorpej 3617: if (rv) {
1.99 chs 3618:
1.54 thorpej 3619: /*
3620: * wiring failed. break out of the loop.
3621: * we'll clean up the map below, once we
3622: * have a write lock again.
3623: */
1.99 chs 3624:
1.54 thorpej 3625: break;
3626: }
3627: }
3628: }
3629:
1.99 chs 3630: if (rv) {
3631:
1.54 thorpej 3632: /*
3633:  		 * Get back to an exclusive (write) lock.
3634: */
1.99 chs 3635:
1.204.2.7 yamt 3636: vm_map_lock(map);
1.60 thorpej 3637: vm_map_unbusy(map);
3638:
3639: #ifdef DIAGNOSTIC
1.204.2.9! yamt 3640: if (timestamp_save + 1 != map->timestamp)
1.60 thorpej 3641: panic("uvm_map_pageable_all: stale map");
3642: #endif
1.54 thorpej 3643:
3644: /*
3645: * first drop the wiring count on all the entries
3646: * which haven't actually been wired yet.
1.67 thorpej 3647: *
3648: * Skip VM_PROT_NONE entries like we did above.
1.54 thorpej 3649: */
1.99 chs 3650:
1.54 thorpej 3651: failed_entry = entry;
3652: for (/* nothing */; entry != &map->header;
1.67 thorpej 3653: entry = entry->next) {
3654: if (entry->protection == VM_PROT_NONE)
3655: continue;
1.54 thorpej 3656: entry->wired_count--;
1.67 thorpej 3657: }
1.54 thorpej 3658:
3659: /*
3660: * now, unwire all the entries that were successfully
3661: * wired above.
1.67 thorpej 3662: *
3663: * Skip VM_PROT_NONE entries like we did above.
1.54 thorpej 3664: */
1.99 chs 3665:
1.54 thorpej 3666: for (entry = map->header.next; entry != failed_entry;
3667: entry = entry->next) {
1.67 thorpej 3668: if (entry->protection == VM_PROT_NONE)
3669: continue;
1.54 thorpej 3670: entry->wired_count--;
1.67 thorpej 3671: if (VM_MAPENT_ISWIRED(entry))
1.54 thorpej 3672: uvm_map_entry_unwire(map, entry);
3673: }
3674: vm_map_unlock(map);
3675: UVMHIST_LOG(maphist,"<- done (RV=%d)", rv,0,0,0);
3676: return (rv);
3677: }
3678:
1.60 thorpej 3679: vm_map_unbusy(map);
1.54 thorpej 3680:
3681: UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
1.94 chs 3682: return 0;
1.54 thorpej 3683: }
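
/*
 * Illustrative sketch (not part of this file): uvm_map_pageable_all() is
 * reached from the mlockall(2)/munlockall(2) system calls.  The fragment
 * below is kept under "#if 0" so it is never compiled here; it shows
 * roughly how a user program exercises the wire/unwire paths above.
 * privilege checks and resource limits are omitted for brevity.
 */
#if 0
#include <sys/mman.h>
#include <err.h>
#include <stdlib.h>

int
main(void)
{
	char *buf;

	if ((buf = malloc(1 << 20)) == NULL)
		err(1, "malloc");

	/* wire everything mapped now and everything mapped later */
	if (mlockall(MCL_CURRENT | MCL_FUTURE) == -1)
		err(1, "mlockall");

	buf[0] = 1;		/* touches wired, resident memory */

	/* drop all wirings, regardless of how often mlockall() was called */
	if (munlockall() == -1)
		err(1, "munlockall");
	return 0;
}
#endif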
3684:
3685: /*
1.61 thorpej 3686: * uvm_map_clean: clean out a map range
1.1 mrg 3687: *
3688: * => valid flags:
1.61 thorpej 3689: * if (flags & PGO_CLEANIT): dirty pages are cleaned first
1.1 mrg 3690: * if (flags & PGO_SYNCIO): dirty pages are written synchronously
3691: * if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
3692: * if (flags & PGO_FREE): any cached pages are freed after clean
3693: * => returns an error if any part of the specified range isn't mapped
1.98 chs 3694: * => never a need to flush amap layer since the anonymous memory has
1.61 thorpej 3695: * no permanent home, but may deactivate pages there
3696: * => called from sys_msync() and sys_madvise()
1.1 mrg 3697: * => caller must not write-lock map (read OK).
3698: * => we may sleep while cleaning if SYNCIO [with map read-locked]
3699: */
3700:
1.10 mrg 3701: int
1.138 enami 3702: uvm_map_clean(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
1.10 mrg 3703: {
1.99 chs 3704: struct vm_map_entry *current, *entry;
1.61 thorpej 3705: struct uvm_object *uobj;
3706: struct vm_amap *amap;
3707: struct vm_anon *anon;
3708: struct vm_page *pg;
3709: vaddr_t offset;
1.24 eeh 3710: vsize_t size;
1.188 dbj 3711: voff_t uoff;
1.106 chs 3712: int error, refs;
1.10 mrg 3713: UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
1.85 chs 3714:
1.10 mrg 3715: UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
1.85 chs 3716: map, start, end, flags);
3717: KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
3718: (PGO_FREE|PGO_DEACTIVATE));
1.61 thorpej 3719:
1.10 mrg 3720: vm_map_lock_read(map);
3721: VM_MAP_RANGE_CHECK(map, start, end);
1.204.2.3 yamt 3722: if (uvm_map_lookup_entry(map, start, &entry) == false) {
1.10 mrg 3723: vm_map_unlock_read(map);
1.94 chs 3724: return EFAULT;
1.10 mrg 3725: }
3726:
3727: /*
1.186 chs 3728: * Make a first pass to check for holes and wiring problems.
1.10 mrg 3729: */
1.85 chs 3730:
1.10 mrg 3731: for (current = entry; current->start < end; current = current->next) {
3732: if (UVM_ET_ISSUBMAP(current)) {
3733: vm_map_unlock_read(map);
1.94 chs 3734: return EINVAL;
1.10 mrg 3735: }
1.186 chs 3736: if ((flags & PGO_FREE) != 0 && VM_MAPENT_ISWIRED(entry)) {
3737: vm_map_unlock_read(map);
3738: return EBUSY;
3739: }
1.90 chs 3740: if (end <= current->end) {
3741: break;
3742: }
3743: if (current->end != current->next->start) {
1.10 mrg 3744: vm_map_unlock_read(map);
1.94 chs 3745: return EFAULT;
1.10 mrg 3746: }
3747: }
3748:
1.94 chs 3749: error = 0;
1.90 chs 3750: for (current = entry; start < end; current = current->next) {
1.61 thorpej 3751: amap = current->aref.ar_amap; /* top layer */
3752: uobj = current->object.uvm_obj; /* bottom layer */
1.85 chs 3753: KASSERT(start >= current->start);
1.1 mrg 3754:
1.10 mrg 3755: /*
1.61 thorpej 3756: * No amap cleaning necessary if:
3757: *
3758: * (1) There's no amap.
3759: *
3760: * (2) We're not deactivating or freeing pages.
1.10 mrg 3761: */
1.85 chs 3762:
1.90 chs 3763: if (amap == NULL || (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
1.61 thorpej 3764: goto flush_object;
3765:
3766: amap_lock(amap);
3767: offset = start - current->start;
1.90 chs 3768: size = MIN(end, current->end) - start;
3769: for ( ; size != 0; size -= PAGE_SIZE, offset += PAGE_SIZE) {
1.61      thorpej  3770: 			anon = amap_lookup(&current->aref, offset);
3771: if (anon == NULL)
3772: continue;
3773:
1.204.2.7 yamt 3774: mutex_enter(&anon->an_lock);
1.192 yamt 3775: pg = anon->an_page;
1.63 thorpej 3776: if (pg == NULL) {
1.204.2.7 yamt 3777: mutex_exit(&anon->an_lock);
1.63 thorpej 3778: continue;
3779: }
3780:
1.61 thorpej 3781: switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) {
1.85 chs 3782:
1.61 thorpej 3783: /*
1.115 chs 3784: * In these first 3 cases, we just deactivate the page.
1.61 thorpej 3785: */
1.85 chs 3786:
1.61 thorpej 3787: case PGO_CLEANIT|PGO_FREE:
3788: case PGO_CLEANIT|PGO_DEACTIVATE:
3789: case PGO_DEACTIVATE:
1.68 thorpej 3790: deactivate_it:
1.61 thorpej 3791: /*
1.115 chs 3792: * skip the page if it's loaned or wired,
3793: * since it shouldn't be on a paging queue
3794: * at all in these cases.
1.61 thorpej 3795: */
1.85 chs 3796:
1.204.2.7 yamt 3797: mutex_enter(&uvm_pageqlock);
1.115 chs 3798: if (pg->loan_count != 0 ||
3799: pg->wire_count != 0) {
1.204.2.7 yamt 3800: mutex_exit(&uvm_pageqlock);
3801: mutex_exit(&anon->an_lock);
1.61 thorpej 3802: continue;
3803: }
1.85 chs 3804: KASSERT(pg->uanon == anon);
1.61 thorpej 3805: uvm_pagedeactivate(pg);
1.204.2.7 yamt 3806: mutex_exit(&uvm_pageqlock);
3807: mutex_exit(&anon->an_lock);
1.61 thorpej 3808: continue;
3809:
3810: case PGO_FREE:
1.85 chs 3811:
1.68 thorpej 3812: /*
3813: * If there are multiple references to
3814: * the amap, just deactivate the page.
3815: */
1.85 chs 3816:
1.68 thorpej 3817: if (amap_refs(amap) > 1)
3818: goto deactivate_it;
3819:
1.115 chs 3820: /* skip the page if it's wired */
1.62 thorpej 3821: if (pg->wire_count != 0) {
1.204.2.7 yamt 3822: mutex_exit(&anon->an_lock);
1.62 thorpej 3823: continue;
3824: }
1.66      thorpej  3825: 				amap_unadd(&current->aref, offset);
1.61 thorpej 3826: refs = --anon->an_ref;
1.204.2.7 yamt 3827: mutex_exit(&anon->an_lock);
1.61 thorpej 3828: if (refs == 0)
3829: uvm_anfree(anon);
3830: continue;
3831: }
3832: }
3833: amap_unlock(amap);
1.1 mrg 3834:
1.61 thorpej 3835: flush_object:
1.10 mrg 3836: /*
1.33 chuck 3837: * flush pages if we've got a valid backing object.
1.116 chs 3838: * note that we must always clean object pages before
3839: * freeing them since otherwise we could reveal stale
3840: * data from files.
1.10 mrg 3841: */
1.1 mrg 3842:
1.188 dbj 3843: uoff = current->offset + (start - current->start);
1.90 chs 3844: size = MIN(end, current->end) - start;
1.61 thorpej 3845: if (uobj != NULL) {
1.204.2.7 yamt 3846: mutex_enter(&uobj->vmobjlock);
1.136 thorpej 3847: if (uobj->pgops->pgo_put != NULL)
1.188 dbj 3848: error = (uobj->pgops->pgo_put)(uobj, uoff,
3849: uoff + size, flags | PGO_CLEANIT);
1.136 thorpej 3850: else
3851: error = 0;
1.10 mrg 3852: }
3853: start += size;
3854: }
1.1 mrg 3855: vm_map_unlock_read(map);
1.98 chs 3856: return (error);
1.1 mrg 3857: }
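
/*
 * Illustrative sketch (not part of this file): the header comment above
 * notes that uvm_map_clean() is reached from msync(2) and madvise(2).
 * The "#if 0" fragment below shows typical user-level calls; the exact
 * correspondence between madvise advice values and the PGO_* flags used
 * here is an assumption made for illustration only.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>

static void
flush_and_discard(void *addr, size_t len)
{

	/* write back dirty pages synchronously (PGO_CLEANIT|PGO_SYNCIO) */
	if (msync(addr, len, MS_SYNC) == -1)
		err(1, "msync");

	/* hint that the pages may be deactivated (roughly PGO_DEACTIVATE) */
	if (madvise(addr, len, MADV_DONTNEED) == -1)
		err(1, "madvise");
}
#endif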
3858:
3859:
3860: /*
3861: * uvm_map_checkprot: check protection in map
3862: *
3863: * => must allow specified protection in a fully allocated region.
3864: * => map must be read or write locked by caller.
3865: */
3866:
1.204.2.3 yamt 3867: bool
1.138 enami 3868: uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
3869: vm_prot_t protection)
1.10 mrg 3870: {
1.99 chs 3871: struct vm_map_entry *entry;
3872: struct vm_map_entry *tmp_entry;
1.10 mrg 3873:
1.94 chs 3874: if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
1.204.2.3 yamt 3875: return (false);
1.94 chs 3876: }
3877: entry = tmp_entry;
3878: while (start < end) {
3879: if (entry == &map->header) {
1.204.2.3 yamt 3880: return (false);
1.94 chs 3881: }
1.85 chs 3882:
1.10 mrg 3883: /*
3884: * no holes allowed
3885: */
3886:
1.94 chs 3887: if (start < entry->start) {
1.204.2.3 yamt 3888: return (false);
1.94 chs 3889: }
1.10 mrg 3890:
3891: /*
3892: * check protection associated with entry
3893: */
1.1 mrg 3894:
1.94 chs 3895: if ((entry->protection & protection) != protection) {
1.204.2.3 yamt 3896: return (false);
1.94 chs 3897: }
3898: start = entry->end;
3899: entry = entry->next;
3900: }
1.204.2.3 yamt 3901: return (true);
1.1 mrg 3902: }
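
/*
 * Illustrative sketch (not part of this file): uvm_map_checkprot() must be
 * called with the map read- or write-locked, as noted above.  A caller
 * verifying that a range is at least readable and writable might look
 * roughly like the "#if 0" fragment below (hypothetical helper, for
 * illustration only).
 */
#if 0
static bool
example_range_is_writable(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	bool ok;

	vm_map_lock_read(map);		/* satisfy the locking rule */
	ok = uvm_map_checkprot(map, start, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_map_unlock_read(map);
	return ok;
}
#endif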
3903:
3904: /*
3905: * uvmspace_alloc: allocate a vmspace structure.
3906: *
3907: * - structure includes vm_map and pmap
3908: * - XXX: no locking on this structure
3909: * - refcnt set to 1, rest must be init'd by caller
3910: */
1.10 mrg 3911: struct vmspace *
1.199 christos 3912: uvmspace_alloc(vaddr_t vmin, vaddr_t vmax)
1.10 mrg 3913: {
3914: struct vmspace *vm;
3915: UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
3916:
1.204.2.7 yamt 3917: vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
1.199 christos 3918: uvmspace_init(vm, NULL, vmin, vmax);
1.15 thorpej 3919: UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
3920: return (vm);
3921: }
3922:
3923: /*
3924: * uvmspace_init: initialize a vmspace structure.
3925: *
3926: * - XXX: no locking on this structure
1.132 matt 3927: * - refcnt set to 1, rest must be init'd by caller
1.15 thorpej 3928: */
3929: void
1.199 christos 3930: uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin, vaddr_t vmax)
1.15 thorpej 3931: {
3932: UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
3933:
1.23 perry 3934: memset(vm, 0, sizeof(*vm));
1.199 christos 3935: uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE
1.131 atatat 3936: #ifdef __USING_TOPDOWN_VM
3937: | VM_MAP_TOPDOWN
3938: #endif
3939: );
1.15 thorpej 3940: if (pmap)
3941: pmap_reference(pmap);
3942: else
3943: pmap = pmap_create();
3944: vm->vm_map.pmap = pmap;
1.10 mrg 3945: vm->vm_refcnt = 1;
1.15 thorpej 3946: UVMHIST_LOG(maphist,"<- done",0,0,0,0);
1.1 mrg 3947: }
3948:
3949: /*
1.168 junyoung 3950: * uvmspace_share: share a vmspace between two processes
1.1 mrg 3951: *
3952: * - used for vfork, threads(?)
3953: */
3954:
1.10 mrg 3955: void
1.138 enami 3956: uvmspace_share(struct proc *p1, struct proc *p2)
1.1 mrg 3957: {
1.139 enami 3958:
1.204.2.1 yamt 3959: uvmspace_addref(p1->p_vmspace);
1.10 mrg 3960: p2->p_vmspace = p1->p_vmspace;
1.1 mrg 3961: }
3962:
3963: /*
3964: * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
3965: *
3966: * - XXX: no locking on vmspace
3967: */
3968:
1.10 mrg 3969: void
1.138 enami 3970: uvmspace_unshare(struct lwp *l)
1.10 mrg 3971: {
1.128 thorpej 3972: struct proc *p = l->l_proc;
1.10 mrg 3973: struct vmspace *nvm, *ovm = p->p_vmspace;
1.85 chs 3974:
1.10 mrg 3975: if (ovm->vm_refcnt == 1)
3976: /* nothing to do: vmspace isn't shared in the first place */
3977: return;
1.85 chs 3978:
1.10 mrg 3979: /* make a new vmspace, still holding old one */
3980: nvm = uvmspace_fork(ovm);
3981:
1.128 thorpej 3982: pmap_deactivate(l); /* unbind old vmspace */
1.98 chs 3983: p->p_vmspace = nvm;
1.128 thorpej 3984: pmap_activate(l); /* switch to new vmspace */
1.13 thorpej 3985:
1.10 mrg 3986: uvmspace_free(ovm); /* drop reference to old vmspace */
1.1 mrg 3987: }
3988:
3989: /*
3990: * uvmspace_exec: the process wants to exec a new program
3991: */
3992:
1.10 mrg 3993: void
1.138 enami 3994: uvmspace_exec(struct lwp *l, vaddr_t start, vaddr_t end)
1.1 mrg 3995: {
1.128 thorpej 3996: struct proc *p = l->l_proc;
1.10 mrg 3997: struct vmspace *nvm, *ovm = p->p_vmspace;
1.99 chs 3998: struct vm_map *map = &ovm->vm_map;
1.1 mrg 3999:
1.71 chs 4000: #ifdef __sparc__
1.10 mrg 4001: /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
1.128 thorpej 4002: kill_user_windows(l); /* before stack addresses go away */
1.1 mrg 4003: #endif
4004:
1.10 mrg 4005: /*
4006: * see if more than one process is using this vmspace...
4007: */
1.1 mrg 4008:
1.10 mrg 4009: if (ovm->vm_refcnt == 1) {
1.1 mrg 4010:
1.10 mrg 4011: /*
4012: * if p is the only process using its vmspace then we can safely
4013: * recycle that vmspace for the program that is being exec'd.
4014: */
1.1 mrg 4015:
4016: #ifdef SYSVSHM
1.10 mrg 4017: /*
4018: * SYSV SHM semantics require us to kill all segments on an exec
4019: */
1.99 chs 4020:
1.10 mrg 4021: if (ovm->vm_shm)
4022: shmexit(ovm);
4023: #endif
1.54 thorpej 4024:
4025: /*
4026: * POSIX 1003.1b -- "lock future mappings" is revoked
4027: * when a process execs another program image.
4028: */
1.99 chs 4029:
1.204.2.4 yamt 4030: map->flags &= ~VM_MAP_WIREFUTURE;
1.10 mrg 4031:
4032: /*
4033: * now unmap the old program
4034: */
1.99 chs 4035:
1.120 chs 4036: pmap_remove_all(map->pmap);
1.184 chs 4037: uvm_unmap(map, vm_map_min(map), vm_map_max(map));
1.144 yamt 4038: KASSERT(map->header.prev == &map->header);
4039: KASSERT(map->nentries == 0);
1.93 eeh 4040:
4041: /*
4042: * resize the map
4043: */
1.99 chs 4044:
1.184 chs 4045: vm_map_setmin(map, start);
4046: vm_map_setmax(map, end);
1.10 mrg 4047: } else {
4048:
4049: /*
4050: * p's vmspace is being shared, so we can't reuse it for p since
4051: * it is still being used for others. allocate a new vmspace
4052: * for p
4053: */
1.99 chs 4054:
1.101 chs 4055: nvm = uvmspace_alloc(start, end);
1.1 mrg 4056:
1.10 mrg 4057: /*
4058: * install new vmspace and drop our ref to the old one.
4059: */
4060:
1.128 thorpej 4061: pmap_deactivate(l);
1.10 mrg 4062: p->p_vmspace = nvm;
1.128 thorpej 4063: pmap_activate(l);
1.13 thorpej 4064:
1.10 mrg 4065: uvmspace_free(ovm);
4066: }
1.1 mrg 4067: }
4068:
4069: /*
1.204.2.1  yamt     4070:  * uvmspace_addref: add a reference to a vmspace.
4071: */
4072:
4073: void
4074: uvmspace_addref(struct vmspace *vm)
4075: {
4076: struct vm_map *map = &vm->vm_map;
4077:
4078: KASSERT((map->flags & VM_MAP_DYING) == 0);
4079:
1.204.2.4 yamt 4080: mutex_enter(&map->misc_lock);
1.204.2.1 yamt 4081: KASSERT(vm->vm_refcnt > 0);
4082: vm->vm_refcnt++;
1.204.2.4 yamt 4083: mutex_exit(&map->misc_lock);
1.204.2.1 yamt 4084: }
4085:
4086: /*
1.1 mrg 4087: * uvmspace_free: free a vmspace data structure
4088: */
4089:
1.10 mrg 4090: void
1.138 enami 4091: uvmspace_free(struct vmspace *vm)
1.1 mrg 4092: {
1.99 chs 4093: struct vm_map_entry *dead_entries;
1.171 pk 4094: struct vm_map *map = &vm->vm_map;
1.172 he 4095: int n;
4096:
1.10 mrg 4097: UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
1.1 mrg 4098:
1.10 mrg 4099: UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
1.204.2.4 yamt 4100: mutex_enter(&map->misc_lock);
1.172 he 4101: n = --vm->vm_refcnt;
1.204.2.4 yamt 4102: mutex_exit(&map->misc_lock);
1.171 pk 4103: if (n > 0)
1.120 chs 4104: return;
1.99 chs 4105:
1.120 chs 4106: /*
4107: * at this point, there should be no other references to the map.
4108: * delete all of the mappings, then destroy the pmap.
4109: */
1.99 chs 4110:
1.120 chs 4111: map->flags |= VM_MAP_DYING;
4112: pmap_remove_all(map->pmap);
1.92 thorpej 4113: #ifdef SYSVSHM
1.120 chs 4114: /* Get rid of any SYSV shared memory segments. */
4115: if (vm->vm_shm != NULL)
4116: shmexit(vm);
1.92 thorpej 4117: #endif
1.120 chs 4118: if (map->nentries) {
1.184 chs 4119: uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
1.187 yamt 4120: &dead_entries, NULL, 0);
1.120 chs 4121: if (dead_entries != NULL)
4122: uvm_unmap_detach(dead_entries, 0);
1.10 mrg 4123: }
1.146 yamt 4124: KASSERT(map->nentries == 0);
4125: KASSERT(map->size == 0);
1.204.2.4 yamt 4126: mutex_destroy(&map->misc_lock);
4127: mutex_destroy(&map->mutex);
4128: rw_destroy(&map->lock);
1.120 chs 4129: pmap_destroy(map->pmap);
1.204.2.7 yamt 4130: pool_cache_put(&uvm_vmspace_cache, vm);
1.1 mrg 4131: }
4132:
4133: /*
4134: * F O R K - m a i n e n t r y p o i n t
4135: */
4136: /*
4137: * uvmspace_fork: fork a process' main map
4138: *
4139: * => create a new vmspace for child process from parent.
4140: * => parent's map must not be locked.
4141: */
4142:
1.10 mrg 4143: struct vmspace *
1.138 enami 4144: uvmspace_fork(struct vmspace *vm1)
1.10 mrg 4145: {
4146: struct vmspace *vm2;
1.99 chs 4147: struct vm_map *old_map = &vm1->vm_map;
4148: struct vm_map *new_map;
4149: struct vm_map_entry *old_entry;
4150: struct vm_map_entry *new_entry;
1.10 mrg 4151: UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
1.1 mrg 4152:
1.10 mrg 4153: vm_map_lock(old_map);
1.1 mrg 4154:
1.184 chs 4155: vm2 = uvmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
1.23 perry 4156: memcpy(&vm2->vm_startcopy, &vm1->vm_startcopy,
1.204.2.4 yamt 4157: (char *) (vm1 + 1) - (char *) &vm1->vm_startcopy);
1.10 mrg 4158: new_map = &vm2->vm_map; /* XXX */
4159:
4160: old_entry = old_map->header.next;
1.162 pooka 4161: new_map->size = old_map->size;
1.10 mrg 4162:
4163: /*
4164: * go entry-by-entry
4165: */
1.1 mrg 4166:
1.10 mrg 4167: while (old_entry != &old_map->header) {
1.1 mrg 4168:
1.10 mrg 4169: /*
4170: * first, some sanity checks on the old entry
4171: */
1.99 chs 4172:
1.94 chs 4173: KASSERT(!UVM_ET_ISSUBMAP(old_entry));
4174: KASSERT(UVM_ET_ISCOPYONWRITE(old_entry) ||
4175: !UVM_ET_ISNEEDSCOPY(old_entry));
1.1 mrg 4176:
1.10 mrg 4177: switch (old_entry->inheritance) {
1.80 wiz 4178: case MAP_INHERIT_NONE:
1.99 chs 4179:
1.10 mrg 4180: /*
1.162 pooka 4181: * drop the mapping, modify size
1.10 mrg 4182: */
1.162 pooka 4183: new_map->size -= old_entry->end - old_entry->start;
1.10 mrg 4184: break;
4185:
1.80 wiz 4186: case MAP_INHERIT_SHARE:
1.99 chs 4187:
1.10 mrg 4188: /*
4189: * share the mapping: this means we want the old and
4190: * new entries to share amaps and backing objects.
4191: */
4192: /*
4193: * if the old_entry needs a new amap (due to prev fork)
4194: * then we need to allocate it now so that we have
4195: * something we own to share with the new_entry. [in
4196: * other words, we need to clear needs_copy]
4197: */
4198:
4199: if (UVM_ET_ISNEEDSCOPY(old_entry)) {
4200: /* get our own amap, clears needs_copy */
1.204.2.1 yamt 4201: amap_copy(old_map, old_entry, AMAP_COPY_NOCHUNK,
1.98 chs 4202: 0, 0);
1.10 mrg 4203: /* XXXCDC: WAITOK??? */
4204: }
4205:
1.126 bouyer 4206: new_entry = uvm_mapent_alloc(new_map, 0);
1.10 mrg 4207: /* old_entry -> new_entry */
4208: uvm_mapent_copy(old_entry, new_entry);
4209:
4210: /* new pmap has nothing wired in it */
4211: new_entry->wired_count = 0;
4212:
4213: /*
1.29 chuck 4214: * gain reference to object backing the map (can't
4215: * be a submap, already checked this case).
1.10 mrg 4216: */
1.99 chs 4217:
1.10 mrg 4218: if (new_entry->aref.ar_amap)
1.85 chs 4219: uvm_map_reference_amap(new_entry, AMAP_SHARED);
1.10 mrg 4220:
4221: if (new_entry->object.uvm_obj &&
4222: new_entry->object.uvm_obj->pgops->pgo_reference)
4223: new_entry->object.uvm_obj->
4224: pgops->pgo_reference(
4225: new_entry->object.uvm_obj);
4226:
4227: /* insert entry at end of new_map's entry list */
4228: uvm_map_entry_link(new_map, new_map->header.prev,
4229: new_entry);
4230:
4231: break;
4232:
1.80 wiz 4233: case MAP_INHERIT_COPY:
1.10 mrg 4234:
4235: /*
4236: * copy-on-write the mapping (using mmap's
4237: * MAP_PRIVATE semantics)
1.29 chuck 4238: *
1.98 chs 4239: * allocate new_entry, adjust reference counts.
1.29 chuck 4240: * (note that new references are read-only).
1.10 mrg 4241: */
4242:
1.126 bouyer 4243: new_entry = uvm_mapent_alloc(new_map, 0);
1.10 mrg 4244: /* old_entry -> new_entry */
4245: uvm_mapent_copy(old_entry, new_entry);
4246:
4247: if (new_entry->aref.ar_amap)
1.85 chs 4248: uvm_map_reference_amap(new_entry, 0);
1.10 mrg 4249:
4250: if (new_entry->object.uvm_obj &&
4251: new_entry->object.uvm_obj->pgops->pgo_reference)
4252: new_entry->object.uvm_obj->pgops->pgo_reference
4253: (new_entry->object.uvm_obj);
4254:
4255: /* new pmap has nothing wired in it */
4256: new_entry->wired_count = 0;
4257:
4258: new_entry->etype |=
4259: (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
4260: uvm_map_entry_link(new_map, new_map->header.prev,
4261: new_entry);
1.85 chs 4262:
1.14 chuck 4263: /*
1.10 mrg 4264: * the new entry will need an amap. it will either
4265: * need to be copied from the old entry or created
1.14 chuck 4266: * from scratch (if the old entry does not have an
4267: * amap). can we defer this process until later
4268: * (by setting "needs_copy") or do we need to copy
4269: * the amap now?
1.10 mrg 4270: *
1.14 chuck 4271: * we must copy the amap now if any of the following
1.10 mrg 4272: * conditions hold:
1.14 chuck 4273: * 1. the old entry has an amap and that amap is
4274: * being shared. this means that the old (parent)
1.98 chs 4275: * process is sharing the amap with another
1.14 chuck 4276: * process. if we do not clear needs_copy here
4277: * we will end up in a situation where both the
4278: 			 *    parent and child processes are referring to the
1.98 chs 4279: * same amap with "needs_copy" set. if the
1.14 chuck 4280: * parent write-faults, the fault routine will
4281: * clear "needs_copy" in the parent by allocating
1.98 chs 4282: * a new amap. this is wrong because the
1.14 chuck 4283: * parent is supposed to be sharing the old amap
4284: * and the new amap will break that.
1.10 mrg 4285: *
1.14 chuck 4286: * 2. if the old entry has an amap and a non-zero
4287: * wire count then we are going to have to call
1.98 chs 4288: * amap_cow_now to avoid page faults in the
1.14 chuck 4289: * parent process. since amap_cow_now requires
4290: * "needs_copy" to be clear we might as well
4291: * clear it here as well.
1.10 mrg 4292: *
4293: */
4294:
1.14 chuck 4295: if (old_entry->aref.ar_amap != NULL) {
1.99 chs 4296: if ((amap_flags(old_entry->aref.ar_amap) &
4297: AMAP_SHARED) != 0 ||
4298: VM_MAPENT_ISWIRED(old_entry)) {
4299:
1.204.2.1 yamt 4300: amap_copy(new_map, new_entry,
4301: AMAP_COPY_NOCHUNK, 0, 0);
1.99 chs 4302: /* XXXCDC: M_WAITOK ... ok? */
4303: }
1.10 mrg 4304: }
1.85 chs 4305:
1.10 mrg 4306: /*
1.14 chuck 4307: * if the parent's entry is wired down, then the
4308: * parent process does not want page faults on
4309: * access to that memory. this means that we
4310: * cannot do copy-on-write because we can't write
4311: * protect the old entry. in this case we
4312: * resolve all copy-on-write faults now, using
4313: * amap_cow_now. note that we have already
4314: * allocated any needed amap (above).
1.10 mrg 4315: */
4316:
1.55 thorpej 4317: if (VM_MAPENT_ISWIRED(old_entry)) {
1.1 mrg 4318:
1.98 chs 4319: /*
1.14 chuck 4320: * resolve all copy-on-write faults now
1.98 chs 4321: * (note that there is nothing to do if
1.14 chuck 4322: * the old mapping does not have an amap).
4323: */
4324: if (old_entry->aref.ar_amap)
4325: amap_cow_now(new_map, new_entry);
4326:
1.98 chs 4327: } else {
1.14 chuck 4328:
4329: /*
4330: 				 * set up mappings to trigger copy-on-write faults:
4331: 				 * we must write-protect the parent if it has
4332: * an amap and it is not already "needs_copy"...
4333: * if it is already "needs_copy" then the parent
4334: * has already been write-protected by a previous
4335: * fork operation.
4336: */
4337:
1.113 chs 4338: if (old_entry->aref.ar_amap &&
4339: !UVM_ET_ISNEEDSCOPY(old_entry)) {
1.14 chuck 4340: if (old_entry->max_protection & VM_PROT_WRITE) {
4341: pmap_protect(old_map->pmap,
4342: old_entry->start,
4343: old_entry->end,
4344: old_entry->protection &
4345: ~VM_PROT_WRITE);
1.105 chris 4346: pmap_update(old_map->pmap);
1.14 chuck 4347: }
4348: old_entry->etype |= UVM_ET_NEEDSCOPY;
4349: }
1.10 mrg 4350: }
4351: break;
1.14 chuck 4352: } /* end of switch statement */
1.10 mrg 4353: old_entry = old_entry->next;
1.1 mrg 4354: }
4355:
1.98 chs 4356: vm_map_unlock(old_map);
1.1 mrg 4357:
4358: #ifdef SYSVSHM
1.10 mrg 4359: if (vm1->vm_shm)
4360: shmfork(vm1, vm2);
1.39 thorpej 4361: #endif
4362:
4363: #ifdef PMAP_FORK
4364: pmap_fork(vm1->vm_map.pmap, vm2->vm_map.pmap);
1.1 mrg 4365: #endif
4366:
1.10 mrg 4367: UVMHIST_LOG(maphist,"<- done",0,0,0,0);
1.139 enami 4368: return (vm2);
1.1 mrg 4369: }
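
/*
 * Illustrative sketch (not part of this file): the inheritance values
 * switched on above are normally set with minherit(2) before fork(2).
 * The "#if 0" fragment below shows a parent arranging for one region to
 * be shared with the child (MAP_INHERIT_SHARE) rather than copied;
 * error handling is abbreviated for illustration.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4096;
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/* child will share this region instead of getting a COW copy */
	if (minherit(p, len, MAP_INHERIT_SHARE) == -1)
		err(1, "minherit");

	if (fork() == 0) {
		*(int *)p = 42;		/* visible to the parent */
		_exit(0);
	}
	return 0;
}
#endif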
4370:
4371:
1.174 yamt 4372: /*
4373: * in-kernel map entry allocation.
4374: */
4375:
4376: struct uvm_kmapent_hdr {
4377: LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
4378: int ukh_nused;
4379: struct vm_map_entry *ukh_freelist;
4380: struct vm_map *ukh_map;
1.178 yamt 4381: struct vm_map_entry ukh_entries[0];
1.174 yamt 4382: };
4383:
4384: #define UVM_KMAPENT_CHUNK \
4385: ((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr)) \
4386: / sizeof(struct vm_map_entry))
4387:
4388: #define UVM_KHDR_FIND(entry) \
4389: ((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
4390:
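/*
 * Worked example (illustrative figures only): with a 4 KiB PAGE_SIZE and a
 * vm_map_entry of, say, 80 bytes, one page holds roughly
 * (4096 - sizeof(struct uvm_kmapent_hdr)) / 80, i.e. about 50 entries per
 * chunk.  Because each chunk is page aligned, UVM_KHDR_FIND() recovers the
 * owning header from any entry address by masking off the page offset bits
 * (addr & ~PAGE_MASK).
 */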
4391:
1.201 dsl 4392: #ifdef DIAGNOSTIC
1.203 thorpej 4393: static struct vm_map *
1.174 yamt 4394: uvm_kmapent_map(struct vm_map_entry *entry)
4395: {
4396: const struct uvm_kmapent_hdr *ukh;
4397:
4398: ukh = UVM_KHDR_FIND(entry);
4399: return ukh->ukh_map;
4400: }
1.201 dsl 4401: #endif
1.174 yamt 4402:
1.204.2.1 yamt 4403: static inline struct vm_map_entry *
1.174 yamt 4404: uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
4405: {
4406: struct vm_map_entry *entry;
4407:
4408: KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4409: KASSERT(ukh->ukh_nused >= 0);
4410:
4411: entry = ukh->ukh_freelist;
4412: if (entry) {
4413: KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4414: == UVM_MAP_KERNEL);
4415: ukh->ukh_freelist = entry->next;
4416: ukh->ukh_nused++;
4417: KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4418: } else {
4419: KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4420: }
4421:
4422: return entry;
4423: }
4424:
1.204.2.1 yamt 4425: static inline void
1.174 yamt 4426: uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
4427: {
4428:
4429: KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
4430: == UVM_MAP_KERNEL);
4431: KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
4432: KASSERT(ukh->ukh_nused > 0);
4433: KASSERT(ukh->ukh_freelist != NULL ||
4434: ukh->ukh_nused == UVM_KMAPENT_CHUNK);
4435: KASSERT(ukh->ukh_freelist == NULL ||
4436: ukh->ukh_nused < UVM_KMAPENT_CHUNK);
4437:
4438: ukh->ukh_nused--;
4439: entry->next = ukh->ukh_freelist;
4440: ukh->ukh_freelist = entry;
4441: }
4442:
4443: /*
4444: * uvm_kmapent_alloc: allocate a map entry for in-kernel map
4445: */
4446:
4447: static struct vm_map_entry *
4448: uvm_kmapent_alloc(struct vm_map *map, int flags)
4449: {
4450: struct vm_page *pg;
4451: struct uvm_map_args args;
4452: struct uvm_kmapent_hdr *ukh;
4453: struct vm_map_entry *entry;
4454: uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
4455: UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
4456: vaddr_t va;
4457: int error;
4458: int i;
4459:
4460: KDASSERT(UVM_KMAPENT_CHUNK > 2);
4461: KDASSERT(kernel_map != NULL);
4462: KASSERT(vm_map_pmap(map) == pmap_kernel());
4463:
1.204.2.1 yamt 4464: UVMMAP_EVCNT_INCR(uke_alloc);
1.174 yamt 4465: entry = NULL;
4466: again:
4467: /*
4468: * try to grab an entry from freelist.
4469: */
1.204.2.4 yamt 4470: mutex_spin_enter(&uvm_kentry_lock);
1.175 yamt 4471: ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
1.174 yamt 4472: if (ukh) {
4473: entry = uvm_kmapent_get(ukh);
4474: if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
4475: LIST_REMOVE(ukh, ukh_listq);
4476: }
1.204.2.4 yamt 4477: mutex_spin_exit(&uvm_kentry_lock);
1.174 yamt 4478:
4479: if (entry)
4480: return entry;
4481:
4482: /*
4483: * there's no free entry for this vm_map.
4484: 	 * now we need to allocate some vm_map_entries.
4485: * for simplicity, always allocate one page chunk of them at once.
4486: */
4487:
4488: pg = uvm_pagealloc(NULL, 0, NULL, 0);
4489: if (__predict_false(pg == NULL)) {
4490: if (flags & UVM_FLAG_NOWAIT)
4491: return NULL;
4492: uvm_wait("kme_alloc");
4493: goto again;
4494: }
4495:
1.204.2.5 yamt 4496: error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
4497: 0, mapflags, &args);
1.174 yamt 4498: if (error) {
4499: uvm_pagefree(pg);
4500: return NULL;
4501: }
4502:
4503: va = args.uma_start;
4504:
4505: pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
4506: pmap_update(vm_map_pmap(map));
4507:
4508: ukh = (void *)va;
4509:
4510: /*
4511: 	 * use the first entry for ukh itself.
4512: */
4513:
4514: entry = &ukh->ukh_entries[0];
4515: entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
4516: error = uvm_map_enter(map, &args, entry);
4517: KASSERT(error == 0);
4518:
4519: ukh->ukh_nused = UVM_KMAPENT_CHUNK;
4520: ukh->ukh_map = map;
4521: ukh->ukh_freelist = NULL;
4522: for (i = UVM_KMAPENT_CHUNK - 1; i >= 2; i--) {
1.199 christos 4523: struct vm_map_entry *xentry = &ukh->ukh_entries[i];
1.174 yamt 4524:
1.199 christos 4525: xentry->flags = UVM_MAP_KERNEL;
4526: uvm_kmapent_put(ukh, xentry);
1.174 yamt 4527: }
4528: KASSERT(ukh->ukh_nused == 2);
4529:
1.204.2.4 yamt 4530: mutex_spin_enter(&uvm_kentry_lock);
1.175 yamt 4531: LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
4532: ukh, ukh_listq);
1.204.2.4 yamt 4533: mutex_spin_exit(&uvm_kentry_lock);
1.174 yamt 4534:
4535: /*
4536: * return second entry.
4537: */
4538:
4539: entry = &ukh->ukh_entries[1];
4540: entry->flags = UVM_MAP_KERNEL;
1.204.2.1 yamt 4541: UVMMAP_EVCNT_INCR(ukh_alloc);
1.174 yamt 4542: return entry;
4543: }
4544:
4545: /*
4546:  * uvm_kmapent_free: free map entry for in-kernel map
4547: */
4548:
4549: static void
4550: uvm_kmapent_free(struct vm_map_entry *entry)
4551: {
4552: struct uvm_kmapent_hdr *ukh;
4553: struct vm_page *pg;
4554: struct vm_map *map;
4555: struct pmap *pmap;
4556: vaddr_t va;
4557: paddr_t pa;
4558: struct vm_map_entry *deadentry;
4559:
1.204.2.1 yamt 4560: UVMMAP_EVCNT_INCR(uke_free);
1.174 yamt 4561: ukh = UVM_KHDR_FIND(entry);
4562: map = ukh->ukh_map;
4563:
1.204.2.4 yamt 4564: mutex_spin_enter(&uvm_kentry_lock);
1.174 yamt 4565: uvm_kmapent_put(ukh, entry);
4566: if (ukh->ukh_nused > 1) {
4567: if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
1.175 yamt 4568: LIST_INSERT_HEAD(
4569: &vm_map_to_kernel(map)->vmk_kentry_free,
4570: ukh, ukh_listq);
1.204.2.4 yamt 4571: mutex_spin_exit(&uvm_kentry_lock);
1.174 yamt 4572: return;
4573: }
4574:
4575: /*
4576: * now we can free this ukh.
4577: *
4578: * however, keep an empty ukh to avoid ping-pong.
4579: */
4580:
1.175 yamt 4581: if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
1.174 yamt 4582: LIST_NEXT(ukh, ukh_listq) == NULL) {
1.204.2.4 yamt 4583: mutex_spin_exit(&uvm_kentry_lock);
1.174 yamt 4584: return;
4585: }
4586: LIST_REMOVE(ukh, ukh_listq);
1.204.2.4 yamt 4587: mutex_spin_exit(&uvm_kentry_lock);
1.174 yamt 4588:
4589: KASSERT(ukh->ukh_nused == 1);
4590:
4591: /*
4592: 	 * remove map entry for ukh itself.
4593: */
4594:
4595: va = (vaddr_t)ukh;
4596: KASSERT((va & PAGE_MASK) == 0);
1.198 yamt 4597: vm_map_lock(map);
1.187 yamt 4598: uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
1.174 yamt 4599: KASSERT(deadentry->flags & UVM_MAP_KERNEL);
4600: KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
4601: KASSERT(deadentry->next == NULL);
4602: KASSERT(deadentry == &ukh->ukh_entries[0]);
4603:
4604: /*
4605: * unmap the page from pmap and free it.
4606: */
4607:
4608: pmap = vm_map_pmap(map);
4609: KASSERT(pmap == pmap_kernel());
4610: if (!pmap_extract(pmap, va, &pa))
4611: panic("%s: no mapping", __func__);
4612: pmap_kremove(va, PAGE_SIZE);
1.204.2.8 yamt 4613: pmap_update(vm_map_pmap(map));
1.198 yamt 4614: vm_map_unlock(map);
1.174 yamt 4615: pg = PHYS_TO_VM_PAGE(pa);
4616: uvm_pagefree(pg);
1.204.2.1 yamt 4617: UVMMAP_EVCNT_INCR(ukh_free);
4618: }
4619:
4620: static vsize_t
4621: uvm_kmapent_overhead(vsize_t size)
4622: {
4623:
4624: /*
4625: * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
4626: * as the min allocation unit is PAGE_SIZE.
4627: * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
4628: 	 *   one of them is used to map the page itself.
4629: */
4630:
4631: return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
4632: PAGE_SIZE;
1.174 yamt 4633: }
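
/*
 * Worked example (illustrative figures only): unmapping a 1 MiB range with
 * 4 KiB pages can leave at most howmany(1 MiB, 4 KiB) = 256 unmerged
 * entries.  With, say, UVM_KMAPENT_CHUNK == 50, each chunk page supplies
 * 49 usable entries (one maps the page itself), so the overhead reported
 * above would be howmany(256, 49) = 6 pages.
 */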
4634:
4635: /*
4636: * map entry reservation
4637: */
4638:
4639: /*
4640: * uvm_mapent_reserve: reserve map entries for clipping before locking map.
4641: *
4642: * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
4643: * => caller shouldn't hold map locked.
4644: */
4645: int
4646: uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
4647: int nentries, int flags)
4648: {
4649:
4650: umr->umr_nentries = 0;
4651:
1.179 yamt 4652: if ((flags & UVM_FLAG_QUANTUM) != 0)
4653: return 0;
4654:
1.174 yamt 4655: if (!VM_MAP_USE_KMAPENT(map))
4656: return 0;
4657:
4658: while (nentries--) {
4659: struct vm_map_entry *ent;
4660: ent = uvm_kmapent_alloc(map, flags);
4661: if (!ent) {
4662: uvm_mapent_unreserve(map, umr);
4663: return ENOMEM;
4664: }
4665: UMR_PUTENTRY(umr, ent);
4666: }
4667:
4668: return 0;
4669: }
4670:
4671: /*
4672: * uvm_mapent_unreserve:
4673: *
4674: * => caller shouldn't hold map locked.
4675: * => never fail or sleep.
4676: */
4677: void
4678: uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
4679: {
4680:
4681: while (!UMR_EMPTY(umr))
4682: uvm_kmapent_free(UMR_GETENTRY(umr));
4683: }
4684:
1.194 yamt 4685: /*
4686: * uvm_mapent_trymerge: try to merge an entry with its neighbors.
4687: *
4688: * => called with map locked.
4689: * => return non zero if successfully merged.
4690:  * => return non-zero if successfully merged.
4691:
4692: int
4693: uvm_mapent_trymerge(struct vm_map *map, struct vm_map_entry *entry, int flags)
4694: {
4695: struct uvm_object *uobj;
4696: struct vm_map_entry *next;
4697: struct vm_map_entry *prev;
1.195 yamt 4698: vsize_t size;
1.194 yamt 4699: int merged = 0;
1.204.2.3 yamt 4700: bool copying;
1.194 yamt 4701: int newetype;
4702:
4703: if (VM_MAP_USE_KMAPENT(map)) {
4704: return 0;
4705: }
4706: if (entry->aref.ar_amap != NULL) {
4707: return 0;
4708: }
4709: if ((entry->flags & UVM_MAP_NOMERGE) != 0) {
4710: return 0;
4711: }
4712:
4713: uobj = entry->object.uvm_obj;
1.195 yamt 4714: size = entry->end - entry->start;
1.194 yamt 4715: copying = (flags & UVM_MERGE_COPYING) != 0;
4716: newetype = copying ? (entry->etype & ~UVM_ET_NEEDSCOPY) : entry->etype;
4717:
4718: next = entry->next;
4719: if (next != &map->header &&
4720: next->start == entry->end &&
4721: ((copying && next->aref.ar_amap != NULL &&
4722: amap_refs(next->aref.ar_amap) == 1) ||
4723: (!copying && next->aref.ar_amap == NULL)) &&
4724: UVM_ET_ISCOMPATIBLE(next, newetype,
4725: uobj, entry->flags, entry->protection,
4726: entry->max_protection, entry->inheritance, entry->advice,
1.195 yamt 4727: entry->wired_count) &&
4728: (uobj == NULL || entry->offset + size == next->offset)) {
1.194 yamt 4729: int error;
4730:
4731: if (copying) {
1.195 yamt 4732: error = amap_extend(next, size,
1.194 yamt 4733: AMAP_EXTEND_NOWAIT|AMAP_EXTEND_BACKWARDS);
4734: } else {
4735: error = 0;
4736: }
4737: if (error == 0) {
1.197 yamt 4738: if (uobj) {
4739: if (uobj->pgops->pgo_detach) {
4740: uobj->pgops->pgo_detach(uobj);
4741: }
1.194 yamt 4742: }
4743:
4744: entry->end = next->end;
1.204.2.1 yamt 4745: clear_hints(map, next);
1.194 yamt 4746: uvm_map_entry_unlink(map, next);
4747: if (copying) {
4748: entry->aref = next->aref;
4749: entry->etype &= ~UVM_ET_NEEDSCOPY;
4750: }
1.204.2.1 yamt 4751: uvm_map_check(map, "trymerge forwardmerge");
1.194 yamt 4752: uvm_mapent_free_merged(map, next);
4753: merged++;
4754: }
4755: }
4756:
4757: prev = entry->prev;
4758: if (prev != &map->header &&
4759: prev->end == entry->start &&
4760: ((copying && !merged && prev->aref.ar_amap != NULL &&
4761: amap_refs(prev->aref.ar_amap) == 1) ||
4762: (!copying && prev->aref.ar_amap == NULL)) &&
4763: UVM_ET_ISCOMPATIBLE(prev, newetype,
4764: uobj, entry->flags, entry->protection,
4765: entry->max_protection, entry->inheritance, entry->advice,
1.195 yamt 4766: entry->wired_count) &&
1.196 yamt 4767: (uobj == NULL ||
4768: prev->offset + prev->end - prev->start == entry->offset)) {
1.194 yamt 4769: int error;
4770:
4771: if (copying) {
1.195 yamt 4772: error = amap_extend(prev, size,
1.194 yamt 4773: AMAP_EXTEND_NOWAIT|AMAP_EXTEND_FORWARDS);
4774: } else {
4775: error = 0;
4776: }
4777: if (error == 0) {
1.197 yamt 4778: if (uobj) {
4779: if (uobj->pgops->pgo_detach) {
4780: uobj->pgops->pgo_detach(uobj);
4781: }
4782: entry->offset = prev->offset;
1.194 yamt 4783: }
4784:
4785: entry->start = prev->start;
1.204.2.1 yamt 4786: clear_hints(map, prev);
1.194 yamt 4787: uvm_map_entry_unlink(map, prev);
4788: if (copying) {
4789: entry->aref = prev->aref;
4790: entry->etype &= ~UVM_ET_NEEDSCOPY;
4791: }
1.204.2.1 yamt 4792: uvm_map_check(map, "trymerge backmerge");
1.194 yamt 4793: uvm_mapent_free_merged(map, prev);
4794: merged++;
4795: }
4796: }
4797:
4798: return merged;
4799: }
4800:
1.1 mrg 4801: #if defined(DDB)
4802:
4803: /*
4804: * DDB hooks
4805: */
4806:
4807: /*
4808: * uvm_map_printit: actually prints the map
4809: */
4810:
1.10 mrg 4811: void
1.204.2.3 yamt 4812: uvm_map_printit(struct vm_map *map, bool full,
1.138 enami 4813: void (*pr)(const char *, ...))
1.10 mrg 4814: {
1.99 chs 4815: struct vm_map_entry *entry;
1.10 mrg 4816:
1.184 chs 4817: (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map),
4818: vm_map_max(map));
1.53 thorpej 4819: (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n",
4820: map->nentries, map->size, map->ref_count, map->timestamp,
4821: map->flags);
1.173 yamt 4822: (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap,
4823: pmap_resident_count(map->pmap), pmap_wired_count(map->pmap));
1.10 mrg 4824: if (!full)
4825: return;
4826: for (entry = map->header.next; entry != &map->header;
4827: entry = entry->next) {
1.70 kleink 4828: (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n",
1.10 mrg 4829: entry, entry->start, entry->end, entry->object.uvm_obj,
1.85 chs 4830: (long long)entry->offset, entry->aref.ar_amap,
4831: entry->aref.ar_pageoff);
1.10 mrg 4832: (*pr)(
1.85 chs 4833: "\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
4834: "wc=%d, adv=%d\n",
1.10 mrg 4835: (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
1.98 chs 4836: (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
1.10 mrg 4837: (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
4838: entry->protection, entry->max_protection,
4839: entry->inheritance, entry->wired_count, entry->advice);
4840: }
1.98 chs 4841: }
1.1 mrg 4842:
4843: /*
4844: * uvm_object_printit: actually prints the object
4845: */
4846:
1.10 mrg 4847: void
1.204.2.3 yamt 4848: uvm_object_printit(struct uvm_object *uobj, bool full,
1.138 enami 4849: void (*pr)(const char *, ...))
1.10 mrg 4850: {
4851: struct vm_page *pg;
4852: int cnt = 0;
4853:
1.71 chs 4854: (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
1.204.2.7 yamt 4855: uobj, mutex_owned(&uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
1.42 thorpej 4856: if (UVM_OBJ_IS_KERN_OBJECT(uobj))
1.10 mrg 4857: (*pr)("refs=<SYSTEM>\n");
4858: else
4859: (*pr)("refs=%d\n", uobj->uo_refs);
4860:
1.71 chs 4861: if (!full) {
4862: return;
4863: }
1.10 mrg 4864: (*pr)(" PAGES <pg,offset>:\n ");
1.106 chs 4865: TAILQ_FOREACH(pg, &uobj->memq, listq) {
4866: cnt++;
1.85 chs 4867: (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
1.106 chs 4868: if ((cnt % 3) == 0) {
1.71 chs 4869: (*pr)("\n ");
4870: }
4871: }
1.106 chs 4872: if ((cnt % 3) != 0) {
1.71 chs 4873: (*pr)("\n");
1.10 mrg 4874: }
1.98 chs 4875: }
1.1 mrg 4876:
4877: /*
4878: * uvm_page_printit: actually print the page
4879: */
4880:
1.204.2.2 yamt 4881: static const char page_flagbits[] = UVM_PGFLAGBITS;
4882: static const char page_pqflagbits[] = UVM_PQFLAGBITS;
1.86 chs 4883:
1.10 mrg 4884: void
1.204.2.3 yamt 4885: uvm_page_printit(struct vm_page *pg, bool full,
1.138 enami 4886: void (*pr)(const char *, ...))
1.10 mrg 4887: {
1.85 chs 4888: struct vm_page *tpg;
1.10 mrg 4889: struct uvm_object *uobj;
4890: struct pglist *pgl;
1.71 chs 4891: char pgbuf[128];
4892: char pqbuf[128];
1.1 mrg 4893:
1.10 mrg 4894: (*pr)("PAGE %p:\n", pg);
1.71 chs 4895: bitmask_snprintf(pg->flags, page_flagbits, pgbuf, sizeof(pgbuf));
4896: bitmask_snprintf(pg->pqflags, page_pqflagbits, pqbuf, sizeof(pqbuf));
1.106 chs 4897: (*pr)(" flags=%s, pqflags=%s, wire_count=%d, pa=0x%lx\n",
1.137 yamt 4898: pgbuf, pqbuf, pg->wire_count, (long)VM_PAGE_TO_PHYS(pg));
1.86 chs 4899: (*pr)(" uobject=%p, uanon=%p, offset=0x%llx loan_count=%d\n",
4900: pg->uobject, pg->uanon, (long long)pg->offset, pg->loan_count);
1.1 mrg 4901: #if defined(UVM_PAGE_TRKOWN)
1.10 mrg 4902: if (pg->flags & PG_BUSY)
4903: (*pr)(" owning process = %d, tag=%s\n",
4904: pg->owner, pg->owner_tag);
4905: else
4906: (*pr)(" page not busy, no owner\n");
1.1 mrg 4907: #else
1.10 mrg 4908: (*pr)(" [page ownership tracking disabled]\n");
1.1 mrg 4909: #endif
4910:
1.10 mrg 4911: if (!full)
4912: return;
4913:
4914: /* cross-verify object/anon */
4915: if ((pg->pqflags & PQ_FREE) == 0) {
4916: if (pg->pqflags & PQ_ANON) {
1.192 yamt 4917: if (pg->uanon == NULL || pg->uanon->an_page != pg)
1.85 chs 4918: (*pr)(" >>> ANON DOES NOT POINT HERE <<< (%p)\n",
1.192 yamt 4919: (pg->uanon) ? pg->uanon->an_page : NULL);
1.10 mrg 4920: else
4921: (*pr)(" anon backpointer is OK\n");
4922: } else {
4923: uobj = pg->uobject;
4924: if (uobj) {
4925: (*pr)(" checking object list\n");
1.85 chs 4926: TAILQ_FOREACH(tpg, &uobj->memq, listq) {
4927: if (tpg == pg) {
4928: break;
4929: }
1.10 mrg 4930: }
1.85 chs 4931: if (tpg)
1.10 mrg 4932: (*pr)(" page found on object list\n");
4933: else
4934: (*pr)(" >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
4935: }
4936: }
4937: }
1.1 mrg 4938:
1.10 mrg 4939: /* cross-verify page queue */
1.73 thorpej 4940: if (pg->pqflags & PQ_FREE) {
4941: int fl = uvm_page_lookup_freelist(pg);
1.96 thorpej 4942: int color = VM_PGCOLOR_BUCKET(pg);
4943: pgl = &uvm.page_free[fl].pgfl_buckets[color].pgfl_queues[
4944: ((pg)->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN];
1.139 enami 4945: } else {
1.10 mrg 4946: pgl = NULL;
1.85 chs 4947: }
1.10 mrg 4948:
4949: if (pgl) {
4950: (*pr)(" checking pageq list\n");
1.85 chs 4951: TAILQ_FOREACH(tpg, pgl, pageq) {
4952: if (tpg == pg) {
4953: break;
4954: }
1.10 mrg 4955: }
1.85 chs 4956: if (tpg)
1.10 mrg 4957: (*pr)(" page found on pageq list\n");
4958: else
4959: (*pr)(" >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
4960: }
1.1 mrg 4961: }
1.204.2.1 yamt 4962:
4963: /*
4964:  * uvm_page_printall: print a summary of all managed pages
4965: */
4966:
4967: void
4968: uvm_page_printall(void (*pr)(const char *, ...))
4969: {
4970: unsigned i;
4971: struct vm_page *pg;
4972:
1.204.2.2 yamt 4973: (*pr)("%18s %4s %4s %18s %18s"
1.204.2.1 yamt 4974: #ifdef UVM_PAGE_TRKOWN
4975: " OWNER"
4976: #endif
4977: "\n", "PAGE", "FLAG", "PQ", "UOBJECT", "UANON");
4978: for (i = 0; i < vm_nphysseg; i++) {
4979: for (pg = vm_physmem[i].pgs; pg <= vm_physmem[i].lastpg; pg++) {
1.204.2.2 yamt 4980: (*pr)("%18p %04x %04x %18p %18p",
1.204.2.1 yamt 4981: pg, pg->flags, pg->pqflags, pg->uobject,
4982: pg->uanon);
4983: #ifdef UVM_PAGE_TRKOWN
4984: if (pg->flags & PG_BUSY)
4985: (*pr)(" %d [%s]", pg->owner, pg->owner_tag);
4986: #endif
4987: (*pr)("\n");
4988: }
4989: }
4990: }
4991:
1.1 mrg 4992: #endif
1.204.2.1 yamt 4993:
4994: /*
4995: * uvm_map_create: create map
4996: */
4997:
4998: struct vm_map *
4999: uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
5000: {
5001: struct vm_map *result;
5002:
5003: MALLOC(result, struct vm_map *, sizeof(struct vm_map),
5004: M_VMMAP, M_WAITOK);
5005: uvm_map_setup(result, vmin, vmax, flags);
5006: result->pmap = pmap;
5007: return(result);
5008: }
5009:
5010: /*
5011: * uvm_map_setup: init map
5012: *
5013: * => map must not be in service yet.
5014: */
5015:
5016: void
5017: uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
5018: {
1.204.2.4 yamt 5019: int ipl;
1.204.2.1 yamt 5020:
5021: RB_INIT(&map->rbhead);
5022: map->header.next = map->header.prev = &map->header;
5023: map->nentries = 0;
5024: map->size = 0;
5025: map->ref_count = 1;
5026: vm_map_setmin(map, vmin);
5027: vm_map_setmax(map, vmax);
5028: map->flags = flags;
5029: map->first_free = &map->header;
5030: map->hint = &map->header;
5031: map->timestamp = 0;
1.204.2.4 yamt 5032: map->busy = NULL;
5033:
5034: if ((flags & VM_MAP_INTRSAFE) != 0) {
5035: ipl = IPL_VM;
5036: } else {
5037: ipl = IPL_NONE;
5038: }
5039:
5040: rw_init(&map->lock);
5041: cv_init(&map->cv, "vm_map");
5042: mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
5043: mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
1.204.2.1 yamt 5044: }
5045:
5046:
5047: /*
5048: * U N M A P - m a i n e n t r y p o i n t
5049: */
5050:
5051: /*
5052: * uvm_unmap1: remove mappings from a vm_map (from "start" up to "stop")
5053: *
5054: * => caller must check alignment and size
5055: * => map must be unlocked (we will lock it)
5056: * => flags is UVM_FLAG_QUANTUM or 0.
5057: */
5058:
5059: void
5060: uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
5061: {
5062: struct vm_map_entry *dead_entries;
5063: struct uvm_mapent_reservation umr;
5064: UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
5065:
5066: UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)",
5067: map, start, end, 0);
1.204.2.6 yamt 5068: if (map == kernel_map) {
5069: LOCKDEBUG_MEM_CHECK((void *)start, end - start);
5070: }
1.204.2.1 yamt 5071: /*
5072:  	 * the work is now done by helper functions.  wipe the pmap mappings
5073:  	 * and then detach the dead entries...
5074: */
5075: uvm_mapent_reserve(map, &umr, 2, flags);
5076: vm_map_lock(map);
5077: uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
5078: vm_map_unlock(map);
5079: uvm_mapent_unreserve(map, &umr);
5080:
5081: if (dead_entries != NULL)
5082: uvm_unmap_detach(dead_entries, 0);
5083:
5084: UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
5085: }
5086:
5087:
5088: /*
5089: * uvm_map_reference: add reference to a map
5090: *
1.204.2.4 yamt 5091: * => map need not be locked (we use misc_lock).
1.204.2.1 yamt 5092: */
5093:
5094: void
5095: uvm_map_reference(struct vm_map *map)
5096: {
1.204.2.4 yamt 5097: mutex_enter(&map->misc_lock);
1.204.2.1 yamt 5098: map->ref_count++;
1.204.2.4 yamt 5099: mutex_exit(&map->misc_lock);
1.204.2.1 yamt 5100: }
5101:
5102: struct vm_map_kernel *
5103: vm_map_to_kernel(struct vm_map *map)
5104: {
5105:
5106: KASSERT(VM_MAP_IS_KERNEL(map));
5107:
5108: return (struct vm_map_kernel *)map;
5109: }
5110:
1.204.2.3 yamt 5111: bool
1.204.2.1 yamt 5112: vm_map_starved_p(struct vm_map *map)
5113: {
5114:
5115: if ((map->flags & VM_MAP_WANTVA) != 0) {
1.204.2.3 yamt 5116: return true;
1.204.2.1 yamt 5117: }
5118: /* XXX */
5119: if ((vm_map_max(map) - vm_map_min(map)) / 16 * 15 < map->size) {
1.204.2.3 yamt 5120: return true;
1.204.2.1 yamt 5121: }
1.204.2.3 yamt 5122: return false;
1.204.2.1 yamt 5123: }
1.204.2.7 yamt 5124:
5125: #if defined(DDB)
5126: void
5127: uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...))
5128: {
5129: struct vm_map *map;
5130:
5131: for (map = kernel_map;;) {
5132: struct vm_map_entry *entry;
5133:
5134: if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) {
5135: break;
5136: }
5137: (*pr)("%p is %p+%zu from VMMAP %p\n",
5138: (void *)addr, (void *)entry->start,
5139: (size_t)(addr - (uintptr_t)entry->start), map);
5140: if (!UVM_ET_ISSUBMAP(entry)) {
5141: break;
5142: }
5143: map = entry->object.sub_map;
5144: }
5145: }
5146: #endif /* defined(DDB) */