[BACK]Return to uvm_map.c CVS log [TXT][DIR] Up to [cvs.NetBSD.org] / src / sys / uvm

Annotation of src/sys/uvm/uvm_map.c, Revision 1.22.2.2

1.22.2.2! eeh         1: /*     $NetBSD: uvm_map.c,v 1.22.2.1 1998/07/30 14:04:12 eeh Exp $     */
1.1       mrg         2:
                      3: /*
                      4:  * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
                      5:  *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
                      6:  */
                      7: /*
                      8:  * Copyright (c) 1997 Charles D. Cranor and Washington University.
                      9:  * Copyright (c) 1991, 1993, The Regents of the University of California.
                     10:  *
                     11:  * All rights reserved.
                     12:  *
                     13:  * This code is derived from software contributed to Berkeley by
                     14:  * The Mach Operating System project at Carnegie-Mellon University.
                     15:  *
                     16:  * Redistribution and use in source and binary forms, with or without
                     17:  * modification, are permitted provided that the following conditions
                     18:  * are met:
                     19:  * 1. Redistributions of source code must retain the above copyright
                     20:  *    notice, this list of conditions and the following disclaimer.
                     21:  * 2. Redistributions in binary form must reproduce the above copyright
                     22:  *    notice, this list of conditions and the following disclaimer in the
                     23:  *    documentation and/or other materials provided with the distribution.
                     24:  * 3. All advertising materials mentioning features or use of this software
                     25:  *    must display the following acknowledgement:
                     26:  *     This product includes software developed by Charles D. Cranor,
                     27:  *      Washington University, the University of California, Berkeley and
                     28:  *      its contributors.
                     29:  * 4. Neither the name of the University nor the names of its contributors
                     30:  *    may be used to endorse or promote products derived from this software
                     31:  *    without specific prior written permission.
                     32:  *
                     33:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     34:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     35:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     36:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     37:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     38:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     39:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     40:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     41:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     42:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     43:  * SUCH DAMAGE.
                     44:  *
                     45:  *     @(#)vm_map.c    8.3 (Berkeley) 1/12/94
1.3       mrg        46:  * from: Id: uvm_map.c,v 1.1.2.27 1998/02/07 01:16:54 chs Exp
1.1       mrg        47:  *
                     48:  *
                     49:  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
                     50:  * All rights reserved.
                     51:  *
                     52:  * Permission to use, copy, modify and distribute this software and
                     53:  * its documentation is hereby granted, provided that both the copyright
                     54:  * notice and this permission notice appear in all copies of the
                     55:  * software, derivative works or modified versions, and any portions
                     56:  * thereof, and that both notices appear in supporting documentation.
                     57:  *
                     58:  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
                     59:  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
                     60:  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
                     61:  *
                     62:  * Carnegie Mellon requests users of this software to return to
                     63:  *
                     64:  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
                     65:  *  School of Computer Science
                     66:  *  Carnegie Mellon University
                     67:  *  Pittsburgh PA 15213-3890
                     68:  *
                     69:  * any improvements or extensions that they make and grant Carnegie the
                     70:  * rights to redistribute these changes.
                     71:  */
                     72:
1.21      jonathan   73: #include "opt_ddb.h"
1.6       mrg        74: #include "opt_uvmhist.h"
                     75: #include "opt_pmap_new.h"
                     76:
1.1       mrg        77: /*
                     78:  * uvm_map.c: uvm map operations
                     79:  */
                     80:
                     81: #include <sys/param.h>
                     82: #include <sys/systm.h>
                     83: #include <sys/mman.h>
                     84: #include <sys/proc.h>
                     85: #include <sys/malloc.h>
                     86:
                     87: #ifdef SYSVSHM
                     88: #include <sys/shm.h>
                     89: #endif
                     90:
                     91: #include <vm/vm.h>
                     92: #include <vm/vm_page.h>
                     93: #include <vm/vm_kern.h>
                     94:
                     95: #define UVM_MAP
                     96: #include <uvm/uvm.h>
1.21      jonathan   97:
                     98: #ifdef DDB
                     99: #include <uvm/uvm_ddb.h>
                    100: #endif
                    101:
1.1       mrg       102:
                    103: struct uvm_cnt uvm_map_call, map_backmerge, map_forwmerge;
                    104: struct uvm_cnt uvm_mlk_call, uvm_mlk_hint;
                    105:
                    106: /*
                    107:  * macros
                    108:  */
                    109:
                    110: /*
                    111:  * uvm_map_entry_link: insert entry into a map
                    112:  *
                    113:  * => map must be locked
                    114:  */
1.10      mrg       115: #define uvm_map_entry_link(map, after_where, entry) do { \
                    116:        (map)->nentries++; \
                    117:        (entry)->prev = (after_where); \
                    118:        (entry)->next = (after_where)->next; \
                    119:        (entry)->prev->next = (entry); \
                    120:        (entry)->next->prev = (entry); \
                    121: } while (0)
                    122:
1.1       mrg       123: /*
                    124:  * uvm_map_entry_unlink: remove entry from a map
                    125:  *
                    126:  * => map must be locked
                    127:  */
1.10      mrg       128: #define uvm_map_entry_unlink(map, entry) do { \
                    129:        (map)->nentries--; \
                    130:        (entry)->next->prev = (entry)->prev; \
                    131:        (entry)->prev->next = (entry)->next; \
                    132: } while (0)
1.1       mrg       133:
                    134: /*
                    135:  * SAVE_HINT: saves the specified entry as the hint for future lookups.
                    136:  *
                    137:  * => map need not be locked (protected by hint_lock).
                    138:  */
1.10      mrg       139: #define SAVE_HINT(map,value) do { \
                    140:        simple_lock(&(map)->hint_lock); \
                    141:        (map)->hint = (value); \
                    142:        simple_unlock(&(map)->hint_lock); \
                    143: } while (0)
1.1       mrg       144:
                    145: /*
                    146:  * VM_MAP_RANGE_CHECK: check and correct range
                    147:  *
                    148:  * => map must at least be read locked
                    149:  */
                    150:
1.10      mrg       151: #define VM_MAP_RANGE_CHECK(map, start, end) do { \
                    152:        if (start < vm_map_min(map))            \
                    153:                start = vm_map_min(map);        \
                    154:        if (end > vm_map_max(map))              \
                    155:                end = vm_map_max(map);          \
                    156:        if (start > end)                        \
                    157:                start = end;                    \
                    158: } while (0)
1.1       mrg       159:
                    160: /*
                    161:  * local prototypes
                    162:  */
                    163:
                    164: static vm_map_entry_t  uvm_mapent_alloc __P((vm_map_t));
                    165: static void            uvm_mapent_copy __P((vm_map_entry_t,vm_map_entry_t));
                    166: static void            uvm_mapent_free __P((vm_map_entry_t));
                    167: static void            uvm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
                    168:
                    169: /*
                    170:  * local inlines
                    171:  */
                    172:
                    173: /*
                    174:  * uvm_mapent_alloc: allocate a map entry
                    175:  *
                    176:  * => XXX: static pool for kernel map?
                    177:  */
                    178:
1.10      mrg       179: static __inline vm_map_entry_t
                    180: uvm_mapent_alloc(map)
                    181:        vm_map_t map;
                    182: {
                    183:        vm_map_entry_t me;
                    184:        int s;
                    185:        UVMHIST_FUNC("uvm_mapent_alloc");
                    186:        UVMHIST_CALLED(maphist);
1.1       mrg       187:
1.10      mrg       188:        if (map->entries_pageable) {
                    189:                MALLOC(me, vm_map_entry_t, sizeof(struct vm_map_entry),
1.1       mrg       190:                                                M_VMMAPENT, M_WAITOK);
1.10      mrg       191:                me->flags = 0;
                    192:                /* me can't be null, wait ok */
1.1       mrg       193:
1.10      mrg       194:        } else {
                    195:                s = splimp();   /* protect kentry_free list with splimp */
                    196:                simple_lock(&uvm.kentry_lock);
                    197:                me = uvm.kentry_free;
                    198:                if (me) uvm.kentry_free = me->next;
                    199:                simple_unlock(&uvm.kentry_lock);
                    200:                splx(s);
                    201:                if (!me)
                    202:        panic("mapent_alloc: out of kernel map entries, check MAX_KMAPENT");
                    203:                me->flags = UVM_MAP_STATIC;
                    204:        }
1.1       mrg       205:
1.10      mrg       206:        UVMHIST_LOG(maphist, "<- new entry=0x%x [pageable=%d]",
1.1       mrg       207:                me, map->entries_pageable, 0, 0);
1.10      mrg       208:        return(me);
1.1       mrg       209:
                    210: }
                    211:
                    212: /*
                    213:  * uvm_mapent_free: free map entry
                    214:  *
                    215:  * => XXX: static pool for kernel map?
                    216:  */
                    217:
1.10      mrg       218: static __inline void
                    219: uvm_mapent_free(me)
                    220:        vm_map_entry_t me;
1.1       mrg       221: {
1.10      mrg       222:        int s;
                    223:        UVMHIST_FUNC("uvm_mapent_free");
                    224:        UVMHIST_CALLED(maphist);
                    225:        UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
1.1       mrg       226:                me, me->flags, 0, 0);
1.10      mrg       227:        if ((me->flags & UVM_MAP_STATIC) == 0) {
                    228:                FREE(me, M_VMMAPENT);
                    229:        } else {
                    230:                s = splimp();   /* protect kentry_free list with splimp */
                    231:                simple_lock(&uvm.kentry_lock);
                    232:                me->next = uvm.kentry_free;
                    233:                uvm.kentry_free = me;
                    234:                simple_unlock(&uvm.kentry_lock);
                    235:                splx(s);
                    236:        }
1.1       mrg       237: }
                    238:
                    239: /*
                    240:  * uvm_mapent_copy: copy a map entry, preserving flags
                    241:  */
                    242:
1.10      mrg       243: static __inline void
                    244: uvm_mapent_copy(src, dst)
                    245:        vm_map_entry_t src;
                    246:        vm_map_entry_t dst;
                    247: {
1.1       mrg       248:
1.10      mrg       249:        bcopy(src, dst, ((char *)&src->uvm_map_entry_stop_copy) - ((char*)src));
1.1       mrg       250: }
                    251:
                    252: /*
                    253:  * uvm_map_entry_unwire: unwire a map entry
                    254:  *
                    255:  * => map should be locked by caller
                    256:  */
                    257:
1.10      mrg       258: static __inline void
                    259: uvm_map_entry_unwire(map, entry)
                    260:        vm_map_t map;
                    261:        vm_map_entry_t entry;
                    262: {
1.1       mrg       263:
1.10      mrg       264:        uvm_fault_unwire(map->pmap, entry->start, entry->end);
                    265:        entry->wired_count = 0;
1.1       mrg       266: }
                    267:
                    268: /*
                    269:  * uvm_map_init: init mapping system at boot time.   note that we allocate
                    270:  * and init the static pool of vm_map_entry_t's for the kernel here.
                    271:  */
                    272:
1.10      mrg       273: void
                    274: uvm_map_init()
1.1       mrg       275: {
1.10      mrg       276:        static struct vm_map_entry kernel_map_entry[MAX_KMAPENT];
1.1       mrg       277: #if defined(UVMHIST)
1.10      mrg       278:        static struct uvm_history_ent maphistbuf[100];
                    279:        static struct uvm_history_ent pdhistbuf[100];
1.1       mrg       280: #endif
1.10      mrg       281:        int lcv;
                    282:
                    283:        /*
                    284:         * first, init logging system.
                    285:         */
1.1       mrg       286:
1.10      mrg       287:        UVMHIST_FUNC("uvm_map_init");
                    288:        UVMHIST_INIT_STATIC(maphist, maphistbuf);
                    289:        UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
                    290:        UVMHIST_CALLED(maphist);
                    291:        UVMHIST_LOG(maphist,"<starting uvm map system>", 0, 0, 0, 0);
                    292:        UVMCNT_INIT(uvm_map_call,  UVMCNT_CNT, 0,
                    293:            "# uvm_map() successful calls", 0);
                    294:        UVMCNT_INIT(map_backmerge, UVMCNT_CNT, 0, "# uvm_map() back merges", 0);
                    295:        UVMCNT_INIT(map_forwmerge, UVMCNT_CNT, 0, "# uvm_map() missed forward",
                    296:            0);
                    297:        UVMCNT_INIT(uvm_mlk_call,  UVMCNT_CNT, 0, "# map lookup calls", 0);
                    298:        UVMCNT_INIT(uvm_mlk_hint,  UVMCNT_CNT, 0, "# map lookup hint hits", 0);
                    299:
                    300:        /*
                    301:         * now set up static pool of kernel map entrys ...
                    302:         */
                    303:
                    304:        simple_lock_init(&uvm.kentry_lock);
                    305:        uvm.kentry_free = NULL;
                    306:        for (lcv = 0 ; lcv < MAX_KMAPENT ; lcv++) {
                    307:                kernel_map_entry[lcv].next = uvm.kentry_free;
                    308:                uvm.kentry_free = &kernel_map_entry[lcv];
                    309:        }
1.1       mrg       310:
                    311: }
                    312:
                    313: /*
                    314:  * clippers
                    315:  */
                    316:
                    317: /*
                    318:  * uvm_map_clip_start: ensure that the entry begins at or after
                    319:  *     the starting address, if it doesn't we split the entry.
                    320:  *
                    321:  * => caller should use UVM_MAP_CLIP_START macro rather than calling
                    322:  *    this directly
                    323:  * => map must be locked by caller
                    324:  */
                    325:
                    326: void uvm_map_clip_start(map, entry, start)
                    327:
                    328: register vm_map_t       map;
                    329: register vm_map_entry_t entry;
1.22.2.1  eeh       330: register vaddr_t    start;
1.1       mrg       331:
                    332: {
1.10      mrg       333:                                register vm_map_entry_t new_entry;
1.22.2.1  eeh       334:        vaddr_t new_adj;
1.1       mrg       335:
                    336:        /* uvm_map_simplify_entry(map, entry); */ /* XXX */
                    337:
1.10      mrg       338:        /*
                    339:         * Split off the front portion.  note that we must insert the new
                    340:         * entry BEFORE this one, so that this entry has the specified
1.1       mrg       341:         * starting address.
1.10      mrg       342:         */
1.1       mrg       343:
1.10      mrg       344:        new_entry = uvm_mapent_alloc(map);
1.1       mrg       345:        uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
1.10      mrg       346:
                    347:        new_entry->end = start;
1.1       mrg       348:        new_adj = start - new_entry->start;
                    349:        if (entry->object.uvm_obj)
1.10      mrg       350:                entry->offset += new_adj;       /* shift start over */
                    351:        entry->start = start;
1.1       mrg       352:
                    353:        if (new_entry->aref.ar_amap) {
1.10      mrg       354:                amap_splitref(&new_entry->aref, &entry->aref, new_adj);
1.1       mrg       355:        }
                    356:
1.10      mrg       357:        uvm_map_entry_link(map, entry->prev, new_entry);
                    358:
1.1       mrg       359:        if (UVM_ET_ISMAP(entry)) {
1.10      mrg       360:                 uvm_map_reference(new_entry->object.share_map);
1.1       mrg       361:        } else {
1.10      mrg       362:                if (UVM_ET_ISOBJ(entry) &&
                    363:                    entry->object.uvm_obj->pgops &&
                    364:                    entry->object.uvm_obj->pgops->pgo_reference)
                    365:                        entry->object.uvm_obj->pgops->pgo_reference(
                    366:                            entry->object.uvm_obj);
1.1       mrg       367:        }
                    368: }
                    369:
                    370: /*
                    371:  * uvm_map_clip_end: ensure that the entry ends at or before
                    372:  *     the ending address, if it does't we split the reference
                    373:  *
                    374:  * => caller should use UVM_MAP_CLIP_END macro rather than calling
                    375:  *    this directly
                    376:  * => map must be locked by caller
                    377:  */
                    378:
1.10      mrg       379: void
                    380: uvm_map_clip_end(map, entry, end)
                    381:        vm_map_t        map;
                    382:        vm_map_entry_t  entry;
1.22.2.1  eeh       383:        vaddr_t end;
1.1       mrg       384: {
1.10      mrg       385:        vm_map_entry_t  new_entry;
1.22.2.1  eeh       386:        vaddr_t new_adj; /* #bytes we move start forward */
1.1       mrg       387:
                    388:        /*
                    389:         *      Create a new entry and insert it
                    390:         *      AFTER the specified entry
                    391:         */
                    392:
                    393:        new_entry = uvm_mapent_alloc(map);
                    394:        uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
                    395:
                    396:        new_entry->start = entry->end = end;
                    397:        new_adj = end - entry->start;
                    398:        if (new_entry->object.uvm_obj)
                    399:                new_entry->offset += new_adj;
                    400:
1.10      mrg       401:        if (entry->aref.ar_amap)
                    402:                amap_splitref(&entry->aref, &new_entry->aref, new_adj);
1.1       mrg       403:
                    404:        uvm_map_entry_link(map, entry, new_entry);
                    405:
                    406:        if (UVM_ET_ISMAP(entry)) {
                    407:                uvm_map_reference(new_entry->object.share_map);
                    408:        } else {
1.10      mrg       409:                if (UVM_ET_ISOBJ(entry) &&
                    410:                    entry->object.uvm_obj->pgops &&
                    411:                    entry->object.uvm_obj->pgops->pgo_reference)
                    412:                        entry->object.uvm_obj->pgops->pgo_reference(
                    413:                            entry->object.uvm_obj);
1.1       mrg       414:        }
                    415: }
                    416:
                    417:
                    418: /*
                    419:  *   M A P   -   m a i n   e n t r y   p o i n t
                    420:  */
                    421: /*
                    422:  * uvm_map: establish a valid mapping in a map
                    423:  *
                    424:  * => assume startp is page aligned.
                    425:  * => assume size is a multiple of PAGE_SIZE.
                    426:  * => assume sys_mmap provides enough of a "hint" to have us skip
                    427:  *     over text/data/bss area.
                    428:  * => map must be unlocked (we will lock it)
                    429:  * => <uobj,uoffset> value meanings (4 cases):
                    430:  *      [1] <NULL,uoffset>             == uoffset is a hint for PMAP_PREFER
                    431:  *      [2] <NULL,UVM_UNKNOWN_OFFSET>  == don't PMAP_PREFER
                    432:  *      [3] <uobj,uoffset>             == normal mapping
                    433:  *      [4] <uobj,UVM_UNKNOWN_OFFSET>  == uvm_map finds offset based on VA
                    434:  *
                    435:  *    case [4] is for kernel mappings where we don't know the offset until
1.8       chuck     436:  *    we've found a virtual address.   note that kernel object offsets are
                    437:  *    always relative to vm_map_min(kernel_map).
1.1       mrg       438:  * => XXXCDC: need way to map in external amap?
                    439:  */
                    440:
1.10      mrg       441: int
                    442: uvm_map(map, startp, size, uobj, uoffset, flags)
                    443:        vm_map_t map;
1.22.2.1  eeh       444:        vaddr_t *startp;        /* IN/OUT */
                    445:        vsize_t size;
1.10      mrg       446:        struct uvm_object *uobj;
1.22.2.1  eeh       447:        vaddr_t uoffset;
1.10      mrg       448:        uvm_flag_t flags;
                    449: {
                    450:        vm_map_entry_t prev_entry, new_entry;
                    451:        vm_prot_t prot = UVM_PROTECTION(flags), maxprot =
                    452:            UVM_MAXPROTECTION(flags);
                    453:        vm_inherit_t inherit = UVM_INHERIT(flags);
                    454:        int advice = UVM_ADVICE(flags);
                    455:        UVMHIST_FUNC("uvm_map");
                    456:        UVMHIST_CALLED(maphist);
1.1       mrg       457:
1.10      mrg       458:        UVMHIST_LOG(maphist, "(map=0x%x, *startp=0x%x, size=%d, flags=0x%x)",
                    459:            map, *startp, size, flags);
                    460:        UVMHIST_LOG(maphist, "  uobj/offset 0x%x/%d", uobj, uoffset,0,0);
1.1       mrg       461:
1.10      mrg       462:        /*
                    463:         * step 0: sanity check of protection code
                    464:         */
1.1       mrg       465:
1.10      mrg       466:        if ((prot & maxprot) != prot) {
                    467:                UVMHIST_LOG(maphist, "<- prot. failure:  prot=0x%x, max=0x%x",
                    468:                prot, maxprot,0,0);
                    469:                return(KERN_PROTECTION_FAILURE);
                    470:        }
1.1       mrg       471:
1.10      mrg       472:        /*
                    473:         * step 1: figure out where to put new VM range
                    474:         */
1.1       mrg       475:
1.10      mrg       476:        if (vm_map_lock_try(map) == FALSE) {
                    477:                if (flags & UVM_FLAG_TRYLOCK)
                    478:                        return(KERN_FAILURE);
                    479:                vm_map_lock(map); /* could sleep here */
                    480:        }
                    481:        if ((prev_entry = uvm_map_findspace(map, *startp, size, startp,
                    482:            uobj, uoffset, flags & UVM_FLAG_FIXED)) == NULL) {
                    483:                UVMHIST_LOG(maphist,"<- uvm_map_findspace failed!",0,0,0,0);
                    484:                vm_map_unlock(map);
                    485:                return (KERN_NO_SPACE);
                    486:        }
1.1       mrg       487:
                    488: #if defined(PMAP_GROWKERNEL)   /* hack */
1.10      mrg       489:        {
                    490:                /* locked by kernel_map lock */
1.22.2.1  eeh       491:                static vaddr_t maxkaddr = 0;
1.10      mrg       492:
                    493:                /*
                    494:                 * hack: grow kernel PTPs in advance.
                    495:                 */
                    496:                if (map == kernel_map && maxkaddr < (*startp + size)) {
                    497:                        pmap_growkernel(*startp + size);
                    498:                        maxkaddr = *startp + size;
                    499:                }
                    500:        }
                    501: #endif
                    502:
                    503:        UVMCNT_INCR(uvm_map_call);
                    504:
                    505:        /*
                    506:         * if uobj is null, then uoffset is either a VAC hint for PMAP_PREFER
                    507:         * [typically from uvm_map_reserve] or it is UVM_UNKNOWN_OFFSET.   in
                    508:         * either case we want to zero it  before storing it in the map entry
                    509:         * (because it looks strange and confusing when debugging...)
                    510:         *
                    511:         * if uobj is not null
                    512:         *   if uoffset is not UVM_UNKNOWN_OFFSET then we have a normal mapping
                    513:         *      and we do not need to change uoffset.
                    514:         *   if uoffset is UVM_UNKNOWN_OFFSET then we need to find the offset
                    515:         *      now (based on the starting address of the map).   this case is
                    516:         *      for kernel object mappings where we don't know the offset until
                    517:         *      the virtual address is found (with uvm_map_findspace).   the
                    518:         *      offset is the distance we are from the start of the map.
                    519:         */
                    520:
                    521:        if (uobj == NULL) {
                    522:                uoffset = 0;
                    523:        } else {
                    524:                if (uoffset == UVM_UNKNOWN_OFFSET) {
1.8       chuck     525: #ifdef DIAGNOSTIC
1.10      mrg       526:                        if (uobj->uo_refs != UVM_OBJ_KERN)
1.8       chuck     527:        panic("uvm_map: unknown offset with non-kernel object");
                    528: #endif
1.10      mrg       529:                        uoffset = *startp - vm_map_min(kernel_map);
                    530:                }
                    531:        }
                    532:
                    533:        /*
                    534:         * step 2: try and insert in map by extending previous entry, if
                    535:         * possible
                    536:         * XXX: we don't try and pull back the next entry.   might be useful
                    537:         * for a stack, but we are currently allocating our stack in advance.
                    538:         */
                    539:
                    540:        if ((flags & UVM_FLAG_NOMERGE) == 0 &&
                    541:            prev_entry->end == *startp && prev_entry != &map->header &&
                    542:            prev_entry->object.uvm_obj == uobj) {
                    543:
                    544:                if (uobj && prev_entry->offset +
                    545:                    (prev_entry->end - prev_entry->start) != uoffset)
                    546:                        goto step3;
                    547:
                    548:                if (UVM_ET_ISMAP(prev_entry))
                    549:                        goto step3;
                    550:
                    551:                if (prev_entry->protection != prot ||
                    552:                    prev_entry->max_protection != maxprot)
                    553:                        goto step3;
                    554:
                    555:                if (prev_entry->inheritance != inherit ||
                    556:                    prev_entry->advice != advice)
                    557:                        goto step3;
                    558:
                    559:                /* wired_count's must match (new area is unwired) */
                    560:                if (prev_entry->wired_count)
                    561:                        goto step3;
                    562:
                    563:                /*
                    564:                 * can't extend a shared amap.  note: no need to lock amap to
                    565:                 * look at am_ref since we don't care about its exact value.
                    566:                 * if it is one (i.e. we have only reference) it will stay there
                    567:                 */
                    568:
                    569:                if (prev_entry->aref.ar_amap &&
                    570:                    prev_entry->aref.ar_amap->am_ref != 1) {
                    571:                        goto step3;
                    572:                }
                    573:
                    574:                /* got it! */
                    575:
                    576:                UVMCNT_INCR(map_backmerge);
                    577:                UVMHIST_LOG(maphist,"  starting back merge", 0, 0, 0, 0);
                    578:
                    579:                /*
                    580:                 * drop our reference to uobj since we are extending a reference
                    581:                 * that we already have (the ref count can not drop to zero).
                    582:                 */
                    583:                if (uobj && uobj->pgops->pgo_detach)
                    584:                        uobj->pgops->pgo_detach(uobj);
                    585:
                    586:                if (prev_entry->aref.ar_amap) {
                    587:                        amap_extend(prev_entry, size);
                    588:                }
                    589:
                    590:                prev_entry->end += size;
                    591:                map->size += size;
                    592:
                    593:                UVMHIST_LOG(maphist,"<- done (via backmerge)!", 0, 0, 0, 0);
                    594:                vm_map_unlock(map);
                    595:                return (KERN_SUCCESS);
                    596:
1.1       mrg       597:        }
1.10      mrg       598: step3:
                    599:        UVMHIST_LOG(maphist,"  allocating new map entry", 0, 0, 0, 0);
                    600:
                    601:        /*
                    602:         * check for possible forward merge (which we don't do) and count
                    603:         * the number of times we missed a *possible* chance to merge more
                    604:         */
1.1       mrg       605:
1.10      mrg       606:        if ((flags & UVM_FLAG_NOMERGE) == 0 &&
                    607:            prev_entry->next != &map->header &&
                    608:            prev_entry->next->start == (*startp + size))
                    609:                UVMCNT_INCR(map_forwmerge);
1.1       mrg       610:
                    611:        /*
1.10      mrg       612:         * step 3: allocate new entry and link it in
1.1       mrg       613:         */
                    614:
1.10      mrg       615:        new_entry = uvm_mapent_alloc(map);
                    616:        new_entry->start = *startp;
                    617:        new_entry->end = new_entry->start + size;
                    618:        new_entry->object.uvm_obj = uobj;
                    619:        new_entry->offset = uoffset;
                    620:
                    621:        if (uobj)
                    622:                new_entry->etype = UVM_ET_OBJ;
                    623:        else
                    624:                new_entry->etype = 0;
                    625:
                    626:        if (flags & UVM_FLAG_COPYONW) {
                    627:                new_entry->etype |= UVM_ET_COPYONWRITE;
                    628:                if ((flags & UVM_FLAG_OVERLAY) == 0)
                    629:                        new_entry->etype |= UVM_ET_NEEDSCOPY;
                    630:        }
                    631:
                    632:        new_entry->protection = prot;
                    633:        new_entry->max_protection = maxprot;
                    634:        new_entry->inheritance = inherit;
                    635:        new_entry->wired_count = 0;
                    636:        new_entry->advice = advice;
                    637:        if (flags & UVM_FLAG_OVERLAY) {
                    638:                /*
                    639:                 * to_add: for BSS we overallocate a little since we
                    640:                 * are likely to extend
                    641:                 */
1.22.2.1  eeh       642:                vaddr_t to_add = (flags & UVM_FLAG_AMAPPAD) ?
1.10      mrg       643:                        UVM_AMAP_CHUNK * PAGE_SIZE : 0;
                    644:                struct vm_amap *amap = amap_alloc(size, to_add, M_WAITOK);
                    645:                new_entry->aref.ar_slotoff = 0;
                    646:                new_entry->aref.ar_amap = amap;
                    647:        } else {
                    648:                new_entry->aref.ar_amap = NULL;
1.1       mrg       649:        }
                    650:
1.10      mrg       651:        uvm_map_entry_link(map, prev_entry, new_entry);
                    652:
1.1       mrg       653:        map->size += size;
                    654:
1.10      mrg       655:        /*
                    656:         *      Update the free space hint
                    657:         */
                    658:
                    659:        if ((map->first_free == prev_entry) &&
                    660:            (prev_entry->end >= new_entry->start))
                    661:                map->first_free = new_entry;
                    662:
                    663:        UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
1.1       mrg       664:        vm_map_unlock(map);
                    665:        return(KERN_SUCCESS);
                    666: }
                    667:
                    668: /*
                    669:  * uvm_map_lookup_entry: find map entry at or before an address
                    670:  *
                    671:  * => map must at least be read-locked by caller
                    672:  * => entry is returned in "entry"
                    673:  * => return value is true if address is in the returned entry
                    674:  */
                    675:
1.10      mrg       676: boolean_t
                    677: uvm_map_lookup_entry(map, address, entry)
                    678:        register vm_map_t       map;
1.22.2.1  eeh       679:        register vaddr_t        address;
1.10      mrg       680:        vm_map_entry_t          *entry;         /* OUT */
1.1       mrg       681: {
                    682:        register vm_map_entry_t         cur;
                    683:        register vm_map_entry_t         last;
                    684:        UVMHIST_FUNC("uvm_map_lookup_entry");
                    685:        UVMHIST_CALLED(maphist);
                    686:
                    687:        UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)",
1.10      mrg       688:            map, address, entry, 0);
1.1       mrg       689:
                    690:        /*
1.10      mrg       691:         * start looking either from the head of the
                    692:         * list, or from the hint.
1.1       mrg       693:         */
                    694:
                    695:        simple_lock(&map->hint_lock);
                    696:        cur = map->hint;
                    697:        simple_unlock(&map->hint_lock);
                    698:
                    699:        if (cur == &map->header)
                    700:                cur = cur->next;
                    701:
                    702:        UVMCNT_INCR(uvm_mlk_call);
                    703:        if (address >= cur->start) {
                    704:                /*
1.10      mrg       705:                 * go from hint to end of list.
1.1       mrg       706:                 *
1.10      mrg       707:                 * but first, make a quick check to see if
                    708:                 * we are already looking at the entry we
                    709:                 * want (which is usually the case).
                    710:                 * note also that we don't need to save the hint
                    711:                 * here... it is the same hint (unless we are
                    712:                 * at the header, in which case the hint didn't
                    713:                 * buy us anything anyway).
1.1       mrg       714:                 */
                    715:                last = &map->header;
                    716:                if ((cur != last) && (cur->end > address)) {
                    717:                        UVMCNT_INCR(uvm_mlk_hint);
                    718:                        *entry = cur;
                    719:                        UVMHIST_LOG(maphist,"<- got it via hint (0x%x)",
1.10      mrg       720:                            cur, 0, 0, 0);
                    721:                        return (TRUE);
1.1       mrg       722:                }
1.10      mrg       723:        } else {
1.1       mrg       724:                /*
1.10      mrg       725:                 * go from start to hint, *inclusively*
1.1       mrg       726:                 */
                    727:                last = cur->next;
                    728:                cur = map->header.next;
                    729:        }
                    730:
                    731:        /*
1.10      mrg       732:         * search linearly
1.1       mrg       733:         */
                    734:
                    735:        while (cur != last) {
                    736:                if (cur->end > address) {
                    737:                        if (address >= cur->start) {
                    738:                                /*
1.10      mrg       739:                                 * save this lookup for future
                    740:                                 * hints, and return
1.1       mrg       741:                                 */
                    742:
                    743:                                *entry = cur;
                    744:                                SAVE_HINT(map, cur);
                    745:                                UVMHIST_LOG(maphist,"<- search got it (0x%x)",
1.10      mrg       746:                                        cur, 0, 0, 0);
                    747:                                return (TRUE);
1.1       mrg       748:                        }
                    749:                        break;
                    750:                }
                    751:                cur = cur->next;
                    752:        }
                    753:        *entry = cur->prev;
                    754:        SAVE_HINT(map, *entry);
                    755:        UVMHIST_LOG(maphist,"<- failed!",0,0,0,0);
1.10      mrg       756:        return (FALSE);
1.1       mrg       757: }
                    758:
                    759:
                    760: /*
                    761:  * uvm_map_findspace: find "length" sized space in "map".
                    762:  *
                    763:  * => "hint" is a hint about where we want it, unless fixed is true
                    764:  *     (in which case we insist on using "hint").
                    765:  * => "result" is VA returned
                    766:  * => uobj/uoffset are to be used to handle VAC alignment, if required
                    767:  * => caller must at least have read-locked map
                    768:  * => returns NULL on failure, or pointer to prev. map entry if success
                    769:  * => note this is a cross between the old vm_map_findspace and vm_map_find
                    770:  */
                    771:
1.10      mrg       772: vm_map_entry_t
                    773: uvm_map_findspace(map, hint, length, result, uobj, uoffset, fixed)
                    774:        vm_map_t map;
1.22.2.1  eeh       775:        vaddr_t hint;
                    776:        vsize_t length;
                    777:        vaddr_t *result; /* OUT */
1.10      mrg       778:        struct uvm_object *uobj;
1.22.2.1  eeh       779:        vaddr_t uoffset;
1.10      mrg       780:        boolean_t fixed;
1.1       mrg       781: {
                    782:        vm_map_entry_t entry, next, tmp;
1.22.2.1  eeh       783:        vaddr_t end;
1.1       mrg       784:        UVMHIST_FUNC("uvm_map_findspace");
                    785:        UVMHIST_CALLED(maphist);
                    786:
                    787:        UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, fixed=%d)",
                    788:                map, hint, length, fixed);
                    789:
                    790:        if (hint < map->min_offset) {   /* check ranges ... */
                    791:                if (fixed) {
                    792:                        UVMHIST_LOG(maphist,"<- VA below map range",0,0,0,0);
                    793:                        return(NULL);
                    794:                }
                    795:                hint = map->min_offset;
                    796:        }
                    797:        if (hint > map->max_offset) {
                    798:                UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]",
                    799:                                hint, map->min_offset, map->max_offset, 0);
                    800:                return(NULL);
                    801:        }
                    802:
                    803:        /*
                    804:         * Look for the first possible address; if there's already
                    805:         * something at this address, we have to start after it.
                    806:         */
                    807:
                    808:        if (!fixed && hint == map->min_offset) {
                    809:                if ((entry = map->first_free) != &map->header)
                    810:                        hint = entry->end;
                    811:        } else {
                    812:                if (uvm_map_lookup_entry(map, hint, &tmp)) {
                    813:                        /* "hint" address already in use ... */
                    814:                        if (fixed) {
                    815:                                UVMHIST_LOG(maphist,"<- fixed & VA in use",
1.10      mrg       816:                                    0, 0, 0, 0);
1.1       mrg       817:                                return(NULL);
                    818:                        }
                    819:                        hint = tmp->end;
                    820:                }
                    821:                entry = tmp;
                    822:        }
                    823:
                    824:        /*
                    825:         * Look through the rest of the map, trying to fit a new region in
                    826:         * the gap between existing regions, or after the very last region.
                    827:         * note: entry->end   = base VA of current gap,
                    828:         *       next->start  = VA of end of current gap
                    829:         */
                    830:        for (;; hint = (entry = next)->end) {
                    831:                /*
                    832:                 * Find the end of the proposed new region.  Be sure we didn't
                    833:                 * go beyond the end of the map, or wrap around the address;
                    834:                 * if so, we lose.  Otherwise, if this is the last entry, or
                    835:                 * if the proposed new region fits before the next entry, we
                    836:                 * win.
                    837:                 */
                    838:
                    839: #ifdef PMAP_PREFER
                    840:                /*
                    841:                 * push hint forward as needed to avoid VAC alias problems.
                    842:                 * we only do this if a valid offset is specified.
                    843:                 */
                    844:                if (!fixed && uoffset != UVM_UNKNOWN_OFFSET)
                    845:                  PMAP_PREFER(uoffset, &hint);
                    846: #endif
                    847:                end = hint + length;
                    848:                if (end > map->max_offset || end < hint) {
                    849:                        UVMHIST_LOG(maphist,"<- failed (off end)", 0,0,0,0);
                    850:                        return (NULL);
                    851:                }
                    852:                next = entry->next;
                    853:                if (next == &map->header || next->start >= end)
                    854:                        break;
                    855:                if (fixed) {
                    856:                        UVMHIST_LOG(maphist,"<- fixed mapping failed", 0,0,0,0);
                    857:                        return(NULL); /* only one shot at it ... */
                    858:                }
                    859:        }
                    860:        SAVE_HINT(map, entry);
                    861:        *result = hint;
                    862:        UVMHIST_LOG(maphist,"<- got it!  (result=0x%x)", hint, 0,0,0);
                    863:        return (entry);
                    864: }
                    865:
                    866: /*
                    867:  *   U N M A P   -   m a i n   h e l p e r   f u n c t i o n s
                    868:  */
                    869:
                    870: /*
                    871:  * uvm_unmap_remove: remove mappings from a vm_map (from "start" up to "stop")
                    872:  *
                    873:  * => caller must check alignment and size
                    874:  * => map must be locked by caller
                    875:  * => if the "start"/"stop" range lie within a mapping of a share map,
                    876:  *    then the unmap takes place within the context of that share map
                    877:  *    rather than in the main map, unless the "mainonly" flag is set.
                    878:  *    (e.g. the "exit" system call would want to set "mainonly").
                    879:  * => we return a list of map entries that we've remove from the map
                    880:  *    in "entry_list"
                    881:  */
                    882:
1.10      mrg       883: int
                    884: uvm_unmap_remove(map, start, end, mainonly, entry_list)
                    885:        vm_map_t map;
1.22.2.1  eeh       886:        vaddr_t start,end;
1.10      mrg       887:        boolean_t mainonly;
                    888:        vm_map_entry_t *entry_list;     /* OUT */
                    889: {
                    890:        int result, refs;
                    891:        vm_map_entry_t entry, first_entry, next;
1.22.2.1  eeh       892:        vaddr_t len;
1.10      mrg       893:        boolean_t already_removed;
                    894:        struct uvm_object *uobj;
                    895:        UVMHIST_FUNC("uvm_unmap_remove");
                    896:        UVMHIST_CALLED(maphist);
                    897:
                    898:        UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)",
                    899:            map, start, end, 0);
                    900:
                    901:        VM_MAP_RANGE_CHECK(map, start, end);
                    902:
                    903:        /*
                    904:         * find first entry
                    905:         */
                    906:        if (uvm_map_lookup_entry(map, start, &first_entry) == TRUE) {
                    907:                /*
                    908:                 * start lies within a mapped region.   first check to see if
                    909:                 * it is within a sharemap (in which case we recurse and unmap
                    910:                 * within the context of the share map).
                    911:                 */
                    912:                if (UVM_ET_ISMAP(first_entry) &&
                    913:                    !UVM_ET_ISSUBMAP(first_entry) &&
                    914:                    mainonly == 0 && end <= first_entry->end) {
                    915:                        /*
                    916:                         * is a share map and in range ...
                    917:                         * XXX: do address transforms if share VA's != main VA's
                    918:                         * note: main map kept locked during share map unlock
                    919:                         */
                    920:                        result = uvm_unmap(first_entry->object.share_map,
                    921:                            start, end, 0);
                    922:                        *entry_list = NULL;
                    923:                        return(result);
                    924:                }
                    925:                /* non-share map: clip and go... */
                    926:                entry = first_entry;
                    927:                UVM_MAP_CLIP_START(map, entry, start);
                    928:                /* critical!  prevents stale hint */
                    929:                SAVE_HINT(map, entry->prev);
                    930:
                    931:        } else {
                    932:                entry = first_entry->next;
                    933:        }
                    934:
                    935:        /*
                    936:         * Save the free space hint
                    937:         */
                    938:
                    939:        if (map->first_free->start >= start)
                    940:                map->first_free = entry->prev;
                    941:
                    942:        /*
                    943:         * note: we now re-use first_entry for a different task.  we remove
                    944:         * a number of map entries from the map and save them in a linked
                    945:         * list headed by "first_entry".  once we remove them from the map
                    946:         * the caller should unlock the map and drop the references to the
                    947:         * backing objects [c.f. uvm_unmap_detach].  the object is to
                    948:         * seperate unmapping from reference dropping.  why?
                    949:         *   [1] the map has to be locked for unmapping
                    950:         *   [2] the map need not be locked for reference dropping
                    951:         *   [3] dropping references may trigger pager I/O, and if we hit
                    952:         *       a pager that does synchronous I/O we may have to wait for it.
                    953:         *   [4] we would like all waiting for I/O to occur with maps unlocked
                    954:         *       so that we don't block other threads.
                    955:         */
                    956:        first_entry = NULL;
                    957:        *entry_list = NULL;             /* to be safe */
                    958:
                    959:        /*
                    960:         * break up the area into map entry sized regions and unmap.  note
                    961:         * that all mappings have to be removed before we can even consider
                    962:         * dropping references to amaps or VM objects (otherwise we could end
                    963:         * up with a mapping to a page on the free list which would be very bad)
                    964:         */
                    965:
                    966:        while ((entry != &map->header) && (entry->start < end)) {
                    967:
                    968:                UVM_MAP_CLIP_END(map, entry, end);
                    969:                next = entry->next;
                    970:                len = entry->end - entry->start;
                    971:
                    972:                /*
                    973:                 * unwire before removing addresses from the pmap; otherwise
                    974:                 * unwiring will put the entries back into the pmap (XXX).
                    975:                 */
1.1       mrg       976:
1.10      mrg       977:                if (entry->wired_count)
                    978:                        uvm_map_entry_unwire(map, entry);
                    979:
                    980:                /*
                    981:                 * special case: handle mappings to anonymous kernel objects.
                    982:                 * we want to free these pages right away...
                    983:                 */
                    984:                if (UVM_ET_ISOBJ(entry) &&
                    985:                    entry->object.uvm_obj->uo_refs == UVM_OBJ_KERN) {
1.1       mrg       986:
                    987: #ifdef DIAGNOSTIC
1.10      mrg       988:                        if (vm_map_pmap(map) != pmap_kernel())
1.1       mrg       989:        panic("uvm_unmap_remove: kernel object mapped by non-kernel map");
                    990: #endif
                    991:
1.10      mrg       992:                        /*
                    993:                         * note: kernel object mappings are currently used in
                    994:                         * two ways:
                    995:                         *  [1] "normal" mappings of pages in the kernel object
                    996:                         *  [2] uvm_km_valloc'd allocations in which we
                    997:                         *      pmap_enter in some non-kernel-object page
                    998:                         *      (e.g. vmapbuf).
                    999:                         *
                   1000:                         * for case [1], we need to remove the mapping from
                   1001:                         * the pmap and then remove the page from the kernel
                   1002:                         * object (because, once pages in a kernel object are
                   1003:                         * unmapped they are no longer needed, unlike, say,
                   1004:                         * a vnode where you might want the data to persist
                   1005:                         * until flushed out of a queue).
                   1006:                         *
                   1007:                         * for case [2], we need to remove the mapping from
                   1008:                         * the pmap.  there shouldn't be any pages at the
                   1009:                         * specified offset in the kernel object [but it
                   1010:                         * doesn't hurt to call uvm_km_pgremove just to be
                   1011:                         * safe?]
                   1012:                         *
                   1013:                         * uvm_km_pgremove currently does the following:
                   1014:                         *   for pages in the kernel object in range:
                   1015:                         *     - pmap_page_protect them out of all pmaps
                   1016:                         *     - uvm_pagefree the page
                   1017:                         *
                   1018:                         * note that in case [1] the pmap_page_protect call
                   1019:                         * in uvm_km_pgremove may very well be redundant
                   1020:                         * because we have already removed the mappings
                   1021:                         * beforehand with pmap_remove (or pmap_kremove).
                   1022:                         * in the PMAP_NEW case, the pmap_page_protect call
                   1023:                         * may not do anything, since PMAP_NEW allows the
                   1024:                         * kernel to enter/remove kernel mappings without
                   1025:                         * bothing to keep track of the mappings (e.g. via
                   1026:                         * pv_entry lists).    XXX: because of this, in the
                   1027:                         * future we should consider removing the
                   1028:                         * pmap_page_protect from uvm_km_pgremove some time
                   1029:                         * in the future.
                   1030:                         */
                   1031:
                   1032:                        /*
                   1033:                         * remove mappings from pmap
                   1034:                         */
1.1       mrg      1035: #if defined(PMAP_NEW)
1.10      mrg      1036:                        pmap_kremove(entry->start, len);
1.1       mrg      1037: #else
1.10      mrg      1038:                        pmap_remove(pmap_kernel(), entry->start,
                   1039:                            entry->start+len);
1.1       mrg      1040: #endif
                   1041:
1.10      mrg      1042:                        /*
                   1043:                         * remove pages from a kernel object (offsets are
                   1044:                         * always relative to vm_map_min(kernel_map)).
                   1045:                         */
                   1046:                        uvm_km_pgremove(entry->object.uvm_obj,
                   1047:                        entry->start - vm_map_min(kernel_map),
                   1048:                        entry->end - vm_map_min(kernel_map));
                   1049:
                   1050:                        already_removed = TRUE;
                   1051:
                   1052:                        /*
                   1053:                         * null out kernel_object reference, we've just
                   1054:                         * dropped it
                   1055:                         */
                   1056:                        entry->etype &= ~UVM_ET_OBJ;
                   1057:                        entry->object.uvm_obj = NULL;   /* to be safe */
                   1058:
                   1059:                } else
                   1060:                        already_removed = FALSE;
                   1061:
                   1062:                /*
                   1063:                 * remove mappings now.   for sharemaps, check to see if
                   1064:                 * the reference count is one (i.e. not being shared right
                   1065:                 * now).   if so, use the cheaper pmap_remove() rather than
                   1066:                 * the more expensive share_protect functions.
                   1067:                 */
                   1068:
                   1069:                if (!map->is_main_map) {
                   1070:                        simple_lock(&map->ref_lock);
                   1071:                        refs = map->ref_count;
                   1072:                        simple_unlock(&map->ref_lock);
                   1073:                }
1.1       mrg      1074: #if defined(sparc)
1.10      mrg      1075:                else
                   1076:                        refs = 0; /* XXX: gcc */
1.1       mrg      1077: #endif
                   1078:
1.10      mrg      1079:                if (map->is_main_map || (!map->is_main_map && refs == 1)) {
                   1080:                        if (!already_removed)
                   1081:                                pmap_remove(map->pmap, entry->start,
                   1082:                                    entry->end);
                   1083:                } else {
                   1084:                        /* share map... must remove all mappings */
                   1085:                        if (entry->aref.ar_amap) {
                   1086:                                simple_lock(&entry->aref.ar_amap->am_l);
                   1087:                                amap_share_protect(entry, VM_PROT_NONE);
                   1088:                                simple_unlock(&entry->aref.ar_amap->am_l);
                   1089:                        }
                   1090:                        if (UVM_ET_ISOBJ(entry)) {
                   1091:                                uobj = entry->object.uvm_obj;
                   1092:                                simple_lock(&uobj->vmobjlock);
                   1093:                                uobj->pgops->pgo_shareprot(entry, VM_PROT_NONE);
                   1094:                                simple_unlock(&uobj->vmobjlock);
                   1095:                        }
                   1096:                }
                   1097:
                   1098:                /*
                   1099:                 * remove from map and put it on our list of entries that
                   1100:                 * we've nuked.  then go do next entry.
                   1101:                 */
                   1102:                UVMHIST_LOG(maphist, "  removed map entry 0x%x", entry, 0, 0,0);
                   1103:                uvm_map_entry_unlink(map, entry);
                   1104:                map->size -= len;
                   1105:                entry->next = first_entry;
                   1106:                first_entry = entry;
                   1107:                entry = next;           /* next entry, please */
                   1108:        }
                   1109:
                   1110:        /*
                   1111:         * now we've cleaned up the map and are ready for the caller to drop
                   1112:         * references to the mapped objects.
                   1113:         */
                   1114:
                   1115:        *entry_list = first_entry;
                   1116:        UVMHIST_LOG(maphist,"<- done!", 0, 0, 0, 0);
                   1117:        return(KERN_SUCCESS);
1.1       mrg      1118: }
                   1119:
                   1120: /*
                   1121:  * uvm_unmap_detach: drop references in a chain of map entries
                   1122:  *
                   1123:  * => we will free the map entries as we traverse the list.
                   1124:  */
                   1125:
1.10      mrg      1126: void
                   1127: uvm_unmap_detach(first_entry, amap_unref_flags)
                   1128:        vm_map_entry_t first_entry;
                   1129:        int amap_unref_flags;
1.1       mrg      1130: {
1.10      mrg      1131:        vm_map_entry_t next_entry;
                   1132:        UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
1.1       mrg      1133:
1.10      mrg      1134:        while (first_entry) {
1.1       mrg      1135:
                   1136: #ifdef DIAGNOSTIC
1.10      mrg      1137:                /*
                   1138:                 * sanity check
                   1139:                 */
                   1140:                /* was part of vm_map_entry_delete() */
                   1141:                if (first_entry->wired_count)
                   1142:                        panic("unmap: still wired!");
1.1       mrg      1143: #endif
                   1144:
1.10      mrg      1145:                UVMHIST_LOG(maphist,
                   1146:                    "  detach 0x%x: amap=0x%x, obj=0x%x, map?=%d", first_entry,
                   1147:                    first_entry->aref.ar_amap, first_entry->object.uvm_obj,
1.1       mrg      1148:                UVM_ET_ISMAP(first_entry));
                   1149:
1.10      mrg      1150:                /*
                   1151:                 * drop reference to amap, if we've got one
                   1152:                 */
                   1153:
                   1154:                if (first_entry->aref.ar_amap)
                   1155:                        amap_unref(first_entry, amap_unref_flags);
                   1156:
                   1157:                /*
                   1158:                 * drop reference to our backing object, if we've got one
                   1159:                 */
                   1160:
                   1161:                if (UVM_ET_ISMAP(first_entry)) {
                   1162:                        uvm_map_deallocate(first_entry->object.share_map);
                   1163:                } else {
                   1164:                        if (UVM_ET_ISOBJ(first_entry) &&
                   1165:                            first_entry->object.uvm_obj->pgops->pgo_detach)
                   1166:                                first_entry->object.uvm_obj->pgops->
                   1167:                                    pgo_detach(first_entry->object.uvm_obj);
                   1168:                }
                   1169:
                   1170:                /*
                   1171:                 * next entry
                   1172:                 */
                   1173:                next_entry = first_entry->next;
                   1174:                uvm_mapent_free(first_entry);
                   1175:                first_entry = next_entry;
                   1176:        }
                   1177:
                   1178:        /*
                   1179:         * done!
                   1180:         */
                   1181:        UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
                   1182:        return;
1.1       mrg      1183: }
                   1184:
                   1185: /*
                   1186:  *   E X T R A C T I O N   F U N C T I O N S
                   1187:  */
                   1188:
                   1189: /*
                   1190:  * uvm_map_reserve: reserve space in a vm_map for future use.
                   1191:  *
                   1192:  * => we reserve space in a map by putting a dummy map entry in the
                   1193:  *    map (dummy means obj=NULL, amap=NULL, prot=VM_PROT_NONE)
                   1194:  * => map should be unlocked (we will write lock it)
                   1195:  * => we return true if we were able to reserve space
                   1196:  * => XXXCDC: should be inline?
                   1197:  */
                   1198:
1.10      mrg      1199: int
                   1200: uvm_map_reserve(map, size, offset, raddr)
                   1201:        vm_map_t map;
1.22.2.1  eeh      1202:        vsize_t size;
                   1203:        vaddr_t offset;    /* hint for pmap_prefer */
                   1204:        vaddr_t *raddr; /* OUT: reserved VA */
1.1       mrg      1205: {
1.10      mrg      1206:        UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
1.1       mrg      1207:
1.10      mrg      1208:        UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)",
1.1       mrg      1209:              map,size,offset,raddr);
                   1210:
1.10      mrg      1211:        size = round_page(size);
                   1212:        if (*raddr < vm_map_min(map))
                   1213:                *raddr = vm_map_min(map);                /* hint */
1.1       mrg      1214:
1.10      mrg      1215:        /*
                   1216:         * reserve some virtual space.
                   1217:         */
1.1       mrg      1218:
1.10      mrg      1219:        if (uvm_map(map, raddr, size, NULL, offset,
                   1220:            UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
                   1221:            UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != KERN_SUCCESS) {
                   1222:            UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
                   1223:                return (FALSE);
                   1224:        }
                   1225:
                   1226:        UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0);
                   1227:        return (TRUE);
1.1       mrg      1228: }
                   1229:
                   1230: /*
                   1231:  * uvm_map_replace: replace a reserved (blank) area of memory with
                   1232:  * real mappings.
                   1233:  *
                   1234:  * => caller must WRITE-LOCK the map
                   1235:  * => we return TRUE if replacement was a success
                   1236:  * => we expect the newents chain to have nnewents entrys on it and
                   1237:  *    we expect newents->prev to point to the last entry on the list
                   1238:  * => note newents is allowed to be NULL
                   1239:  */
                   1240:
1.10      mrg      1241: int
                   1242: uvm_map_replace(map, start, end, newents, nnewents)
                   1243:        struct vm_map *map;
1.22.2.1  eeh      1244:        vaddr_t start, end;
1.10      mrg      1245:        vm_map_entry_t newents;
                   1246:        int nnewents;
                   1247: {
                   1248:        vm_map_entry_t oldent, last;
                   1249:        UVMHIST_FUNC("uvm_map_replace");
                   1250:        UVMHIST_CALLED(maphist);
1.1       mrg      1251:
1.10      mrg      1252:        /*
                   1253:         * first find the blank map entry at the specified address
                   1254:         */
                   1255:
                   1256:        if (!uvm_map_lookup_entry(map, start, &oldent)) {
                   1257:                return(FALSE);
                   1258:        }
                   1259:
                   1260:        /*
                   1261:         * check to make sure we have a proper blank entry
                   1262:         */
1.1       mrg      1263:
1.10      mrg      1264:        if (oldent->start != start || oldent->end != end ||
                   1265:            oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
                   1266:                return (FALSE);
                   1267:        }
1.1       mrg      1268:
                   1269: #ifdef DIAGNOSTIC
1.10      mrg      1270:        /*
                   1271:         * sanity check the newents chain
                   1272:         */
                   1273:        {
                   1274:                vm_map_entry_t tmpent = newents;
                   1275:                int nent = 0;
1.22.2.1  eeh      1276:                vaddr_t cur = start;
1.10      mrg      1277:
                   1278:                while (tmpent) {
                   1279:                        nent++;
                   1280:                        if (tmpent->start < cur)
                   1281:                                panic("uvm_map_replace1");
                   1282:                        if (tmpent->start > tmpent->end || tmpent->end > end) {
                   1283:                printf("tmpent->start=0x%lx, tmpent->end=0x%lx, end=0x%lx\n",
                   1284:                            tmpent->start, tmpent->end, end);
                   1285:                                panic("uvm_map_replace2");
                   1286:                        }
                   1287:                        cur = tmpent->end;
                   1288:                        if (tmpent->next) {
                   1289:                                if (tmpent->next->prev != tmpent)
                   1290:                                        panic("uvm_map_replace3");
                   1291:                        } else {
                   1292:                                if (newents->prev != tmpent)
                   1293:                                        panic("uvm_map_replace4");
                   1294:                        }
                   1295:                        tmpent = tmpent->next;
                   1296:                }
                   1297:                if (nent != nnewents)
                   1298:                        panic("uvm_map_replace5");
                   1299:        }
                   1300: #endif
                   1301:
                   1302:        /*
                   1303:         * map entry is a valid blank!   replace it.   (this does all the
                   1304:         * work of map entry link/unlink...).
                   1305:         */
                   1306:
                   1307:        if (newents) {
                   1308:
                   1309:                last = newents->prev;           /* we expect this */
                   1310:
                   1311:                /* critical: flush stale hints out of map */
                   1312:                SAVE_HINT(map, newents);
                   1313:                if (map->first_free == oldent)
                   1314:                        map->first_free = last;
                   1315:
                   1316:                last->next = oldent->next;
                   1317:                last->next->prev = last;
                   1318:                newents->prev = oldent->prev;
                   1319:                newents->prev->next = newents;
                   1320:                map->nentries = map->nentries + (nnewents - 1);
                   1321:
                   1322:        } else {
                   1323:
                   1324:                /* critical: flush stale hints out of map */
                   1325:                SAVE_HINT(map, oldent->prev);
                   1326:                if (map->first_free == oldent)
                   1327:                        map->first_free = oldent->prev;
                   1328:
                   1329:                /* NULL list of new entries: just remove the old one */
                   1330:                uvm_map_entry_unlink(map, oldent);
                   1331:        }
                   1332:
                   1333:
                   1334:        /*
                   1335:         * now we can free the old blank entry, unlock the map and return.
                   1336:         */
1.1       mrg      1337:
1.10      mrg      1338:        uvm_mapent_free(oldent);
                   1339:        return(TRUE);
1.1       mrg      1340: }
                   1341:
                   1342: /*
                   1343:  * uvm_map_extract: extract a mapping from a map and put it somewhere
                   1344:  *     (maybe removing the old mapping)
                   1345:  *
                   1346:  * => maps should be unlocked (we will write lock them)
                   1347:  * => returns 0 on success, error code otherwise
                   1348:  * => start must be page aligned
                   1349:  * => len must be page sized
                   1350:  * => flags:
                   1351:  *      UVM_EXTRACT_REMOVE: remove mappings from srcmap
                   1352:  *      UVM_EXTRACT_CONTIG: abort if unmapped area (advisory only)
                   1353:  *      UVM_EXTRACT_QREF: for a temporary extraction do quick obj refs
                   1354:  *      UVM_EXTRACT_FIXPROT: set prot to maxprot as we go
                   1355:  *    >>>NOTE: if you set REMOVE, you are not allowed to use CONTIG or QREF!<<<
                   1356:  *    >>>NOTE: QREF's must be unmapped via the QREF path, thus should only
                   1357:  *             be used from within the kernel in a kernel level map <<<
                   1358:  */
                   1359:
1.10      mrg      1360: int
                   1361: uvm_map_extract(srcmap, start, len, dstmap, dstaddrp, flags)
                   1362:        vm_map_t srcmap, dstmap;
1.22.2.1  eeh      1363:        vaddr_t start, *dstaddrp;
                   1364:        vsize_t len;
1.10      mrg      1365:        int flags;
                   1366: {
1.22.2.1  eeh      1367:        vaddr_t dstaddr, end, newend, oldoffset, fudge, orig_fudge,
1.10      mrg      1368:            oldstart;
                   1369:        vm_map_entry_t chain, endchain, entry, orig_entry, newentry, deadentry;
1.20      chuck    1370:        vm_map_entry_t oldentry;
1.22.2.1  eeh      1371:        vsize_t elen;
1.10      mrg      1372:        int nchain, error, copy_ok;
                   1373:        UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
                   1374:        UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start,
                   1375:            len,0);
                   1376:        UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0);
                   1377:
                   1378: #ifdef DIAGNOSTIC
                   1379:        /*
                   1380:         * step 0: sanity check: start must be on a page boundary, length
                   1381:         * must be page sized.  can't ask for CONTIG/QREF if you asked for
                   1382:         * REMOVE.
                   1383:         */
                   1384:        if ((start & PAGE_MASK) || (len & PAGE_MASK))
                   1385:                panic("uvm_map_extract1");
                   1386:        if (flags & UVM_EXTRACT_REMOVE)
                   1387:                if (flags & (UVM_EXTRACT_CONTIG|UVM_EXTRACT_QREF))
                   1388:                        panic("uvm_map_extract2");
                   1389: #endif
                   1390:
                   1391:
                   1392:        /*
                   1393:         * step 1: reserve space in the target map for the extracted area
                   1394:         */
                   1395:
                   1396:        dstaddr = *dstaddrp;
                   1397:        if (uvm_map_reserve(dstmap, len, start, &dstaddr) == FALSE)
                   1398:                return(ENOMEM);
                   1399:        *dstaddrp = dstaddr;    /* pass address back to caller */
                   1400:        UVMHIST_LOG(maphist, "  dstaddr=0x%x", dstaddr,0,0,0);
                   1401:
                   1402:
                   1403:        /*
                   1404:         * step 2: setup for the extraction process loop by init'ing the
                   1405:         * map entry chain, locking src map, and looking up the first useful
                   1406:         * entry in the map.
                   1407:         */
1.1       mrg      1408:
1.10      mrg      1409:        end = start + len;
                   1410:        newend = dstaddr + len;
                   1411:        chain = endchain = NULL;
                   1412:        nchain = 0;
                   1413:        vm_map_lock(srcmap);
                   1414:
                   1415:        if (uvm_map_lookup_entry(srcmap, start, &entry)) {
                   1416:
                   1417:                /* "start" is within an entry */
                   1418:                if (flags & UVM_EXTRACT_QREF) {
                   1419:                        /*
                   1420:                         * for quick references we don't clip the entry, so
                   1421:                         * the entry may map space "before" the starting
                   1422:                         * virtual address... this is the "fudge" factor
                   1423:                         * (which can be non-zero only the first time
                   1424:                         * through the "while" loop in step 3).
                   1425:                         */
                   1426:                        fudge = start - entry->start;
                   1427:                } else {
                   1428:                        /*
                   1429:                         * normal reference: we clip the map to fit (thus
                   1430:                         * fudge is zero)
                   1431:                         */
                   1432:                        UVM_MAP_CLIP_START(srcmap, entry, start);
                   1433:                        SAVE_HINT(srcmap, entry->prev);
                   1434:                        fudge = 0;
                   1435:                }
1.1       mrg      1436:
1.10      mrg      1437:        } else {
                   1438:
                   1439:                /* "start" is not within an entry ... skip to next entry */
                   1440:                if (flags & UVM_EXTRACT_CONTIG) {
                   1441:                        error = EINVAL;
                   1442:                        goto bad;    /* definite hole here ... */
                   1443:                }
1.1       mrg      1444:
1.10      mrg      1445:                entry = entry->next;
                   1446:                fudge = 0;
                   1447:        }
                   1448:        /* save values from srcmap for step 6 */
                   1449:        orig_entry = entry;
                   1450:        orig_fudge = fudge;
1.1       mrg      1451:
                   1452:
1.10      mrg      1453:        /*
                   1454:         * step 3: now start looping through the map entries, extracting
                   1455:         * as we go.
                   1456:         */
1.1       mrg      1457:
1.10      mrg      1458:        while (entry->start < end && entry != &srcmap->header) {
                   1459:
                   1460:                /* if we are not doing a quick reference, clip it */
                   1461:                if ((flags & UVM_EXTRACT_QREF) == 0)
                   1462:                        UVM_MAP_CLIP_END(srcmap, entry, end);
                   1463:
                   1464:                /* clear needs_copy (allow chunking) */
                   1465:                if (UVM_ET_ISNEEDSCOPY(entry)) {
                   1466:                        if (fudge)
                   1467:                                oldstart = entry->start;
                   1468:                        else
                   1469:                                oldstart = 0;   /* XXX: gcc */
                   1470:                        amap_copy(srcmap, entry, M_NOWAIT, TRUE, start, end);
                   1471:                        if (UVM_ET_ISNEEDSCOPY(entry)) {  /* failed? */
                   1472:                                error = ENOMEM;
                   1473:                                goto bad;
                   1474:                        }
                   1475:                        /* amap_copy could clip (during chunk)!  update fudge */
                   1476:                        if (fudge) {
                   1477:                                fudge = fudge - (entry->start - oldstart);
                   1478:                                orig_fudge = fudge;
                   1479:                        }
                   1480:                }
1.1       mrg      1481:
1.10      mrg      1482:                /* calculate the offset of this from "start" */
                   1483:                oldoffset = (entry->start + fudge) - start;
1.1       mrg      1484:
1.10      mrg      1485:                /* allocate a new map entry */
                   1486:                newentry = uvm_mapent_alloc(dstmap);
                   1487:                if (newentry == NULL) {
                   1488:                        error = ENOMEM;
                   1489:                        goto bad;
                   1490:                }
                   1491:
                   1492:                /* set up new map entry */
                   1493:                newentry->next = NULL;
                   1494:                newentry->prev = endchain;
                   1495:                newentry->start = dstaddr + oldoffset;
                   1496:                newentry->end =
                   1497:                    newentry->start + (entry->end - (entry->start + fudge));
                   1498:                if (newentry->end > newend)
                   1499:                        newentry->end = newend;
                   1500:                newentry->object.uvm_obj = entry->object.uvm_obj;
                   1501:                if (newentry->object.uvm_obj) {
                   1502:                        if (newentry->object.uvm_obj->pgops->pgo_reference)
                   1503:                                newentry->object.uvm_obj->pgops->
                   1504:                                    pgo_reference(newentry->object.uvm_obj);
                   1505:                                newentry->offset = entry->offset + fudge;
                   1506:                } else {
                   1507:                        newentry->offset = 0;
                   1508:                }
                   1509:                newentry->etype = entry->etype;
                   1510:                newentry->protection = (flags & UVM_EXTRACT_FIXPROT) ?
                   1511:                        entry->max_protection : entry->protection;
                   1512:                newentry->max_protection = entry->max_protection;
                   1513:                newentry->inheritance = entry->inheritance;
                   1514:                newentry->wired_count = 0;
                   1515:                newentry->aref.ar_amap = entry->aref.ar_amap;
                   1516:                if (newentry->aref.ar_amap) {
                   1517:                        newentry->aref.ar_slotoff =
                   1518:                            entry->aref.ar_slotoff + (fudge / PAGE_SIZE);
                   1519:                        amap_ref(newentry, AMAP_SHARED |
                   1520:                            ((flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0));
                   1521:                } else {
                   1522:                        newentry->aref.ar_slotoff = 0;
                   1523:                }
                   1524:                newentry->advice = entry->advice;
                   1525:
                   1526:                /* now link it on the chain */
                   1527:                nchain++;
                   1528:                if (endchain == NULL) {
                   1529:                        chain = endchain = newentry;
                   1530:                } else {
                   1531:                        endchain->next = newentry;
                   1532:                        endchain = newentry;
                   1533:                }
                   1534:
                   1535:                /* end of 'while' loop! */
                   1536:                if ((flags & UVM_EXTRACT_CONTIG) && entry->end < end &&
                   1537:                    (entry->next == &srcmap->header ||
                   1538:                    entry->next->start != entry->end)) {
                   1539:                        error = EINVAL;
                   1540:                        goto bad;
                   1541:                }
                   1542:                entry = entry->next;
                   1543:                fudge = 0;
                   1544:        }
                   1545:
                   1546:
                   1547:        /*
                   1548:         * step 4: close off chain (in format expected by uvm_map_replace)
                   1549:         */
                   1550:
                   1551:        if (chain)
                   1552:                chain->prev = endchain;
                   1553:
                   1554:
                   1555:        /*
                   1556:         * step 5: attempt to lock the dest map so we can pmap_copy.
                   1557:         * note usage of copy_ok:
                   1558:         *   1 => dstmap locked, pmap_copy ok, and we "replace" here (step 5)
                   1559:         *   0 => dstmap unlocked, NO pmap_copy, and we will "replace" in step 7
                   1560:         */
                   1561:
                   1562:        if (srcmap == dstmap || vm_map_lock_try(dstmap) == TRUE) {
                   1563:
                   1564:                copy_ok = 1;
                   1565:                if (!uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
                   1566:                    nchain)) {
                   1567:                        if (srcmap != dstmap)
                   1568:                                vm_map_unlock(dstmap);
                   1569:                        error = EIO;
                   1570:                        goto bad;
                   1571:                }
                   1572:
                   1573:        } else {
                   1574:
                   1575:                copy_ok = 0;
                   1576:                /* replace defered until step 7 */
                   1577:
                   1578:        }
                   1579:
                   1580:
                   1581:        /*
                   1582:         * step 6: traverse the srcmap a second time to do the following:
                   1583:         *  - if we got a lock on the dstmap do pmap_copy
                   1584:         *  - if UVM_EXTRACT_REMOVE remove the entries
                   1585:         * we make use of orig_entry and orig_fudge (saved in step 2)
                   1586:         */
                   1587:
                   1588:        if (copy_ok || (flags & UVM_EXTRACT_REMOVE)) {
                   1589:
                   1590:                /* purge possible stale hints from srcmap */
                   1591:                if (flags & UVM_EXTRACT_REMOVE) {
                   1592:                        SAVE_HINT(srcmap, orig_entry->prev);
                   1593:                        if (srcmap->first_free->start >= start)
                   1594:                                srcmap->first_free = orig_entry->prev;
                   1595:                }
                   1596:
                   1597:                entry = orig_entry;
                   1598:                fudge = orig_fudge;
                   1599:                deadentry = NULL;       /* for UVM_EXTRACT_REMOVE */
                   1600:
                   1601:                while (entry->start < end && entry != &srcmap->header) {
                   1602:
                   1603:                        if (copy_ok) {
                   1604:        oldoffset = (entry->start + fudge) - start;
                   1605:        elen = min(end, entry->end) - (entry->start + fudge);
                   1606:        pmap_copy(dstmap->pmap, srcmap->pmap, dstaddr + oldoffset,
                   1607:                  elen, entry->start + fudge);
                   1608:                        }
                   1609:
1.20      chuck    1610:       /* we advance "entry" in the following if statement */
1.10      mrg      1611:                        if (flags & UVM_EXTRACT_REMOVE) {
1.20      chuck    1612:                                pmap_remove(srcmap->pmap, entry->start,
                   1613:                                                entry->end);
                   1614:                                oldentry = entry;       /* save entry */
                   1615:                                entry = entry->next;    /* advance */
                   1616:                                uvm_map_entry_unlink(srcmap, oldentry);
                   1617:                                                        /* add to dead list */
                   1618:                                oldentry->next = deadentry;
                   1619:                                deadentry = oldentry;
                   1620:                        } else {
                   1621:                                entry = entry->next;            /* advance */
1.10      mrg      1622:                        }
                   1623:
                   1624:                        /* end of 'while' loop */
                   1625:                        fudge = 0;
                   1626:                }
                   1627:
                   1628:                /*
                   1629:                 * unlock dstmap.  we will dispose of deadentry in
                   1630:                 * step 7 if needed
                   1631:                 */
                   1632:                if (copy_ok && srcmap != dstmap)
                   1633:                        vm_map_unlock(dstmap);
                   1634:
                   1635:        }
                   1636:        else
                   1637:                deadentry = NULL; /* XXX: gcc */
                   1638:
                   1639:        /*
                   1640:         * step 7: we are done with the source map, unlock.   if copy_ok
                   1641:         * is 0 then we have not replaced the dummy mapping in dstmap yet
                   1642:         * and we need to do so now.
                   1643:         */
                   1644:
                   1645:        vm_map_unlock(srcmap);
                   1646:        if ((flags & UVM_EXTRACT_REMOVE) && deadentry)
                   1647:                uvm_unmap_detach(deadentry, 0);   /* dispose of old entries */
                   1648:
                   1649:        /* now do the replacement if we didn't do it in step 5 */
                   1650:        if (copy_ok == 0) {
                   1651:                vm_map_lock(dstmap);
                   1652:                error = uvm_map_replace(dstmap, dstaddr, dstaddr+len, chain,
                   1653:                    nchain);
                   1654:                vm_map_unlock(dstmap);
                   1655:
                   1656:                if (error == FALSE) {
                   1657:                        error = EIO;
                   1658:                        goto bad2;
                   1659:                }
                   1660:        }
                   1661:
                   1662:        /*
                   1663:         * done!
                   1664:         */
                   1665:        return(0);
                   1666:
                   1667:        /*
                   1668:         * bad: failure recovery
                   1669:         */
                   1670: bad:
                   1671:        vm_map_unlock(srcmap);
                   1672: bad2:                  /* src already unlocked */
                   1673:        if (chain)
                   1674:                uvm_unmap_detach(chain,
                   1675:                    (flags & UVM_EXTRACT_QREF) ? AMAP_REFALL : 0);
                   1676:        uvm_unmap(dstmap, dstaddr, dstaddr+len, 1);   /* ??? */
                   1677:        return(error);
                   1678: }
                   1679:
                   1680: /* end of extraction functions */
1.1       mrg      1681:
                   1682: /*
                   1683:  * uvm_map_submap: punch down part of a map into a submap
                   1684:  *
                   1685:  * => only the kernel_map is allowed to be submapped
                   1686:  * => the purpose of submapping is to break up the locking granularity
                   1687:  *     of a larger map
                   1688:  * => the range specified must have been mapped previously with a uvm_map()
                   1689:  *     call [with uobj==NULL] to create a blank map entry in the main map.
                   1690:  *     [And it had better still be blank!]
                   1691:  * => maps which contain submaps should never be copied or forked.
                   1692:  * => to remove a submap, use uvm_unmap() on the main map
                   1693:  *     and then uvm_map_deallocate() the submap.
                   1694:  * => main map must be unlocked.
                   1695:  * => submap must have been init'd and have a zero reference count.
                   1696:  *     [need not be locked as we don't actually reference it]
                   1697:  */
1.10      mrg      1698:
                   1699: int
                   1700: uvm_map_submap(map, start, end, submap)
                   1701:        vm_map_t map, submap;
1.22.2.1  eeh      1702:        vaddr_t start, end;
1.10      mrg      1703: {
                   1704:        vm_map_entry_t entry;
                   1705:        int result;
                   1706:        UVMHIST_FUNC("uvm_map_submap"); UVMHIST_CALLED(maphist);
1.1       mrg      1707:
1.10      mrg      1708:        vm_map_lock(map);
1.1       mrg      1709:
1.10      mrg      1710:        VM_MAP_RANGE_CHECK(map, start, end);
                   1711:
                   1712:        if (uvm_map_lookup_entry(map, start, &entry)) {
                   1713:                UVM_MAP_CLIP_START(map, entry, start);
                   1714:                UVM_MAP_CLIP_END(map, entry, end);              /* to be safe */
                   1715:        }
                   1716:        else {
                   1717:                entry = NULL;
                   1718:        }
1.1       mrg      1719:
1.10      mrg      1720:        if (entry != NULL &&
                   1721:            entry->start == start && entry->end == end &&
                   1722:            entry->object.uvm_obj == NULL && entry->aref.ar_amap == NULL &&
                   1723:            !UVM_ET_ISCOPYONWRITE(entry) && !UVM_ET_ISNEEDSCOPY(entry)) {
                   1724:
                   1725:                /*
                   1726:                 * doit!
                   1727:                 */
                   1728:                entry->etype |= (UVM_ET_MAP|UVM_ET_SUBMAP);
                   1729:                entry->object.sub_map = submap;
                   1730:                entry->offset = 0;
                   1731:                uvm_map_reference(submap);
                   1732:                result = KERN_SUCCESS;
                   1733:        } else {
                   1734:                result = KERN_INVALID_ARGUMENT;
                   1735:        }
                   1736:        vm_map_unlock(map);
1.1       mrg      1737:
1.10      mrg      1738:        return(result);
1.1       mrg      1739: }
                   1740:
                   1741:
                   1742: /*
                   1743:  * uvm_map_protect: change map protection
                   1744:  *
                   1745:  * => set_max means set max_protection.
                   1746:  * => map must be unlocked.
                   1747:  * => XXXCDC: does not work properly with share maps.  rethink.
                   1748:  */
                   1749:
                   1750: #define MASK(entry)     ( UVM_ET_ISCOPYONWRITE(entry) ? \
                   1751:        ~VM_PROT_WRITE : VM_PROT_ALL)
                   1752: #define max(a,b)        ((a) > (b) ? (a) : (b))
                   1753:
1.10      mrg      1754: int
                   1755: uvm_map_protect(map, start, end, new_prot, set_max)
                   1756:        vm_map_t map;
1.22.2.1  eeh      1757:        vaddr_t start, end;
1.10      mrg      1758:        vm_prot_t new_prot;
                   1759:        boolean_t set_max;
                   1760: {
                   1761:        vm_map_entry_t current, entry;
                   1762:        UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
                   1763:        UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)",
1.1       mrg      1764:        map, start, end, new_prot);
1.10      mrg      1765:
                   1766:        vm_map_lock(map);
1.1       mrg      1767:
1.10      mrg      1768:        VM_MAP_RANGE_CHECK(map, start, end);
1.1       mrg      1769:
1.10      mrg      1770:        if (uvm_map_lookup_entry(map, start, &entry)) {
                   1771:                UVM_MAP_CLIP_START(map, entry, start);
                   1772:        } else {
                   1773:                entry = entry->next;
                   1774:        }
                   1775:
1.1       mrg      1776:        /*
1.10      mrg      1777:         * make a first pass to check for protection violations.
1.1       mrg      1778:         */
                   1779:
1.10      mrg      1780:        current = entry;
                   1781:        while ((current != &map->header) && (current->start < end)) {
                   1782:                if (UVM_ET_ISSUBMAP(current))
                   1783:                        return(KERN_INVALID_ARGUMENT);
                   1784:                if ((new_prot & current->max_protection) != new_prot) {
                   1785:                        vm_map_unlock(map);
                   1786:                        return(KERN_PROTECTION_FAILURE);
                   1787:                }
                   1788:                        current = current->next;
                   1789:        }
                   1790:
                   1791:        /* go back and fix up protections (no need to clip this time). */
                   1792:
                   1793:        current = entry;
                   1794:
                   1795:        while ((current != &map->header) && (current->start < end)) {
                   1796:                vm_prot_t old_prot;
                   1797:
                   1798:                UVM_MAP_CLIP_END(map, current, end);
                   1799:
                   1800:                old_prot = current->protection;
                   1801:                if (set_max)
                   1802:                        current->protection =
                   1803:                            (current->max_protection = new_prot) & old_prot;
                   1804:                else
                   1805:                        current->protection = new_prot;
                   1806:
                   1807:                /*
                   1808:                 * update physical map if necessary.  worry about copy-on-write
                   1809:                 * here -- CHECK THIS XXX
                   1810:                 */
                   1811:
                   1812:                if (current->protection != old_prot) {
                   1813:                        if (UVM_ET_ISMAP(current) &&
                   1814:                            !UVM_ET_ISSUBMAP(current)) {
                   1815:                                /* share map?   gotta go down a level */
                   1816:                                vm_map_entry_t  share_entry;
1.22.2.1  eeh      1817:                                vaddr_t     share_end;
1.10      mrg      1818:
                   1819:                                /*
                   1820:                                 * note: a share map has its own address
                   1821:                                 * space (starting at zero). current->offset
                   1822:                                 * is the offset into the share map our
                   1823:                                 * mapping starts.    the length of our
                   1824:                                 * mapping is (current->end - current->start).
                   1825:                                 * thus, our mapping goes from current->offset
                   1826:                                 * to share_end (which is: current->offset +
                   1827:                                 * length) in the share map's address space.
                   1828:                                 *
                   1829:                                 * thus for any share_entry we need to make
                   1830:                                 * sure that the addresses we've got fall in
                   1831:                                 * the range we want.   we use:
                   1832:                                 *  max(any share_entry->start, current->offset)
                   1833:                                 *  min(any share_entry->end, share_end)
                   1834:                                 *
                   1835:                                 * of course to change our pmap we've got to
                   1836:                                 * convert the share * map address back to
                   1837:                                 * our map's virtual address space using:
                   1838:                                 *  our_va = share_va -
                   1839:                                 *     current->offset + current->start
                   1840:                                 *
                   1841:                                 * XXXCDC: protection change in sharemap may
                   1842:                                 * require use of pmap_page_protect.   needs
                   1843:                                 * a rethink.
                   1844:                                 */
                   1845:
                   1846:                                vm_map_lock(current->object.share_map);
                   1847:                                /*
                   1848:                                 * note: current->offset is offset into
                   1849:                                 * share map
                   1850:                                 */
                   1851:                                (void)uvm_map_lookup_entry(
                   1852:                                    current->object.share_map,
1.1       mrg      1853:                                    current->offset, &share_entry);
1.10      mrg      1854:                                share_end = current->offset +
                   1855:                                    (current->end - current->start);
                   1856:                                while ((share_entry !=
                   1857:                                    &current->object.share_map->header) &&
                   1858:                                       (share_entry->start < share_end)) {
                   1859:
                   1860:                                        pmap_protect(map->pmap,
                   1861:                                            (max(share_entry->start,
                   1862:                                              current->offset) -
                   1863:                                            current->offset + current->start),
                   1864:                                            min(share_entry->end, share_end) -
                   1865:                                            current->offset + current->start,
                   1866:                                            current->protection &
                   1867:                                            MASK(share_entry));
                   1868:
                   1869:                                        share_entry = share_entry->next;
                   1870:                                }
                   1871:                                vm_map_unlock(current->object.share_map);
                   1872:
                   1873:                        } else {             /* not share map! */
                   1874:
                   1875:                                pmap_protect(map->pmap, current->start,
                   1876:                                    current->end,
                   1877:                                    current->protection & MASK(entry));
                   1878:
                   1879:                        }
                   1880:                }
                   1881:                current = current->next;
                   1882:        }
                   1883:
                   1884:        vm_map_unlock(map);
                   1885:        UVMHIST_LOG(maphist, "<- done",0,0,0,0);
                   1886:        return(KERN_SUCCESS);
1.1       mrg      1887: }
                   1888:
                   1889: #undef  max
                   1890: #undef  MASK
                   1891:
                   1892: /*
                   1893:  * uvm_map_inherit: set inheritance code for range of addrs in map.
                   1894:  *
                   1895:  * => map must be unlocked
                   1896:  * => note that the inherit code is used during a "fork".  see fork
                   1897:  *     code for details.
                   1898:  * => XXXCDC: currently only works in main map.  what about share map?
                   1899:  */
                   1900:
1.10      mrg      1901: int
                   1902: uvm_map_inherit(map, start, end, new_inheritance)
                   1903:        vm_map_t map;
1.22.2.1  eeh      1904:        vaddr_t start;
                   1905:        vaddr_t end;
1.10      mrg      1906:        vm_inherit_t new_inheritance;
                   1907: {
                   1908:        vm_map_entry_t entry, temp_entry;
                   1909:        UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
                   1910:        UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)",
                   1911:            map, start, end, new_inheritance);
                   1912:
                   1913:        switch (new_inheritance) {
                   1914:        case VM_INHERIT_NONE:
                   1915:        case VM_INHERIT_COPY:
                   1916:        case VM_INHERIT_SHARE:
                   1917:                break;
                   1918:        default:
                   1919:                UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
                   1920:                return(KERN_INVALID_ARGUMENT);
                   1921:        }
1.1       mrg      1922:
1.10      mrg      1923:        vm_map_lock(map);
                   1924:
                   1925:        VM_MAP_RANGE_CHECK(map, start, end);
                   1926:
                   1927:        if (uvm_map_lookup_entry(map, start, &temp_entry)) {
                   1928:                entry = temp_entry;
                   1929:                UVM_MAP_CLIP_START(map, entry, start);
                   1930:        }  else {
                   1931:                entry = temp_entry->next;
                   1932:        }
                   1933:
                   1934:        while ((entry != &map->header) && (entry->start < end)) {
                   1935:                UVM_MAP_CLIP_END(map, entry, end);
                   1936:
                   1937:                entry->inheritance = new_inheritance;
                   1938:
                   1939:                entry = entry->next;
                   1940:        }
                   1941:
                   1942:        vm_map_unlock(map);
                   1943:        UVMHIST_LOG(maphist,"<- done (OK)",0,0,0,0);
                   1944:        return(KERN_SUCCESS);
1.1       mrg      1945: }
                   1946:
                   1947: /*
                   1948:  * uvm_map_pageable: sets the pageability of a range in a map.
                   1949:  *
                   1950:  * => regions sepcified as not pageable require lock-down (wired) memory
                   1951:  *     and page tables.
                   1952:  * => map must not be locked.
                   1953:  * => XXXCDC: check this and try and clean it up.
                   1954:  */
                   1955:
1.19      kleink   1956: int
                   1957: uvm_map_pageable(map, start, end, new_pageable)
1.11      mrg      1958:        vm_map_t map;
1.22.2.1  eeh      1959:        vaddr_t start, end;
1.11      mrg      1960:        boolean_t new_pageable;
1.1       mrg      1961: {
1.10      mrg      1962:        vm_map_entry_t entry, start_entry;
1.22.2.1  eeh      1963:        vaddr_t failed = 0;
1.10      mrg      1964:        int rv;
                   1965:        UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
                   1966:        UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)",
1.1       mrg      1967:        map, start, end, new_pageable);
                   1968:
1.10      mrg      1969:        vm_map_lock(map);
                   1970:        VM_MAP_RANGE_CHECK(map, start, end);
                   1971:
                   1972:        /*
                   1973:         * only one pageability change may take place at one time, since
                   1974:         * uvm_fault_wire assumes it will be called only once for each
                   1975:         * wiring/unwiring.  therefore, we have to make sure we're actually
                   1976:         * changing the pageability for the entire region.  we do so before
                   1977:         * making any changes.
                   1978:         */
                   1979:
                   1980:        if (uvm_map_lookup_entry(map, start, &start_entry) == FALSE) {
                   1981:                vm_map_unlock(map);
                   1982:
                   1983:                UVMHIST_LOG(maphist,"<- done (INVALID ARG)",0,0,0,0);
                   1984:                return (KERN_INVALID_ADDRESS);
                   1985:        }
                   1986:        entry = start_entry;
                   1987:
                   1988:        /*
                   1989:         * handle wiring and unwiring seperately.
                   1990:         */
1.1       mrg      1991:
1.10      mrg      1992:        if (new_pageable) {               /* unwire */
                   1993:
                   1994:                UVM_MAP_CLIP_START(map, entry, start);
                   1995:
                   1996:                /*
                   1997:                 * unwiring.  first ensure that the range to be unwired is
                   1998:                 * really wired down and that there are no holes.
                   1999:                 */
                   2000:                while ((entry != &map->header) && (entry->start < end)) {
                   2001:
                   2002:                        if (entry->wired_count == 0 ||
                   2003:                            (entry->end < end &&
                   2004:                            (entry->next == &map->header ||
                   2005:                            entry->next->start > entry->end))) {
                   2006:                                vm_map_unlock(map);
                   2007:                                UVMHIST_LOG(maphist,
                   2008:                                    "<- done (INVALID UNWIRE ARG)",0,0,0,0);
                   2009:                                return (KERN_INVALID_ARGUMENT);
                   2010:                        }
                   2011:                        entry = entry->next;
                   2012:                }
                   2013:
                   2014:                /*
                   2015:                 * now decrement the wiring count for each region.  if a region
                   2016:                 * becomes completely unwired, unwire its physical pages and
                   2017:                 * mappings.
                   2018:                 */
1.1       mrg      2019: #if 0          /* not necessary: uvm_fault_unwire does not lock */
1.10      mrg      2020:                lock_set_recursive(&map->lock);
1.1       mrg      2021: #endif  /* XXXCDC */
                   2022:
1.10      mrg      2023:                entry = start_entry;
                   2024:                while ((entry != &map->header) && (entry->start < end)) {
                   2025:                        UVM_MAP_CLIP_END(map, entry, end);
                   2026:
                   2027:                        entry->wired_count--;
                   2028:                        if (entry->wired_count == 0)
                   2029:                                uvm_map_entry_unwire(map, entry);
                   2030:
                   2031:                        entry = entry->next;
                   2032:                }
1.1       mrg      2033: #if 0 /* XXXCDC: not necessary, see above */
1.10      mrg      2034:                lock_clear_recursive(&map->lock);
1.1       mrg      2035: #endif
1.10      mrg      2036:                vm_map_unlock(map);
                   2037:                UVMHIST_LOG(maphist,"<- done (OK UNWIRE)",0,0,0,0);
                   2038:                return(KERN_SUCCESS);
                   2039:
                   2040:                /*
                   2041:                 * end of unwire case!
                   2042:                 */
                   2043:        }
                   2044:
                   2045:        /*
                   2046:         * wire case: in two passes [XXXCDC: ugly block of code here]
                   2047:         *
                   2048:         * 1: holding the write lock, we create any anonymous maps that need
                   2049:         *    to be created.  then we clip each map entry to the region to
                   2050:         *    be wired and increment its wiring count.
                   2051:         *
                   2052:         * 2: we downgrade to a read lock, and call uvm_fault_wire to fault
                   2053:         *    in the pages for any newly wired area (wired_count is 1).
                   2054:         *
                   2055:         *    downgrading to a read lock for uvm_fault_wire avoids a possible
                   2056:         *    deadlock with another thread that may have faulted on one of
                   2057:         *    the pages to be wired (it would mark the page busy, blocking
                   2058:         *    us, then in turn block on the map lock that we hold).  because
                   2059:         *    of problems in the recursive lock package, we cannot upgrade
                   2060:         *    to a write lock in vm_map_lookup.  thus, any actions that
                   2061:         *    require the write lock must be done beforehand.  because we
                   2062:         *    keep the read lock on the map, the copy-on-write status of the
                   2063:         *    entries we modify here cannot change.
                   2064:         */
                   2065:
                   2066:        while ((entry != &map->header) && (entry->start < end)) {
                   2067:
                   2068:                if (entry->wired_count == 0) {  /* not already wired? */
                   2069:
                   2070:                        /*
                   2071:                         * perform actions of vm_map_lookup that need the
                   2072:                         * write lock on the map: create an anonymous map
                   2073:                         * for a copy-on-write region, or an anonymous map
                   2074:                         * for a zero-fill region.
                   2075:                         *
                   2076:                         * we don't have to do this for entries that point
                   2077:                         * to sharing maps, because we won't hold the lock
                   2078:                         * on the sharing map.
                   2079:                         */
                   2080:
                   2081:                        if (!UVM_ET_ISMAP(entry)) {      /* not sharing map */
                   2082:                                /*
                   2083:                                 * XXXCDC: protection vs. max_protection??
                   2084:                                 * (wirefault uses max?)
                   2085:                                 * XXXCDC: used to do it always if
                   2086:                                 * uvm_obj == NULL (wrong?)
                   2087:                                 */
                   2088:                                if ( UVM_ET_ISNEEDSCOPY(entry) &&
                   2089:                                    (entry->protection & VM_PROT_WRITE) != 0) {
                   2090:                                        amap_copy(map, entry, M_WAITOK, TRUE,
                   2091:                                            start, end);
                   2092:                                        /* XXXCDC: wait OK? */
                   2093:                                }
                   2094:                        }
                   2095:                }     /* wired_count == 0 */
                   2096:                UVM_MAP_CLIP_START(map, entry, start);
                   2097:                UVM_MAP_CLIP_END(map, entry, end);
                   2098:                entry->wired_count++;
                   2099:
                   2100:                /*
                   2101:                 * Check for holes
                   2102:                 */
                   2103:                if (entry->end < end && (entry->next == &map->header ||
1.1       mrg      2104:                             entry->next->start > entry->end)) {
1.10      mrg      2105:                        /*
                   2106:                         * found one.  amap creation actions do not need to
                   2107:                         * be undone, but the wired counts need to be restored.
                   2108:                         */
                   2109:                        while (entry != &map->header && entry->end > start) {
                   2110:                                entry->wired_count--;
                   2111:                                entry = entry->prev;
                   2112:                        }
                   2113:                        vm_map_unlock(map);
                   2114:                        UVMHIST_LOG(maphist,"<- done (INVALID WIRE)",0,0,0,0);
                   2115:                        return(KERN_INVALID_ARGUMENT);
                   2116:                }
                   2117:                entry = entry->next;
                   2118:        }
                   2119:
                   2120:        /*
                   2121:         * Pass 2.
                   2122:         */
                   2123:        /*
                   2124:         * HACK HACK HACK HACK
                   2125:         *
                   2126:         * if we are wiring in the kernel map or a submap of it, unlock the
                   2127:         * map to avoid deadlocks.  we trust that the kernel threads are
                   2128:         * well-behaved, and therefore will not do anything destructive to
                   2129:         * this region of the map while we have it unlocked.  we cannot
                   2130:         * trust user threads to do the same.
                   2131:         *
                   2132:         * HACK HACK HACK HACK
                   2133:         */
                   2134:        if (vm_map_pmap(map) == pmap_kernel()) {
                   2135:                vm_map_unlock(map);         /* trust me ... */
                   2136:        } else {
                   2137:                vm_map_set_recursive(&map->lock);
                   2138:                lockmgr(&map->lock, LK_DOWNGRADE, (void *)0);
                   2139:        }
                   2140:
                   2141:        rv = 0;
                   2142:        entry = start_entry;
                   2143:        while (entry != &map->header && entry->start < end) {
                   2144:                /*
                   2145:                 * if uvm_fault_wire fails for any page we need to undo what has
                   2146:                 * been done.  we decrement the wiring count for those pages
                   2147:                 * which have not yet been wired (now) and unwire those that
                   2148:                 * have * (later).
                   2149:                 *
                   2150:                 * XXX this violates the locking protocol on the map, needs to
                   2151:                 * be fixed.  [because we only have a read lock on map we
                   2152:                 * shouldn't be changing wired_count?]
                   2153:                 */
                   2154:                if (rv) {
                   2155:                        entry->wired_count--;
                   2156:                } else if (entry->wired_count == 1) {
                   2157:                        rv = uvm_fault_wire(map, entry->start, entry->end);
                   2158:                        if (rv) {
                   2159:                                failed = entry->start;
                   2160:                                entry->wired_count--;
                   2161:                        }
                   2162:                }
                   2163:                entry = entry->next;
                   2164:        }
                   2165:
                   2166:        if (vm_map_pmap(map) == pmap_kernel()) {
                   2167:                vm_map_lock(map);     /* relock */
                   2168:        } else {
                   2169:                vm_map_clear_recursive(&map->lock);
                   2170:        }
                   2171:
                   2172:        if (rv) {        /* failed? */
                   2173:                vm_map_unlock(map);
                   2174:                (void) uvm_map_pageable(map, start, failed, TRUE);
                   2175:                UVMHIST_LOG(maphist, "<- done (RV=%d)", rv,0,0,0);
                   2176:                return(rv);
                   2177:        }
                   2178:        vm_map_unlock(map);
                   2179:
                   2180:        UVMHIST_LOG(maphist,"<- done (OK WIRE)",0,0,0,0);
                   2181:        return(KERN_SUCCESS);
1.1       mrg      2182: }
                   2183:
                   2184: /*
                   2185:  * uvm_map_clean: push dirty pages off to backing store.
                   2186:  *
                   2187:  * => valid flags:
                   2188:  *   if (flags & PGO_SYNCIO): dirty pages are written synchronously
                   2189:  *   if (flags & PGO_DEACTIVATE): any cached pages are deactivated after clean
                   2190:  *   if (flags & PGO_FREE): any cached pages are freed after clean
                   2191:  * => returns an error if any part of the specified range isn't mapped
                   2192:  * => never a need to flush amap layer since the anonymous memory has
                   2193:  *     no permanent home...
                   2194:  * => called from sys_msync()
                   2195:  * => caller must not write-lock map (read OK).
                   2196:  * => we may sleep while cleaning if SYNCIO [with map read-locked]
                   2197:  * => XXX: does this handle share maps properly?
                   2198:  */
                   2199:
1.10      mrg      2200: int
                   2201: uvm_map_clean(map, start, end, flags)
                   2202:        vm_map_t map;
1.22.2.1  eeh      2203:        vaddr_t start, end;
1.10      mrg      2204:        int flags;
                   2205: {
                   2206:        vm_map_entry_t current;
                   2207:        vm_map_entry_t entry;
1.22.2.1  eeh      2208:        vsize_t size;
1.10      mrg      2209:        struct uvm_object *object;
1.22.2.1  eeh      2210:        vaddr_t offset;
1.10      mrg      2211:        UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
                   2212:        UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)",
                   2213:        map, start, end, flags);
                   2214:
                   2215:        vm_map_lock_read(map);
                   2216:        VM_MAP_RANGE_CHECK(map, start, end);
                   2217:        if (!uvm_map_lookup_entry(map, start, &entry)) {
                   2218:                vm_map_unlock_read(map);
                   2219:                return(KERN_INVALID_ADDRESS);
                   2220:        }
                   2221:
                   2222:        /*
                   2223:         * Make a first pass to check for holes.
                   2224:         */
                   2225:        for (current = entry; current->start < end; current = current->next) {
                   2226:                if (UVM_ET_ISSUBMAP(current)) {
                   2227:                        vm_map_unlock_read(map);
                   2228:                        return(KERN_INVALID_ARGUMENT);
                   2229:                }
                   2230:                if (end > current->end && (current->next == &map->header ||
                   2231:                    current->end != current->next->start)) {
                   2232:                        vm_map_unlock_read(map);
                   2233:                        return(KERN_INVALID_ADDRESS);
                   2234:                }
                   2235:        }
                   2236:
                   2237:        /*
                   2238:         * add "cleanit" flag to flags (for generic flush routine).
                   2239:         * then make a second pass, cleaning/uncaching pages from
                   2240:         * the indicated objects as we go.
                   2241:         */
                   2242:        flags = flags | PGO_CLEANIT;
                   2243:        for (current = entry; current->start < end; current = current->next) {
                   2244:                offset = current->offset + (start - current->start);
                   2245:                size = (end <= current->end ? end : current->end) - start;
1.1       mrg      2246:
1.10      mrg      2247:                /*
                   2248:                 * get object/offset.   special case to handle share maps.
                   2249:                 */
                   2250:                if (UVM_ET_ISMAP(current)) {   /* share map? */
                   2251:                        register vm_map_t smap;
                   2252:                        vm_map_entry_t tentry;
1.22.2.1  eeh      2253:                        vsize_t tsize;
1.10      mrg      2254:
                   2255:                        smap = current->object.share_map;
                   2256:                        vm_map_lock_read(smap);
                   2257:                        (void) uvm_map_lookup_entry(smap, offset, &tentry);
                   2258:                        tsize = tentry->end - offset;
                   2259:                        if (tsize < size)
                   2260:                                size = tsize;
                   2261:                        object = tentry->object.uvm_obj;
                   2262:                        offset = tentry->offset + (offset - tentry->start);
                   2263:                        simple_lock(&object->vmobjlock);
                   2264:                        vm_map_unlock_read(smap);
                   2265:                } else {
                   2266:                        object = current->object.uvm_obj;
                   2267:                        simple_lock(&object->vmobjlock);
                   2268:                }
1.1       mrg      2269:
1.10      mrg      2270:                /*
                   2271:                 * flush pages if writing is allowed.   note that object is
                   2272:                 * locked.
                   2273:                 * XXX should we continue on an error?
                   2274:                 */
1.1       mrg      2275:
1.10      mrg      2276:                if (object && object->pgops &&
                   2277:                    (current->protection & VM_PROT_WRITE) != 0) {
                   2278:                        if (!object->pgops->pgo_flush(object, offset,
                   2279:                            offset+size, flags)) {
                   2280:                                simple_unlock(&object->vmobjlock);
                   2281:                                vm_map_unlock_read(map);
                   2282:                                return (KERN_FAILURE);
                   2283:                        }
                   2284:                }
                   2285:                simple_unlock(&object->vmobjlock);
                   2286:                start += size;
                   2287:        }
1.1       mrg      2288:        vm_map_unlock_read(map);
1.10      mrg      2289:        return(KERN_SUCCESS);
1.1       mrg      2290: }
                   2291:
                   2292:
                   2293: /*
                   2294:  * uvm_map_checkprot: check protection in map
                   2295:  *
                   2296:  * => must allow specified protection in a fully allocated region.
                   2297:  * => map must be read or write locked by caller.
                   2298:  */
                   2299:
1.10      mrg      2300: boolean_t
                   2301: uvm_map_checkprot(map, start, end, protection)
                   2302:        vm_map_t       map;
1.22.2.1  eeh      2303:        vaddr_t    start, end;
1.10      mrg      2304:        vm_prot_t      protection;
                   2305: {
                   2306:         vm_map_entry_t entry;
                   2307:         vm_map_entry_t tmp_entry;
                   2308:
                   2309:         if (!uvm_map_lookup_entry(map, start, &tmp_entry)) {
                   2310:                 return(FALSE);
                   2311:         }
                   2312:
                   2313:         entry = tmp_entry;
                   2314:
                   2315:         while (start < end) {
                   2316:                 if (entry == &map->header) {
                   2317:                         return(FALSE);
                   2318:                 }
                   2319:
                   2320:                /*
                   2321:                 * no holes allowed
                   2322:                 */
                   2323:
                   2324:                 if (start < entry->start) {
                   2325:                         return(FALSE);
                   2326:                 }
                   2327:
                   2328:                /*
                   2329:                 * check protection associated with entry
                   2330:                 */
1.1       mrg      2331:
1.10      mrg      2332:                 if ((entry->protection & protection) != protection) {
                   2333:                         return(FALSE);
                   2334:                 }
                   2335:
                   2336:                 /* go to next entry */
                   2337:
                   2338:                 start = entry->end;
                   2339:                 entry = entry->next;
                   2340:         }
                   2341:         return(TRUE);
1.1       mrg      2342: }
                   2343:
                   2344: /*
                   2345:  * uvmspace_alloc: allocate a vmspace structure.
                   2346:  *
                   2347:  * - structure includes vm_map and pmap
                   2348:  * - XXX: no locking on this structure
                   2349:  * - refcnt set to 1, rest must be init'd by caller
                   2350:  */
1.10      mrg      2351: struct vmspace *
                   2352: uvmspace_alloc(min, max, pageable)
1.22.2.1  eeh      2353:        vaddr_t min, max;
1.10      mrg      2354:        int pageable;
                   2355: {
                   2356:        struct vmspace *vm;
                   2357:        UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
                   2358:
                   2359:        MALLOC(vm, struct vmspace *, sizeof(struct vmspace), M_VMMAP, M_WAITOK);
1.15      thorpej  2360:        uvmspace_init(vm, NULL, min, max, pageable);
                   2361:        UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0);
                   2362:        return (vm);
                   2363: }
                   2364:
                   2365: /*
                   2366:  * uvmspace_init: initialize a vmspace structure.
                   2367:  *
                   2368:  * - XXX: no locking on this structure
                   2369:  * - refcnt set to 1, rest must me init'd by caller
                   2370:  */
                   2371: void
                   2372: uvmspace_init(vm, pmap, min, max, pageable)
                   2373:        struct vmspace *vm;
                   2374:        struct pmap *pmap;
1.22.2.1  eeh      2375:        vaddr_t min, max;
1.15      thorpej  2376:        boolean_t pageable;
                   2377: {
                   2378:        UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
                   2379:
1.10      mrg      2380:        bzero(vm, sizeof(*vm));
1.15      thorpej  2381:
1.10      mrg      2382:        uvm_map_setup(&vm->vm_map, min, max, pageable);
1.15      thorpej  2383:
                   2384:        if (pmap)
                   2385:                pmap_reference(pmap);
                   2386:        else
1.1       mrg      2387: #if defined(PMAP_NEW)
1.15      thorpej  2388:                pmap = pmap_create();
1.1       mrg      2389: #else
1.15      thorpej  2390:                pmap = pmap_create(0);
1.1       mrg      2391: #endif
1.15      thorpej  2392:        vm->vm_map.pmap = pmap;
                   2393:
1.10      mrg      2394:        vm->vm_refcnt = 1;
1.15      thorpej  2395:        UVMHIST_LOG(maphist,"<- done",0,0,0,0);
1.1       mrg      2396: }
                   2397:
                   2398: /*
                   2399:  * uvmspace_share: share a vmspace between two proceses
                   2400:  *
                   2401:  * - XXX: no locking on vmspace
                   2402:  * - used for vfork, threads(?)
                   2403:  */
                   2404:
1.10      mrg      2405: void
                   2406: uvmspace_share(p1, p2)
                   2407:        struct proc *p1, *p2;
1.1       mrg      2408: {
1.10      mrg      2409:        p2->p_vmspace = p1->p_vmspace;
                   2410:        p1->p_vmspace->vm_refcnt++;
1.1       mrg      2411: }
                   2412:
                   2413: /*
                   2414:  * uvmspace_unshare: ensure that process "p" has its own, unshared, vmspace
                   2415:  *
                   2416:  * - XXX: no locking on vmspace
                   2417:  */
                   2418:
1.10      mrg      2419: void
                   2420: uvmspace_unshare(p)
                   2421:        struct proc *p;
                   2422: {
                   2423:        struct vmspace *nvm, *ovm = p->p_vmspace;
1.13      thorpej  2424:        int s;
1.1       mrg      2425:
1.10      mrg      2426:        if (ovm->vm_refcnt == 1)
                   2427:                /* nothing to do: vmspace isn't shared in the first place */
                   2428:                return;
1.1       mrg      2429:
1.10      mrg      2430:        /* make a new vmspace, still holding old one */
                   2431:        nvm = uvmspace_fork(ovm);
                   2432:
1.13      thorpej  2433:        s = splhigh();                  /* make this `atomic' */
1.12      thorpej  2434:        pmap_deactivate(p);             /* unbind old vmspace */
1.10      mrg      2435:        p->p_vmspace = nvm;
                   2436:        pmap_activate(p);               /* switch to new vmspace */
1.13      thorpej  2437:        splx(s);                        /* end of critical section */
                   2438:
1.10      mrg      2439:        uvmspace_free(ovm);             /* drop reference to old vmspace */
1.1       mrg      2440: }
                   2441:
                   2442: /*
                   2443:  * uvmspace_exec: the process wants to exec a new program
                   2444:  *
                   2445:  * - XXX: no locking on vmspace
                   2446:  */
                   2447:
1.10      mrg      2448: void
                   2449: uvmspace_exec(p)
                   2450:        struct proc *p;
1.1       mrg      2451: {
1.10      mrg      2452:        struct vmspace *nvm, *ovm = p->p_vmspace;
                   2453:        vm_map_t map = &ovm->vm_map;
1.13      thorpej  2454:        int s;
1.1       mrg      2455:
                   2456: #ifdef sparc
1.10      mrg      2457:        /* XXX cgd 960926: the sparc #ifdef should be a MD hook */
                   2458:        kill_user_windows(p);   /* before stack addresses go away */
1.1       mrg      2459: #endif
                   2460:
1.10      mrg      2461:        /*
                   2462:         * see if more than one process is using this vmspace...
                   2463:         */
1.1       mrg      2464:
1.10      mrg      2465:        if (ovm->vm_refcnt == 1) {
1.1       mrg      2466:
1.10      mrg      2467:                /*
                   2468:                 * if p is the only process using its vmspace then we can safely
                   2469:                 * recycle that vmspace for the program that is being exec'd.
                   2470:                 */
1.1       mrg      2471:
                   2472: #ifdef SYSVSHM
1.10      mrg      2473:                /*
                   2474:                 * SYSV SHM semantics require us to kill all segments on an exec
                   2475:                 */
                   2476:                if (ovm->vm_shm)
                   2477:                        shmexit(ovm);
                   2478: #endif
                   2479:
                   2480:                /*
                   2481:                 * now unmap the old program
                   2482:                 */
                   2483:                uvm_unmap(map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS, 0);
                   2484:
                   2485:        } else {
                   2486:
                   2487:                /*
                   2488:                 * p's vmspace is being shared, so we can't reuse it for p since
                   2489:                 * it is still being used for others.   allocate a new vmspace
                   2490:                 * for p
                   2491:                 */
                   2492:                nvm = uvmspace_alloc(map->min_offset, map->max_offset,
1.1       mrg      2493:                         map->entries_pageable);
                   2494:
1.17      matthias 2495: #if (defined(i386) || defined(pc532)) && !defined(PMAP_NEW)
1.10      mrg      2496:                /*
                   2497:                 * allocate zero fill area in the new vmspace's map for user
                   2498:                 * page tables for ports that have old style pmaps that keep
                   2499:                 * user page tables in the top part of the process' address
                   2500:                 * space.
                   2501:                 *
                   2502:                 * XXXCDC: this should go away once all pmaps are fixed
                   2503:                 */
                   2504:                {
1.22.2.1  eeh      2505:                        vaddr_t addr = VM_MAXUSER_ADDRESS;
1.10      mrg      2506:                        if (uvm_map(&nvm->vm_map, &addr, VM_MAX_ADDRESS - addr,
                   2507:                            NULL, UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
                   2508:                            UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_NORMAL,
                   2509:                            UVM_FLAG_FIXED|UVM_FLAG_COPYONW)) != KERN_SUCCESS)
                   2510:                                panic("vm_allocate of PT page area failed");
                   2511:                }
                   2512: #endif
                   2513:
                   2514:                /*
                   2515:                 * install new vmspace and drop our ref to the old one.
                   2516:                 */
                   2517:
1.13      thorpej  2518:                s = splhigh();
1.12      thorpej  2519:                pmap_deactivate(p);
1.10      mrg      2520:                p->p_vmspace = nvm;
                   2521:                pmap_activate(p);
1.13      thorpej  2522:                splx(s);
                   2523:
1.10      mrg      2524:                uvmspace_free(ovm);
                   2525:        }
1.1       mrg      2526: }
                   2527:
                   2528: /*
                   2529:  * uvmspace_free: free a vmspace data structure
                   2530:  *
                   2531:  * - XXX: no locking on vmspace
                   2532:  */
                   2533:
1.10      mrg      2534: void
                   2535: uvmspace_free(vm)
                   2536:        struct vmspace *vm;
1.1       mrg      2537: {
1.10      mrg      2538:        vm_map_entry_t dead_entries;
                   2539:        UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
1.1       mrg      2540:
1.10      mrg      2541:        UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0);
                   2542:        if (--vm->vm_refcnt == 0) {
                   2543:                /*
                   2544:                 * lock the map, to wait out all other references to it.  delete
                   2545:                 * all of the mappings and pages they hold, then call the pmap
                   2546:                 * module to reclaim anything left.
                   2547:                 */
                   2548:                vm_map_lock(&vm->vm_map);
                   2549:                if (vm->vm_map.nentries) {
                   2550:                        (void)uvm_unmap_remove(&vm->vm_map,
                   2551:                            vm->vm_map.min_offset, vm->vm_map.max_offset,
                   2552:                            TRUE, &dead_entries);
                   2553:                        if (dead_entries != NULL)
                   2554:                                uvm_unmap_detach(dead_entries, 0);
                   2555:                }
                   2556:                pmap_destroy(vm->vm_map.pmap);
                   2557:                vm->vm_map.pmap = NULL;
                   2558:                FREE(vm, M_VMMAP);
                   2559:        }
                   2560:        UVMHIST_LOG(maphist,"<- done", 0,0,0,0);
1.1       mrg      2561: }
                   2562:
                   2563: /*
                   2564:  *   F O R K   -   m a i n   e n t r y   p o i n t
                   2565:  */
                   2566: /*
                   2567:  * uvmspace_fork: fork a process' main map
                   2568:  *
                   2569:  * => create a new vmspace for child process from parent.
                   2570:  * => parent's map must not be locked.
                   2571:  */
                   2572:
1.10      mrg      2573: struct vmspace *
                   2574: uvmspace_fork(vm1)
                   2575:        struct vmspace *vm1;
                   2576: {
                   2577:        struct vmspace *vm2;
                   2578:        vm_map_t        old_map = &vm1->vm_map;
                   2579:        vm_map_t        new_map;
                   2580:        vm_map_entry_t  old_entry;
                   2581:        vm_map_entry_t  new_entry;
                   2582:        pmap_t          new_pmap;
1.14      chuck    2583:        boolean_t       protect_child;
1.10      mrg      2584:        UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
1.1       mrg      2585:
1.17      matthias 2586: #if (defined(i386) || defined(pc532)) && !defined(PMAP_NEW)
1.10      mrg      2587:        /*
                   2588:         * avoid copying any of the parent's pagetables or other per-process
                   2589:         * objects that reside in the map by marking all of them non-inheritable
                   2590:         * XXXCDC: should go away
                   2591:         */
                   2592:        (void) uvm_map_inherit(old_map, VM_MAXUSER_ADDRESS, VM_MAX_ADDRESS,
1.1       mrg      2593:                         VM_INHERIT_NONE);
                   2594: #endif
                   2595:
1.10      mrg      2596:        vm_map_lock(old_map);
1.1       mrg      2597:
1.10      mrg      2598:        vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset,
1.1       mrg      2599:                      old_map->entries_pageable);
1.10      mrg      2600:        bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
1.1       mrg      2601:        (caddr_t) (vm1 + 1) - (caddr_t) &vm1->vm_startcopy);
1.10      mrg      2602:        new_map = &vm2->vm_map;           /* XXX */
                   2603:        new_pmap = new_map->pmap;
                   2604:
                   2605:        old_entry = old_map->header.next;
                   2606:
                   2607:        /*
                   2608:         * go entry-by-entry
                   2609:         */
1.1       mrg      2610:
1.10      mrg      2611:        while (old_entry != &old_map->header) {
1.1       mrg      2612:
1.10      mrg      2613:                /*
                   2614:                 * first, some sanity checks on the old entry
                   2615:                 */
                   2616:                if (UVM_ET_ISSUBMAP(old_entry))
                   2617:        panic("fork: encountered a submap during fork (illegal)");
                   2618:                else if (UVM_ET_ISMAP(old_entry)) {
                   2619:                        if (UVM_ET_ISNEEDSCOPY(old_entry))
1.1       mrg      2620:        panic("fork: encountered a share map entry that needs_copy (illegal)");
1.10      mrg      2621:                        if (UVM_ET_ISCOPYONWRITE(old_entry))
1.1       mrg      2622:        panic("fork: encountered a copy_on_write share map entry (illegal)");
1.10      mrg      2623:                        if (old_entry->aref.ar_amap)
1.1       mrg      2624:        panic("fork: detected share map entry that has an amap (illegal)");
1.10      mrg      2625:                } else {
                   2626:                        if (!UVM_ET_ISCOPYONWRITE(old_entry) &&
                   2627:                            UVM_ET_ISNEEDSCOPY(old_entry))
1.1       mrg      2628:        panic("fork: non-copy_on_write map entry marked needs_copy (illegal)");
1.10      mrg      2629:                }
1.1       mrg      2630:
                   2631:
1.10      mrg      2632:                switch (old_entry->inheritance) {
                   2633:                case VM_INHERIT_NONE:
                   2634:                        /*
                   2635:                         * drop the mapping
                   2636:                         */
                   2637:                        break;
                   2638:
                   2639:                case VM_INHERIT_SHARE:
                   2640:                        /*
                   2641:                         * share the mapping: this means we want the old and
                   2642:                         * new entries to share amaps and backing objects.
                   2643:                         */
                   2644:
                   2645:                        /*
                   2646:                         * if the old_entry needs a new amap (due to prev fork)
                   2647:                         * then we need to allocate it now so that we have
                   2648:                         * something we own to share with the new_entry.   [in
                   2649:                         * other words, we need to clear needs_copy]
                   2650:                         */
                   2651:
                   2652:                        if (UVM_ET_ISNEEDSCOPY(old_entry)) {
                   2653:                                /* get our own amap, clears needs_copy */
                   2654:                                amap_copy(old_map, old_entry, M_WAITOK, FALSE,
                   2655:                                    0, 0);
                   2656:                                /* XXXCDC: WAITOK??? */
                   2657:                        }
                   2658:
                   2659:                        new_entry = uvm_mapent_alloc(new_map);
                   2660:                        /* old_entry -> new_entry */
                   2661:                        uvm_mapent_copy(old_entry, new_entry);
                   2662:
                   2663:                        /* new pmap has nothing wired in it */
                   2664:                        new_entry->wired_count = 0;
                   2665:
                   2666:                        /*
                   2667:                         * gain reference to objects backing the map
                   2668:                         */
                   2669:                        if (UVM_ET_ISMAP(new_entry)) {   /* share map? */
                   2670:                                uvm_map_reference(old_entry->object.share_map);
                   2671:                        } else {
                   2672:                        if (new_entry->aref.ar_amap)
                   2673:                                /* share reference */
                   2674:                                amap_ref(new_entry, AMAP_SHARED);
                   2675:
                   2676:                        if (new_entry->object.uvm_obj &&
                   2677:                            new_entry->object.uvm_obj->pgops->pgo_reference)
                   2678:                                new_entry->object.uvm_obj->
                   2679:                                    pgops->pgo_reference(
                   2680:                                        new_entry->object.uvm_obj);
                   2681:                        }
                   2682:
                   2683:                        /* insert entry at end of new_map's entry list */
                   2684:                        uvm_map_entry_link(new_map, new_map->header.prev,
                   2685:                            new_entry);
                   2686:
                   2687:                        /*
                   2688:                         * pmap_copy the mappings: this routine is optional
                   2689:                         * but if it is there it will reduce the number of
                   2690:                         * page faults in the new proc.
                   2691:                         */
                   2692:
                   2693:                        pmap_copy(new_pmap, old_map->pmap, new_entry->start,
                   2694:                            (old_entry->end - old_entry->start),
                   2695:                            old_entry->start);
                   2696:
                   2697:                        break;
                   2698:
                   2699:                case VM_INHERIT_COPY:
                   2700:
                   2701:                        /*
                   2702:                         * copy-on-write the mapping (using mmap's
                   2703:                         * MAP_PRIVATE semantics)
                   2704:                         */
                   2705:
                   2706:                        /*
                   2707:                         * share maps: we special case it (handled by
                   2708:                         * uvm_map_sharemapcopy)
                   2709:                         */
                   2710:
                   2711:                        if (UVM_ET_ISMAP(old_entry)) {   /* share map? */
                   2712:                                uvm_map_sharemapcopy(old_map, old_entry,
                   2713:                                    new_map);
                   2714:                                break;
                   2715:                        }
                   2716:
                   2717:                        /*
                   2718:                         * not a share map.   allocate new_entry, adjust
                   2719:                         * reference counts.  (note that new references
                   2720:                         * are read-only).
                   2721:                         */
                   2722:
                   2723:                        new_entry = uvm_mapent_alloc(new_map);
                   2724:                        /* old_entry -> new_entry */
                   2725:                        uvm_mapent_copy(old_entry, new_entry);
                   2726:
                   2727:                        if (new_entry->aref.ar_amap)
                   2728:                                amap_ref(new_entry, 0);
                   2729:
                   2730:                        if (new_entry->object.uvm_obj &&
                   2731:                            new_entry->object.uvm_obj->pgops->pgo_reference)
                   2732:                                new_entry->object.uvm_obj->pgops->pgo_reference
                   2733:                                    (new_entry->object.uvm_obj);
                   2734:
                   2735:                        /* new pmap has nothing wired in it */
                   2736:                        new_entry->wired_count = 0;
                   2737:
                   2738:                        new_entry->etype |=
                   2739:                            (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
                   2740:                        uvm_map_entry_link(new_map, new_map->header.prev,
                   2741:                            new_entry);
                   2742:
1.14      chuck    2743:                        /*
1.10      mrg      2744:                         * the new entry will need an amap.  it will either
                   2745:                         * need to be copied from the old entry or created
1.14      chuck    2746:                         * from scratch (if the old entry does not have an
                   2747:                         * amap).  can we defer this process until later
                   2748:                         * (by setting "needs_copy") or do we need to copy
                   2749:                         * the amap now?
1.10      mrg      2750:                         *
1.14      chuck    2751:                         * we must copy the amap now if any of the following
1.10      mrg      2752:                         * conditions hold:
1.14      chuck    2753:                         * 1. the old entry has an amap and that amap is
                   2754:                         *    being shared.  this means that the old (parent)
                   2755:                         *    process is sharing the amap with another
                   2756:                         *    process.  if we do not clear needs_copy here
                   2757:                         *    we will end up in a situation where both the
                   2758:                         *    parent and child process are refering to the
                   2759:                         *    same amap with "needs_copy" set.  if the
                   2760:                         *    parent write-faults, the fault routine will
                   2761:                         *    clear "needs_copy" in the parent by allocating
                   2762:                         *    a new amap.   this is wrong because the
                   2763:                         *    parent is supposed to be sharing the old amap
                   2764:                         *    and the new amap will break that.
1.10      mrg      2765:                         *
1.14      chuck    2766:                         * 2. if the old entry has an amap and a non-zero
                   2767:                         *    wire count then we are going to have to call
                   2768:                         *    amap_cow_now to avoid page faults in the
                   2769:                         *    parent process.   since amap_cow_now requires
                   2770:                         *    "needs_copy" to be clear we might as well
                   2771:                         *    clear it here as well.
1.10      mrg      2772:                         *
                   2773:                         */
                   2774:
1.14      chuck    2775:                        if (old_entry->aref.ar_amap != NULL) {
                   2776:
                   2777:                          if ((old_entry->aref.ar_amap->am_flags &
                   2778:                               AMAP_SHARED) != 0 ||
                   2779:                              old_entry->wired_count != 0) {
                   2780:
                   2781:                            amap_copy(new_map, new_entry, M_WAITOK, FALSE,
                   2782:                                      0, 0);
                   2783:                            /* XXXCDC: M_WAITOK ... ok? */
                   2784:                          }
1.10      mrg      2785:                        }
1.14      chuck    2786:
1.10      mrg      2787:                        /*
1.14      chuck    2788:                         * if the parent's entry is wired down, then the
                   2789:                         * parent process does not want page faults on
                   2790:                         * access to that memory.  this means that we
                   2791:                         * cannot do copy-on-write because we can't write
                   2792:                         * protect the old entry.   in this case we
                   2793:                         * resolve all copy-on-write faults now, using
                   2794:                         * amap_cow_now.   note that we have already
                   2795:                         * allocated any needed amap (above).
1.10      mrg      2796:                         */
                   2797:
1.14      chuck    2798:                        if (old_entry->wired_count != 0) {
1.1       mrg      2799:
1.14      chuck    2800:                          /*
                   2801:                           * resolve all copy-on-write faults now
                   2802:                           * (note that there is nothing to do if
                   2803:                           * the old mapping does not have an amap).
                   2804:                           * XXX: is it worthwhile to bother with pmap_copy
                   2805:                           * in this case?
                   2806:                           */
                   2807:                          if (old_entry->aref.ar_amap)
                   2808:                            amap_cow_now(new_map, new_entry);
                   2809:
                   2810:                        } else {
                   2811:
                   2812:                          /*
                   2813:                           * setup mappings to trigger copy-on-write faults
                   2814:                           * we must write-protect the parent if it has
                   2815:                           * an amap and it is not already "needs_copy"...
                   2816:                           * if it is already "needs_copy" then the parent
                   2817:                           * has already been write-protected by a previous
                   2818:                           * fork operation.
                   2819:                           *
                   2820:                           * if we do not write-protect the parent, then
                   2821:                           * we must be sure to write-protect the child
                   2822:                           * after the pmap_copy() operation.
                   2823:                           *
                   2824:                           * XXX: pmap_copy should have some way of telling
                   2825:                           * us that it didn't do anything so we can avoid
                   2826:                           * calling pmap_protect needlessly.
                   2827:                           */
                   2828:
                   2829:                          if (old_entry->aref.ar_amap) {
                   2830:
                   2831:                            if (!UVM_ET_ISNEEDSCOPY(old_entry)) {
                   2832:                              if (old_entry->max_protection & VM_PROT_WRITE) {
                   2833:                                pmap_protect(old_map->pmap,
                   2834:                                             old_entry->start,
                   2835:                                             old_entry->end,
                   2836:                                             old_entry->protection &
                   2837:                                             ~VM_PROT_WRITE);
                   2838:                              }
                   2839:                              old_entry->etype |= UVM_ET_NEEDSCOPY;
                   2840:                            }
                   2841:
                   2842:                            /*
                   2843:                             * parent must now be write-protected
                   2844:                             */
                   2845:                            protect_child = FALSE;
                   2846:                          } else {
                   2847:
                   2848:                            /*
                   2849:                             * we only need to protect the child if the
                   2850:                             * parent has write access.
                   2851:                             */
                   2852:                            if (old_entry->max_protection & VM_PROT_WRITE)
                   2853:                              protect_child = TRUE;
                   2854:                            else
                   2855:                              protect_child = FALSE;
                   2856:
                   2857:                          }
                   2858:
                   2859:                          /*
                   2860:                           * copy the mappings
                   2861:                           * XXX: need a way to tell if this does anything
                   2862:                           */
1.1       mrg      2863:
1.14      chuck    2864:                          pmap_copy(new_pmap, old_map->pmap,
1.10      mrg      2865:                                    new_entry->start,
1.14      chuck    2866:                                    (old_entry->end - old_entry->start),
1.10      mrg      2867:                                    old_entry->start);
1.14      chuck    2868:
                   2869:                          /*
                   2870:                           * protect the child's mappings if necessary
                   2871:                           */
                   2872:                          if (protect_child) {
                   2873:                            pmap_protect(new_pmap, new_entry->start,
                   2874:                                         new_entry->end,
                   2875:                                         new_entry->protection &
                   2876:                                                  ~VM_PROT_WRITE);
                   2877:                          }
1.10      mrg      2878:
                   2879:                        }
                   2880:                        break;
1.14      chuck    2881:                }  /* end of switch statement */
1.10      mrg      2882:                old_entry = old_entry->next;
1.1       mrg      2883:        }
                   2884:
1.10      mrg      2885:        new_map->size = old_map->size;
                   2886:        vm_map_unlock(old_map);
1.1       mrg      2887:
1.17      matthias 2888: #if (defined(i386) || defined(pc532)) && !defined(PMAP_NEW)
1.10      mrg      2889:        /*
                   2890:         * allocate zero fill area in the new vmspace's map for user
                   2891:         * page tables for ports that have old style pmaps that keep
                   2892:         * user page tables in the top part of the process' address
                   2893:         * space.
                   2894:         *
                   2895:         * XXXCDC: this should go away once all pmaps are fixed
                   2896:         */
                   2897:        {
1.22.2.1  eeh      2898:                vaddr_t addr = VM_MAXUSER_ADDRESS;
1.10      mrg      2899:                if (uvm_map(new_map, &addr, VM_MAX_ADDRESS - addr, NULL,
                   2900:                    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
                   2901:                    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_NORMAL,
                   2902:                    UVM_FLAG_FIXED|UVM_FLAG_COPYONW)) != KERN_SUCCESS)
                   2903:                        panic("vm_allocate of PT page area failed");
                   2904:        }
1.1       mrg      2905: #endif
                   2906:
                   2907: #ifdef SYSVSHM
1.10      mrg      2908:        if (vm1->vm_shm)
                   2909:                shmfork(vm1, vm2);
1.1       mrg      2910: #endif
                   2911:
1.10      mrg      2912:        UVMHIST_LOG(maphist,"<- done",0,0,0,0);
                   2913:        return(vm2);
1.1       mrg      2914: }
                   2915:
                   2916:
                   2917: /*
                   2918:  * uvm_map_sharemapcopy: handle the copying of a share map during a
                   2919:  * fork.  this is a helper function for uvmspace_fork.  it is called
                   2920:  * when we are doing a fork and we have encountered a map entry which
                   2921:  * has two attributes: [1] its inherit code is VM_INHERIT_COPY, and
                   2922:  * [2] it points to a share map (i.e. is_a_map is true).  in this case
                   2923:  * we must traverse the area of the share map pointed to by the
                   2924:  * old_entry and make private copies of the map entries in the share
                   2925:  * map.  this is somewhat similar to what happens in the non-share map
                   2926:  * case in fork, but it has to handle multiple map entries which may
                   2927:  * not be the proper size.  it was seperated out into its own function
                   2928:  * in order to make the main body of the fork code easier to read and
                   2929:  * understand!
                   2930:  *
                   2931:  * main_entry->offset = starting VA in share map for our mapping
                   2932:  *
                   2933:  * => main map is locked by caller.
                   2934:  * => we lock share map.
                   2935:  * => new map isn't in use yet (still being set up for the first time).
                   2936:  */
                   2937:
1.10      mrg      2938: void
                   2939: uvm_map_sharemapcopy(main_map, main_entry, new_map)
                   2940:        vm_map_t main_map, new_map;
                   2941:        vm_map_entry_t main_entry;
                   2942: {
                   2943:        vm_map_t share_map = main_entry->object.share_map;
                   2944:        vm_map_entry_t share_entry, new_entry;
1.22.2.1  eeh      2945:        vaddr_t shend = main_entry->offset +
1.10      mrg      2946:                (main_entry->end - main_entry->start);
                   2947:        int refs;
                   2948:
                   2949:        /*
                   2950:         * lock share map.  find first map entry of interest.   clip if needed.
                   2951:         */
                   2952:
                   2953:        vm_map_lock(share_map);
                   2954:        if (uvm_map_lookup_entry(share_map, main_entry->offset, &share_entry))
                   2955:                UVM_MAP_CLIP_START(share_map, share_entry, main_entry->offset);
                   2956:
                   2957:        while (share_entry != &share_map->header &&
                   2958:            share_entry->start < shend) {
1.1       mrg      2959:
1.10      mrg      2960:                /*
                   2961:                 * at this point we have a map entry that we need to make a
                   2962:                 * copy of.
                   2963:                 */
1.1       mrg      2964:
1.10      mrg      2965:                /* may need to clip? */
                   2966:                UVM_MAP_CLIP_END(share_map, share_entry, shend);
                   2967:                new_entry = uvm_mapent_alloc(new_map);
                   2968:
                   2969:                /* share_entry -> new_entry */
                   2970:                uvm_mapent_copy(share_entry, new_entry);
                   2971:
                   2972:                /* convert share map addresses back to main map addresses */
                   2973:                new_entry->start = main_entry->start +
                   2974:                        (new_entry->start - main_entry->offset);
                   2975:                new_entry->end = main_entry->start +
                   2976:                    (new_entry->end - main_entry->offset);
                   2977:
                   2978:                /* gain references */
                   2979:                if (new_entry->aref.ar_amap) {
                   2980:                        amap_ref(new_entry, 0);
                   2981:                }
                   2982:                if (new_entry->object.uvm_obj &&
                   2983:                    new_entry->object.uvm_obj->pgops->pgo_reference)
                   2984:                        new_entry->object.uvm_obj->
                   2985:                            pgops->pgo_reference(new_entry->object.uvm_obj);
                   2986:
                   2987:                /* init rest of new entry and insert at end of new map */
                   2988:                new_entry->wired_count = 0;
                   2989:                new_entry->etype |= (UVM_ET_COPYONWRITE|UVM_ET_NEEDSCOPY);
                   2990:                uvm_map_entry_link(new_map, new_map->header.prev, new_entry);
                   2991:
                   2992:                /*
                   2993:                 * don't bother trying to defer the copy in the share map case
                   2994:                 */
                   2995:                /* XXXCDC: WAITOK? */
                   2996:                amap_copy(new_map, new_entry, M_WAITOK, FALSE, 0, 0);
                   2997:
                   2998:                /* just like non-share case: can't COW wired memory */
                   2999:                if (share_entry->wired_count != 0 &&
                   3000:                    UVM_ET_ISCOPYONWRITE(share_entry)) {
                   3001:                        amap_cow_now(new_map, new_entry);
                   3002:                } else {
                   3003:
                   3004:                        /* just like non-share case */
                   3005:                        if (UVM_ET_ISCOPYONWRITE(share_entry)) {
                   3006:
                   3007:                                if (!UVM_ET_ISNEEDSCOPY(share_entry)) {
                   3008:
                   3009:                                        /*
                   3010:                                         * must write protect pages.   if we
                   3011:                                         * have the sole reference to the share
                   3012:                                         * map we can use good old pmap_protect.
                   3013:                                         * if we don't, then we have to use
                   3014:                                         * pmap_page_protect.  note that the VA
                   3015:                                         * new_entry->start (starting entry of
                   3016:                                         * this segment of the share map in
                   3017:                                         * child process) is the same virtual
                   3018:                                         * address it is mapped in in the parent
                   3019:                                         * (thus we can mix main_map and
                   3020:                                         * new_entry in the pmap_protect call
                   3021:                                         * below).
                   3022:                                         */
                   3023:
                   3024:                                        simple_lock(&share_map->ref_lock);
                   3025:                                        refs = share_map->ref_count;
                   3026:                                        simple_unlock(&share_map->ref_lock);
                   3027:                                        if (refs == 1) {
                   3028:                                                pmap_protect(main_map->pmap,
                   3029:                                                    new_entry->start,
                   3030:                                                    new_entry->end,
                   3031:                                                share_entry->protection &
                   3032:                                                    ~VM_PROT_WRITE);
                   3033:                                        } else {
                   3034:                                                if (share_entry->aref.ar_amap) {
                   3035:                                        simple_lock(
                   3036:                                            &share_entry->aref.ar_amap->am_l);
                   3037:                                        amap_share_protect(share_entry,
                   3038:                                            share_entry->protection &
                   3039:                                            ~VM_PROT_WRITE);
                   3040:                                        simple_unlock(
                   3041:                                            &share_entry->aref.ar_amap->am_l);
                   3042:                                                }
                   3043:                                                if (share_entry->object.uvm_obj)
                   3044:                                                {
1.1       mrg      3045: #ifdef DIAGNOSTIC
1.10      mrg      3046:                                if (!share_entry->object.uvm_obj->pgops->
                   3047:                                    pgo_shareprot)
                   3048:                        panic("fork: share_entry with no prot function");
                   3049: #endif
                   3050:                                simple_lock(
                   3051:                                    &share_entry->object.uvm_obj->vmobjlock);
                   3052:                                share_entry->object.uvm_obj->pgops->
                   3053:                                    pgo_shareprot(share_entry,
                   3054:                                share_entry->protection & ~VM_PROT_WRITE);
                   3055:                                simple_unlock(
                   3056:                                    &share_entry->object.uvm_obj->vmobjlock);
                   3057:                                                }
                   3058:                                        }
                   3059:                                        share_entry->etype |= UVM_ET_NEEDSCOPY;
                   3060:                                }
                   3061:                        }
                   3062:
                   3063:                        /*
                   3064:                         * now copy the mappings: note address are the same
                   3065:                         * in both main_map and new_map
                   3066:                         */
                   3067:                        pmap_copy(new_map->pmap, main_map->pmap,
                   3068:                            new_entry->start,
                   3069:                            (new_entry->end - new_entry->start),
                   3070:                            new_entry->start);
                   3071:
                   3072:                        /* just like non-share case */
                   3073:                        if (!UVM_ET_ISCOPYONWRITE(share_entry)) {
                   3074:                                pmap_protect(new_map->pmap, new_entry->start,
                   3075:                                    new_entry->end,
                   3076:                                    new_entry->protection & ~VM_PROT_WRITE);
                   3077:                        }
                   3078:                }
1.1       mrg      3079:
1.10      mrg      3080:                /* next entry in share map, please */
                   3081:                share_entry = share_entry->next;
1.1       mrg      3082:
1.10      mrg      3083:        }
                   3084:        /* done! */
1.1       mrg      3085: }
                   3086:
                   3087: #if defined(DDB)
                   3088:
                   3089: /*
                   3090:  * DDB hooks
                   3091:  */
                   3092:
                   3093: /*
                   3094:  * uvm_map_print: print out a map
                   3095:  */
                   3096:
1.10      mrg      3097: void
                   3098: uvm_map_print(map, full)
                   3099:        vm_map_t map;
                   3100:        boolean_t full;
                   3101: {
1.1       mrg      3102:
1.10      mrg      3103:        uvm_map_printit(map, full, printf);
1.1       mrg      3104: }
                   3105:
                   3106: /*
                   3107:  * uvm_map_printit: actually prints the map
                   3108:  */
                   3109:
1.10      mrg      3110: void
                   3111: uvm_map_printit(map, full, pr)
                   3112:        vm_map_t map;
                   3113:        boolean_t full;
                   3114:        void (*pr) __P((const char *, ...));
                   3115: {
                   3116:        vm_map_entry_t entry;
                   3117:
                   3118:        (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, map->min_offset,map->max_offset);
1.16      chuck    3119:        (*pr)("\t#ent=%d, sz=%d, ref=%d, main=%c, version=%d\n",
                   3120:            map->nentries, map->size, map->ref_count,
1.10      mrg      3121:            (map->is_main_map) ? 'T' : 'F', map->timestamp);
1.16      chuck    3122: #ifdef pmap_resident_count
                   3123:        (*pr)("\tpmap=%p(resident=%d)\n", map->pmap,
                   3124:            pmap_resident_count(map->pmap));
                   3125: #else
                   3126:        /* XXXCDC: this should be required ... */
                   3127:        (*pr)("\tpmap=%p(resident=<<NOT SUPPORTED!!!>>)\n", map->pmap);
                   3128: #endif
1.10      mrg      3129:        if (!full)
                   3130:                return;
                   3131:        for (entry = map->header.next; entry != &map->header;
                   3132:            entry = entry->next) {
                   3133:                (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%x, amap=%p/%d\n",
                   3134:                    entry, entry->start, entry->end, entry->object.uvm_obj,
                   3135:                    entry->offset, entry->aref.ar_amap, entry->aref.ar_slotoff);
                   3136:                (*pr)(
1.1       mrg      3137: "\tmap=%c, submap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, wc=%d, adv=%d\n",
1.10      mrg      3138:                    (entry->etype & UVM_ET_MAP) ? 'T' : 'F',
                   3139:                    (entry->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
                   3140:                    (entry->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
                   3141:                    (entry->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
                   3142:                    entry->protection, entry->max_protection,
                   3143:                    entry->inheritance, entry->wired_count, entry->advice);
                   3144:        }
1.1       mrg      3145: }
                   3146:
                   3147: /*
                   3148:  * uvm_object_print: print out an object
                   3149:  */
                   3150:
1.10      mrg      3151: void
                   3152: uvm_object_print(uobj, full)
                   3153:        struct uvm_object *uobj;
                   3154:        boolean_t full;
                   3155: {
1.1       mrg      3156:
1.10      mrg      3157:        uvm_object_printit(uobj, full, printf);
1.1       mrg      3158: }
                   3159:
                   3160: /*
                   3161:  * uvm_object_printit: actually prints the object
                   3162:  */
                   3163:
1.10      mrg      3164: void
                   3165: uvm_object_printit(uobj, full, pr)
                   3166:        struct uvm_object *uobj;
                   3167:        boolean_t full;
                   3168:        void (*pr) __P((const char *, ...));
                   3169: {
                   3170:        struct vm_page *pg;
                   3171:        int cnt = 0;
                   3172:
                   3173:        (*pr)("OBJECT %p: pgops=%p, npages=%d, ", uobj, uobj->pgops,
                   3174:            uobj->uo_npages);
                   3175:        if (uobj->uo_refs == UVM_OBJ_KERN)
                   3176:                (*pr)("refs=<SYSTEM>\n");
                   3177:        else
                   3178:                (*pr)("refs=%d\n", uobj->uo_refs);
                   3179:
                   3180:        if (!full) return;
                   3181:        (*pr)("  PAGES <pg,offset>:\n  ");
                   3182:        for (pg = uobj->memq.tqh_first ; pg ; pg = pg->listq.tqe_next, cnt++) {
                   3183:                (*pr)("<%p,0x%lx> ", pg, pg->offset);
                   3184:                if ((cnt % 3) == 2) (*pr)("\n  ");
                   3185:        }
                   3186:        if ((cnt % 3) != 2) (*pr)("\n");
1.1       mrg      3187: }
                   3188:
                   3189: /*
                   3190:  * uvm_page_print: print out a page
                   3191:  */
                   3192:
1.10      mrg      3193: void
                   3194: uvm_page_print(pg, full)
                   3195:        struct vm_page *pg;
                   3196:        boolean_t full;
                   3197: {
1.1       mrg      3198:
1.10      mrg      3199:        uvm_page_printit(pg, full, printf);
1.1       mrg      3200: }
                   3201:
                   3202: /*
                   3203:  * uvm_page_printit: actually print the page
                   3204:  */
                   3205:
1.10      mrg      3206: void
                   3207: uvm_page_printit(pg, full, pr)
                   3208:        struct vm_page *pg;
                   3209:        boolean_t full;
                   3210:        void (*pr) __P((const char *, ...));
                   3211: {
                   3212:        struct vm_page *lcv;
                   3213:        struct uvm_object *uobj;
                   3214:        struct pglist *pgl;
1.1       mrg      3215:
1.10      mrg      3216:        (*pr)("PAGE %p:\n", pg);
                   3217:        (*pr)("  flags=0x%x, pqflags=0x%x, vers=%d, wire_count=%d, pa=0x%lx\n",
1.22.2.2! eeh      3218:        pg->flags, pg->pqflags, pg->version, pg->wire_count, (long)pg->phys_addr);
1.10      mrg      3219:        (*pr)("  uobject=%p, uanon=%p, offset=0x%lx loan_count=%d\n",
1.1       mrg      3220:        pg->uobject, pg->uanon, pg->offset, pg->loan_count);
                   3221: #if defined(UVM_PAGE_TRKOWN)
1.10      mrg      3222:        if (pg->flags & PG_BUSY)
                   3223:                (*pr)("  owning process = %d, tag=%s\n",
                   3224:                    pg->owner, pg->owner_tag);
                   3225:        else
                   3226:                (*pr)("  page not busy, no owner\n");
1.1       mrg      3227: #else
1.10      mrg      3228:        (*pr)("  [page ownership tracking disabled]\n");
1.1       mrg      3229: #endif
                   3230:
1.10      mrg      3231:        if (!full)
                   3232:                return;
                   3233:
                   3234:        /* cross-verify object/anon */
                   3235:        if ((pg->pqflags & PQ_FREE) == 0) {
                   3236:                if (pg->pqflags & PQ_ANON) {
                   3237:                        if (pg->uanon == NULL || pg->uanon->u.an_page != pg)
                   3238:                            (*pr)("  >>> ANON DOES NOT POINT HERE <<< (%p)\n",
                   3239:                                (pg->uanon) ? pg->uanon->u.an_page : NULL);
                   3240:                        else
                   3241:                                (*pr)("  anon backpointer is OK\n");
                   3242:                } else {
                   3243:                        uobj = pg->uobject;
                   3244:                        if (uobj) {
                   3245:                                (*pr)("  checking object list\n");
                   3246:                                for (lcv = uobj->memq.tqh_first ; lcv ;
                   3247:                                    lcv = lcv->listq.tqe_next) {
                   3248:                                        if (lcv == pg) break;
                   3249:                                }
                   3250:                                if (lcv)
                   3251:                                        (*pr)("  page found on object list\n");
                   3252:                                else
                   3253:                        (*pr)("  >>> PAGE NOT FOUND ON OBJECT LIST! <<<\n");
                   3254:                        }
                   3255:                }
                   3256:        }
1.1       mrg      3257:
1.10      mrg      3258:        /* cross-verify page queue */
                   3259:        if (pg->pqflags & PQ_FREE)
1.22      thorpej  3260:                pgl = &uvm.page_free[uvm_page_lookup_freelist(pg)];
1.10      mrg      3261:        else if (pg->pqflags & PQ_INACTIVE)
                   3262:                pgl = (pg->pqflags & PQ_SWAPBACKED) ?
                   3263:                    &uvm.page_inactive_swp : &uvm.page_inactive_obj;
                   3264:        else if (pg->pqflags & PQ_ACTIVE)
                   3265:                pgl = &uvm.page_active;
                   3266:        else
                   3267:                pgl = NULL;
                   3268:
                   3269:        if (pgl) {
                   3270:                (*pr)("  checking pageq list\n");
                   3271:                for (lcv = pgl->tqh_first ; lcv ; lcv = lcv->pageq.tqe_next) {
                   3272:                        if (lcv == pg) break;
                   3273:                }
                   3274:                if (lcv)
                   3275:                        (*pr)("  page found on pageq list\n");
                   3276:                else
                   3277:                        (*pr)("  >>> PAGE NOT FOUND ON PAGEQ LIST! <<<\n");
                   3278:        }
1.1       mrg      3279: }
                   3280: #endif

CVSweb <webmaster@jp.NetBSD.org>