Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/uvm/uvm_map.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/uvm/uvm_map.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.246 retrieving revision 1.246.4.3 diff -u -p -r1.246 -r1.246.4.3 --- src/sys/uvm/uvm_map.c 2007/11/26 08:22:32 1.246 +++ src/sys/uvm/uvm_map.c 2007/12/13 05:06:05 1.246.4.3 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_map.c,v 1.246 2007/11/26 08:22:32 xtraeme Exp $ */ +/* $NetBSD: uvm_map.c,v 1.246.4.3 2007/12/13 05:06:05 yamt Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -71,7 +71,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.246 2007/11/26 08:22:32 xtraeme Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.246.4.3 2007/12/13 05:06:05 yamt Exp $"); #include "opt_ddb.h" #include "opt_uvmhist.h" @@ -83,6 +83,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v #include #include #include +#include #include #include #include @@ -178,7 +179,7 @@ extern struct vm_map *pager_map; /* XXX #define VM_MAP_USE_KMAPENT_FLAGS(flags) \ (((flags) & VM_MAP_INTRSAFE) != 0) #define VM_MAP_USE_KMAPENT(map) \ - (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map) + (VM_MAP_USE_KMAPENT_FLAGS((map)->flags) /*|| (map) == kernel_map*/) /* * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging @@ -635,9 +636,12 @@ static struct vm_map_entry * uvm_mapent_alloc(struct vm_map *map, int flags) { struct vm_map_entry *me; +#if 0 int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK; +#endif UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist); +#if 0 if (VM_MAP_USE_KMAPENT(map)) { me = uvm_kmapent_alloc(map, flags); } else { @@ -646,6 +650,14 @@ uvm_mapent_alloc(struct vm_map *map, int return NULL; me->flags = 0; } +#else + me = kmem_alloc(sizeof(*me), + (flags & UVM_FLAG_NOWAIT) ? KM_NOSLEEP : KM_SLEEP); + if (__predict_false(me == NULL)) { + return NULL; + } + me->flags = 0; +#endif UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me, ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0); @@ -693,11 +705,14 @@ uvm_mapent_free(struct vm_map_entry *me) UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]", me, me->flags, 0, 0); +#if 0 if (me->flags & UVM_MAP_KERNEL) { uvm_kmapent_free(me); } else { pool_put(&uvm_map_entry_pool, me); } +#endif + kmem_free(me, sizeof(*me)); } /* @@ -1026,7 +1041,7 @@ uvm_map(struct vm_map *map, vaddr_t *sta new_entry = NULL; if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) || - map == pager_map) { + map == pager_map || map == kernel_map) { new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT)); if (__predict_false(new_entry == NULL)) return ENOMEM; @@ -1108,50 +1123,66 @@ retry: } vm_map_lock(map); /* could sleep here */ } - prev_entry = uvm_map_findspace(map, start, size, &start, - uobj, uoffset, align, flags); - if (prev_entry == NULL) { - unsigned int timestamp; - - timestamp = map->timestamp; - UVMHIST_LOG(maphist,"waiting va timestamp=0x%x", - timestamp,0,0,0); - map->flags |= VM_MAP_WANTVA; - vm_map_unlock(map); + if (map == kernel_map) { + KASSERT((flags & UVM_FLAG_FIXED) == 0); + if (align == 1) { + align = 0; /* XXX */ + } +#if defined(PMAP_PREFER) + XXX +#else /* defined(PMAP_PREFER) */ + start = (vaddr_t)vmem_xalloc(kernel_va_arena, size, align, + 0, 0, 0, 0, + ((flags & UVM_FLAG_NOWAIT) ? 
VM_NOSLEEP : VM_SLEEP) | + VM_INSTANTFIT); +#endif /* defined(PMAP_PREFER) */ + if (start == 0) { + return ENOMEM; + } + if (uvm_map_lookup_entry(map, start, &prev_entry)) { + panic("%s: va %p in use", __func__, (void *)start); + } + goto done; + } else { + prev_entry = uvm_map_findspace(map, start, size, &start, + uobj, uoffset, align, flags); + if (prev_entry == NULL) { + unsigned int timestamp; + + timestamp = map->timestamp; + UVMHIST_LOG(maphist,"waiting va timestamp=0x%x", + timestamp,0,0,0); + map->flags |= VM_MAP_WANTVA; + vm_map_unlock(map); - /* - * try to reclaim kva and wait until someone does unmap. - * fragile locking here, so we awaken every second to - * recheck the condition. - */ + /* + * try to reclaim kva and wait until someone does unmap. + * fragile locking here, so we awaken every second to + * recheck the condition. + */ - vm_map_drain(map, flags); + vm_map_drain(map, flags); - mutex_enter(&map->misc_lock); - while ((map->flags & VM_MAP_WANTVA) != 0 && - map->timestamp == timestamp) { - if ((flags & UVM_FLAG_WAITVA) == 0) { - mutex_exit(&map->misc_lock); - UVMHIST_LOG(maphist, - "<- uvm_map_findspace failed!", 0,0,0,0); - return ENOMEM; - } else { - cv_timedwait(&map->cv, &map->misc_lock, hz); + mutex_enter(&map->misc_lock); + while ((map->flags & VM_MAP_WANTVA) != 0 && + map->timestamp == timestamp) { + if ((flags & UVM_FLAG_WAITVA) == 0) { + mutex_exit(&map->misc_lock); + UVMHIST_LOG(maphist, + "<- uvm_map_findspace failed!", + 0,0,0,0); + return ENOMEM; + } else { + cv_timedwait(&map->cv, &map->misc_lock, + hz); + } } + mutex_exit(&map->misc_lock); + goto retry; } - mutex_exit(&map->misc_lock); - goto retry; } -#ifdef PMAP_GROWKERNEL - /* - * If the kernel pmap can't map the requested space, - * then allocate more resources for it. - */ - if (map == kernel_map && uvm_maxkaddr < (start + size)) - uvm_maxkaddr = pmap_growkernel(start + size); -#endif - +done: UVMMAP_EVCNT_INCR(map_call); /* @@ -1522,6 +1553,32 @@ done: } /* + * uvm_map_lookup_entry_bytree: lookup an entry in tree + */ + +static bool +uvm_map_lookup_entry_bytree(struct vm_map *map, vaddr_t address, + struct vm_map_entry **entry /* OUT */) +{ + struct vm_map_entry *prev = &map->header; + struct vm_map_entry *cur = RB_ROOT(&map->rbhead); + + while (cur) { + if (address >= cur->start) { + if (address < cur->end) { + *entry = cur; + return true; + } + prev = cur; + cur = RB_RIGHT(cur, rb_entry); + } else + cur = RB_LEFT(cur, rb_entry); + } + *entry = prev; + return false; +} + +/* * uvm_map_lookup_entry: find map entry at or before an address * * => map must at least be read-locked by caller @@ -1590,26 +1647,15 @@ uvm_map_lookup_entry(struct vm_map *map, uvm_map_check(map, __func__); if (use_tree) { - struct vm_map_entry *prev = &map->header; - cur = RB_ROOT(&map->rbhead); - /* * Simple lookup in the tree. Happens when the hint is * invalid, or nentries reach a threshold. 
*/ - while (cur) { - if (address >= cur->start) { - if (address < cur->end) { - *entry = cur; - goto got; - } - prev = cur; - cur = RB_RIGHT(cur, rb_entry); - } else - cur = RB_LEFT(cur, rb_entry); + if (uvm_map_lookup_entry_bytree(map, address, entry)) { + goto got; + } else { + goto failed; } - *entry = prev; - goto failed; } /* @@ -2223,6 +2269,10 @@ uvm_unmap_remove(struct vm_map *map, vad } } #endif /* defined(DEBUG) */ + if (map == kernel_map) { + vmem_xfree(kernel_va_arena, (vmem_addr_t)entry->start, + entry->end - entry->start); + } /* * remove entry from map and put it on our list of entries @@ -3811,6 +3861,25 @@ uvm_map_checkprot(struct vm_map *map, va struct vm_map_entry *tmp_entry; if (!uvm_map_lookup_entry(map, start, &tmp_entry)) { + if (VM_MAP_IS_KERNEL(map)) { + /* + * XXX a kludge for mem(4). + * non-pageable mappings don't have + * corresponding entries in vm_map. + * + * maybe we can iterate segments in kernel_va_arena, + * but don't try too hard to be correct here, + * because uiomove() will pick an error anyway... + */ + vaddr_t va; + + for (va = start; va < end; va += PAGE_SIZE) { + if (!pmap_extract(pmap_kernel(), va, NULL)) { + return false; + } + } + return true; + } return (false); } entry = tmp_entry; @@ -5066,3 +5135,26 @@ vm_map_starved_p(struct vm_map *map) } return false; } + +#if defined(DDB) +void +uvm_whatis(uintptr_t addr, void (*pr)(const char *, ...)) +{ + struct vm_map *map; + + for (map = kernel_map;;) { + struct vm_map_entry *entry; + + if (!uvm_map_lookup_entry_bytree(map, (vaddr_t)addr, &entry)) { + break; + } + (*pr)("%p is %p+%zu from VMMAP %p\n", + (void *)addr, (void *)entry->start, + (size_t)(addr - (uintptr_t)entry->start), map); + if (!UVM_ET_ISSUBMAP(entry)) { + break; + } + map = entry->object.sub_map; + } +} +#endif /* defined(DDB) */
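
For readers following the change above: the new kernel_map path drops the old kmapent/pool machinery, takes map-entry storage straight from kmem_alloc(9), carves kernel virtual addresses out of kernel_va_arena with vmem_xalloc(), and gives both back in uvm_unmap_remove()/uvm_mapent_free(). The sketch below is illustrative only and is not part of the patch; the arena name, the argument order and the flag translation are copied from the diff, while the example_* wrappers, the extern declaration and the standalone layout are assumptions.

/*
 * Illustrative sketch only -- not part of the patch.  It restates the
 * allocation/free pairing the diff introduces for kernel_map: entry
 * storage comes from kmem_alloc(9) and kernel VA is taken from
 * kernel_va_arena with vmem_xalloc(), using the same arguments and flag
 * translation as the new code in uvm_map() and uvm_unmap_remove().
 * The example_* names are hypothetical.
 */

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/vmem.h>

#include <uvm/uvm.h>

extern vmem_t *kernel_va_arena;	/* referenced, but not defined, in the diff */

static struct vm_map_entry *
example_entry_alloc(int flags)
{

	/* UVM_FLAG_NOWAIT turns into KM_NOSLEEP, as in uvm_mapent_alloc(). */
	return kmem_alloc(sizeof(struct vm_map_entry),
	    (flags & UVM_FLAG_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
}

static vaddr_t
example_va_alloc(vsize_t size, vsize_t align, int flags)
{

	/*
	 * Same call as the new kernel_map branch in uvm_map(): an
	 * instant-fit allocation that sleeps unless UVM_FLAG_NOWAIT was
	 * passed; 0 is returned on failure.
	 */
	return (vaddr_t)vmem_xalloc(kernel_va_arena, size, align, 0, 0, 0, 0,
	    ((flags & UVM_FLAG_NOWAIT) ? VM_NOSLEEP : VM_SLEEP) |
	    VM_INSTANTFIT);
}

static void
example_release(vaddr_t va, vsize_t size, struct vm_map_entry *entry)
{

	/* Mirrors uvm_unmap_remove() and uvm_mapent_free() in the diff. */
	vmem_xfree(kernel_va_arena, (vmem_addr_t)va, size);
	kmem_free(entry, sizeof(*entry));
}

The failure convention matches the diff as well: kmem_alloc() with KM_NOSLEEP and vmem_xalloc() with VM_NOSLEEP return NULL/0 on failure, which uvm_map() turns into ENOMEM.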
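
The factored-out uvm_map_lookup_entry_bytree() keeps the long-standing "entry at or before the address" convention: true plus the covering entry on a hit, false plus the closest preceding entry (or &map->header) on a miss, which is also what the new uvm_whatis() relies on when walking submaps. Below is a minimal user-space analogue of that return convention over a plain sorted array instead of the red-black tree; the function and type names and the test addresses are made up for illustration.

/*
 * Illustrative only: same return convention as
 * uvm_map_lookup_entry_bytree(), on a sorted array of [start, end)
 * ranges instead of a red-black tree of map entries.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct range {
	unsigned long start;
	unsigned long end;		/* exclusive */
};

static bool
lookup_at_or_before(const struct range *r, size_t n, unsigned long addr,
    const struct range **out)
{
	const struct range *prev = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (addr < r[i].start)
			break;		/* sorted: no later range can match */
		if (addr < r[i].end) {
			*out = &r[i];	/* hit: covering range */
			return true;
		}
		prev = &r[i];		/* best "entry before addr" so far */
	}
	*out = prev;			/* miss: closest preceding range, or NULL */
	return false;
}

int
main(void)
{
	static const struct range ranges[] = {
		{ 0x1000, 0x2000 }, { 0x4000, 0x5000 },
	};
	const struct range *e;

	printf("0x1800 hit=%d\n", lookup_at_or_before(ranges, 2, 0x1800, &e));
	printf("0x3000 hit=%d prev=%#lx\n",
	    lookup_at_or_before(ranges, 2, 0x3000, &e), e ? e->start : 0UL);
	return 0;
}

0x1800 falls inside the first range and reports a hit; 0x3000 falls in the gap, so the lookup reports a miss and hands back the first range as the preceding entry, just as the in-kernel lookup hands back the predecessor entry for callers such as uvm_map_checkprot().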