Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/uvm/uvm_map.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/uvm/uvm_map.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.331 retrieving revision 1.331.2.2 diff -u -p -r1.331 -r1.331.2.2 --- src/sys/uvm/uvm_map.c 2014/10/26 01:42:07 1.331 +++ src/sys/uvm/uvm_map.c 2015/09/22 12:06:17 1.331.2.2 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_map.c,v 1.331 2014/10/26 01:42:07 christos Exp $ */ +/* $NetBSD: uvm_map.c,v 1.331.2.2 2015/09/22 12:06:17 skrll Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -66,7 +66,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.331 2014/10/26 01:42:07 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.331.2.2 2015/09/22 12:06:17 skrll Exp $"); #include "opt_ddb.h" #include "opt_uvmhist.h" @@ -501,7 +501,7 @@ _uvm_map_sanity(struct vm_map *map) const struct vm_map_entry *e; struct vm_map_entry *hint = map->hint; - e = &map->header; + e = &map->header; for (;;) { if (map->first_free == e) { first_free_found = true; @@ -538,7 +538,7 @@ _uvm_tree_sanity(struct vm_map *map) for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) { if (tmp->gap != uvm_rb_gap(tmp)) { - printf("%d/%d gap %lx != %lx %s\n", + printf("%d/%d gap %#lx != %#lx %s\n", n + 1, map->nentries, (ulong)tmp->gap, (ulong)uvm_rb_gap(tmp), tmp->next == &map->header ? 
"(last)" : ""); @@ -563,7 +563,7 @@ _uvm_tree_sanity(struct vm_map *map) trtmp = NULL; for (tmp = map->header.next; tmp != &map->header; tmp = tmp->next) { if (tmp->maxgap != uvm_rb_maxgap(tmp)) { - printf("maxgap %lx != %lx\n", + printf("maxgap %#lx != %#lx\n", (ulong)tmp->maxgap, (ulong)uvm_rb_maxgap(tmp)); goto error; @@ -763,7 +763,7 @@ uvm_mapent_alloc(struct vm_map *map, int } me->flags = 0; - UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me, + UVMHIST_LOG(maphist, "<- new entry=%p [kentry=%d]", me, (map == kernel_map), 0, 0); return me; } @@ -777,7 +777,7 @@ uvm_mapent_free(struct vm_map_entry *me) { UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]", + UVMHIST_LOG(maphist,"<- freeing map entry=%p [flags=%d]", me, me->flags, 0, 0); pool_cache_put(&uvm_map_entry_cache, me); } @@ -1078,7 +1078,7 @@ uvm_map(struct vm_map *map, vaddr_t *sta } #if defined(DEBUG) - if (!error && VM_MAP_IS_KERNEL(map)) { + if (!error && VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) { uvm_km_check_empty(map, *startp, *startp + size); } #endif /* defined(DEBUG) */ @@ -1105,9 +1105,9 @@ uvm_map_prepare(struct vm_map *map, vadd UVMHIST_FUNC("uvm_map_prepare"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)", + UVMHIST_LOG(maphist, "(map=%p, start=%#lx, size=%lu, flags=%#x)", map, start, size, flags); - UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); + UVMHIST_LOG(maphist, " uobj/offset %p/%ld", uobj, uoffset,0,0); /* * detect a popular device driver bug. @@ -1129,7 +1129,7 @@ uvm_map_prepare(struct vm_map *map, vadd */ if ((prot & maxprot) != prot) { - UVMHIST_LOG(maphist, "<- prot. failure: prot=0x%x, max=0x%x", + UVMHIST_LOG(maphist, "<- prot. 
failure: prot=%#x, max=%#x", prot, maxprot,0,0); return EACCES; } @@ -1150,7 +1150,7 @@ retry: unsigned int timestamp; timestamp = map->timestamp; - UVMHIST_LOG(maphist,"waiting va timestamp=0x%x", + UVMHIST_LOG(maphist,"waiting va timestamp=%#x", timestamp,0,0,0); map->flags |= VM_MAP_WANTVA; vm_map_unlock(map); @@ -1259,9 +1259,9 @@ uvm_map_enter(struct vm_map *map, const UVMHIST_FUNC("uvm_map_enter"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, start=0x%x, size=%d, flags=0x%x)", + UVMHIST_LOG(maphist, "(map=%p, start=%#lx, size=%lu, flags=%#x)", map, start, size, flags); - UVMHIST_LOG(maphist, " uobj/offset 0x%x/%d", uobj, uoffset,0,0); + UVMHIST_LOG(maphist, " uobj/offset %p/%ld", uobj, uoffset,0,0); KASSERT(map->hint == prev_entry); /* bimerge case assumes this */ KASSERT(vm_map_locked_p(map)); @@ -1609,7 +1609,7 @@ uvm_map_lookup_entry(struct vm_map *map, UVMHIST_FUNC("uvm_map_lookup_entry"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,addr=0x%x,ent=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,addr=%#lx,ent=%p)", map, address, entry, 0); /* * @@ -1640,7 +1640,7 @@ uvm_map_lookup_entry(struct vm_map *map, if (cur != &map->header && cur->end > address) { UVMMAP_EVCNT_INCR(mlk_hint); *entry = cur; - UVMHIST_LOG(maphist,"<- got it via hint (0x%x)", + UVMHIST_LOG(maphist,"<- got it via hint (%p)", cur, 0, 0, 0); uvm_mapent_check(*entry); return (true); @@ -1688,7 +1688,7 @@ uvm_map_lookup_entry(struct vm_map *map, *entry = cur; got: SAVE_HINT(map, map->hint, *entry); - UVMHIST_LOG(maphist,"<- search got it (0x%x)", + UVMHIST_LOG(maphist,"<- search got it (%p)", cur, 0, 0, 0); KDASSERT((*entry)->start <= address); KDASSERT(address < (*entry)->end); @@ -1803,7 +1803,7 @@ uvm_map_findspace(struct vm_map *map, va UVMHIST_FUNC("uvm_map_findspace"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, hint=0x%x, len=%d, flags=0x%x)", + UVMHIST_LOG(maphist, "(map=%p, hint=%#lx, len=%lu, flags=%#x)", map, hint, length, flags); KASSERT((flags & 
UVM_FLAG_COLORMATCH) != 0 || (align & (align - 1)) == 0); KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors); @@ -1826,7 +1826,7 @@ uvm_map_findspace(struct vm_map *map, va hint = vm_map_min(map); } if (hint > vm_map_max(map)) { - UVMHIST_LOG(maphist,"<- VA 0x%x > range [0x%x->0x%x]", + UVMHIST_LOG(maphist,"<- VA %#lx > range [%#lx->%#lx]", hint, vm_map_min(map), vm_map_max(map), 0); return (NULL); } @@ -2101,7 +2101,7 @@ nextgap: found: SAVE_HINT(map, map->hint, entry); *result = hint; - UVMHIST_LOG(maphist,"<- got it! (result=0x%x)", hint, 0,0,0); + UVMHIST_LOG(maphist,"<- got it! (result=%#lx)", hint, 0,0,0); KASSERT( topdown || hint >= orig_hint); KASSERT(!topdown || hint <= orig_hint); KASSERT(entry->end <= hint); @@ -2140,7 +2140,7 @@ uvm_unmap_remove(struct vm_map *map, vad vaddr_t len; UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x, start=0x%x, end=0x%x)", + UVMHIST_LOG(maphist,"(map=%p, start=%#lx, end=%#lx)", map, start, end, 0); VM_MAP_RANGE_CHECK(map, start, end); @@ -2253,7 +2253,7 @@ uvm_unmap_remove(struct vm_map *map, vad } } - if (VM_MAP_IS_KERNEL(map)) { + if (VM_MAP_IS_KERNEL(map) && (flags & UVM_FLAG_NOWAIT) == 0) { uvm_km_check_empty(map, entry->start, entry->end); } @@ -2264,7 +2264,7 @@ uvm_unmap_remove(struct vm_map *map, vad * that we've nuked. then go to next entry. */ - UVMHIST_LOG(maphist, " removed map entry 0x%x", entry, 0, 0,0); + UVMHIST_LOG(maphist, " removed map entry %p", entry, 0, 0,0); /* critical! 
prevents stale hint */ SAVE_HINT(map, entry, entry->prev); @@ -2321,7 +2321,7 @@ uvm_unmap_detach(struct vm_map_entry *fi while (first_entry) { KASSERT(!VM_MAPENT_ISWIRED(first_entry)); UVMHIST_LOG(maphist, - " detach 0x%x: amap=0x%x, obj=0x%x, submap?=%d", + " detach %p: amap=%p, obj=%p, submap?=%d", first_entry, first_entry->aref.ar_amap, first_entry->object.uvm_obj, UVM_ET_ISSUBMAP(first_entry)); @@ -2373,7 +2373,7 @@ uvm_map_reserve(struct vm_map *map, vsiz { UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, offset=0x%x,addr=0x%x)", + UVMHIST_LOG(maphist, "(map=%p, size=%#lx, offset=%#lx, addr=%p)", map,size,offset,raddr); size = round_page(size); @@ -2389,7 +2389,7 @@ uvm_map_reserve(struct vm_map *map, vsiz return (false); } - UVMHIST_LOG(maphist, "<- done (*raddr=0x%x)", *raddr,0,0,0); + UVMHIST_LOG(maphist, "<- done (*raddr=%#lx)", *raddr,0,0,0); return (true); } @@ -2452,9 +2452,9 @@ uvm_map_replace(struct vm_map *map, vadd panic("uvm_map_replace1"); if (tmpent->start >= tmpent->end || tmpent->end > end) { panic("uvm_map_replace2: " - "tmpent->start=0x%"PRIxVADDR - ", tmpent->end=0x%"PRIxVADDR - ", end=0x%"PRIxVADDR, + "tmpent->start=%#"PRIxVADDR + ", tmpent->end=%#"PRIxVADDR + ", end=%#"PRIxVADDR, tmpent->start, tmpent->end, end); } cur = tmpent->end; @@ -2556,9 +2556,9 @@ uvm_map_extract(struct vm_map *srcmap, v vsize_t nsize; UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(srcmap=0x%x,start=0x%x, len=0x%x", srcmap, start, + UVMHIST_LOG(maphist,"(srcmap=%p,start=%#lx, len=%#lx", srcmap, start, len,0); - UVMHIST_LOG(maphist," ...,dstmap=0x%x, flags=0x%x)", dstmap,flags,0,0); + UVMHIST_LOG(maphist," ...,dstmap=%p, flags=%#x)", dstmap,flags,0,0); /* * step 0: sanity check: start must be on a page boundary, length @@ -2576,13 +2576,13 @@ uvm_map_extract(struct vm_map *srcmap, v if ((flags & UVM_EXTRACT_RESERVED) == 0) { dstaddr = vm_map_min(dstmap); - if 
(!uvm_map_reserve(dstmap, len, start, + if (!uvm_map_reserve(dstmap, len, start, atop(start) & uvmexp.colormask, &dstaddr, UVM_FLAG_COLORMATCH)) return (ENOMEM); KASSERT((atop(start ^ dstaddr) & uvmexp.colormask) == 0); *dstaddrp = dstaddr; /* pass address back to caller */ - UVMHIST_LOG(maphist, " dstaddr=0x%x", dstaddr,0,0,0); + UVMHIST_LOG(maphist, " dstaddr=%#lx", dstaddr,0,0,0); } else { dstaddr = *dstaddrp; } @@ -2945,7 +2945,7 @@ uvm_map_protect(struct vm_map *map, vadd struct vm_map_entry *current, *entry; int error = 0; UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_prot=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_prot=%#x)", map, start, end, new_prot); vm_map_lock(map); @@ -3090,7 +3090,7 @@ uvm_map_inherit(struct vm_map *map, vadd { struct vm_map_entry *entry, *temp_entry; UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_inh=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_inh=%#x)", map, start, end, new_inheritance); switch (new_inheritance) { @@ -3133,7 +3133,7 @@ uvm_map_advice(struct vm_map *map, vaddr { struct vm_map_entry *entry, *temp_entry; UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_adv=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_adv=%#x)", map, start, end, new_advice); vm_map_lock(map); @@ -3182,7 +3182,7 @@ uvm_map_willneed(struct vm_map *map, vad { struct vm_map_entry *entry; UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%lx,start=0x%lx,end=0x%lx)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx)", map, start, end, 0); vm_map_lock_read(map); @@ -3249,7 +3249,7 @@ uvm_map_pageable(struct vm_map *map, vad u_int timestamp_save; #endif UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist); - 
UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,new_pageable=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,new_pageable=%u)", map, start, end, new_pageable); KASSERT(map->flags & VM_MAP_PAGEABLE); @@ -3497,7 +3497,7 @@ uvm_map_pageable_all(struct vm_map *map, u_int timestamp_save; #endif UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,flags=0x%x)", map, flags, 0, 0); + UVMHIST_LOG(maphist,"(map=%p,flags=%#x)", map, flags, 0, 0); KASSERT(map->flags & VM_MAP_PAGEABLE); @@ -3732,7 +3732,7 @@ uvm_map_clean(struct vm_map *map, vaddr_ int error, refs; UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(map=0x%x,start=0x%x,end=0x%x,flags=0x%x)", + UVMHIST_LOG(maphist,"(map=%p,start=%#lx,end=%#lx,flags=%#x)", map, start, end, flags); KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) != (PGO_FREE|PGO_DEACTIVATE)); @@ -3798,6 +3798,9 @@ uvm_map_clean(struct vm_map *map, vaddr_ if (pg == NULL) { continue; } + if (pg->flags & PG_BUSY) { + continue; + } switch (flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)) { @@ -3935,7 +3938,7 @@ uvmspace_alloc(vaddr_t vmin, vaddr_t vma vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK); uvmspace_init(vm, NULL, vmin, vmax, topdown); - UVMHIST_LOG(maphist,"<- done (vm=0x%x)", vm,0,0,0); + UVMHIST_LOG(maphist,"<- done (vm=%p)", vm,0,0,0); return (vm); } @@ -3951,6 +3954,10 @@ uvmspace_init(struct vmspace *vm, struct { UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist); + UVMHIST_LOG(maphist, "(vm=%p, pmap=%p, vmin=%#lx, vmax=%#lx", + vm, pmap, vmin, vmax); + UVMHIST_LOG(maphist, " topdown=%u)", topdown, 0, 0, 0); + memset(vm, 0, sizeof(*vm)); uvm_map_setup(&vm->vm_map, vmin, vmax, VM_MAP_PAGEABLE | (topdown ? 
VM_MAP_TOPDOWN : 0) @@ -4148,7 +4155,7 @@ uvmspace_free(struct vmspace *vm) UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist,"(vm=0x%x) ref=%d", vm, vm->vm_refcnt,0,0); + UVMHIST_LOG(maphist,"(vm=%p) ref=%d", vm, vm->vm_refcnt,0,0); mutex_enter(&map->misc_lock); n = --vm->vm_refcnt; mutex_exit(&map->misc_lock); @@ -4621,7 +4628,7 @@ uvm_unmap1(struct vm_map *map, vaddr_t s struct vm_map_entry *dead_entries; UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist); - UVMHIST_LOG(maphist, " (map=0x%x, start=0x%x, end=0x%x)", + UVMHIST_LOG(maphist, " (map=%p, start=%#lx, end=%#lx)", map, start, end, 0); if (map == kernel_map) { LOCKDEBUG_MEM_CHECK((void *)start, end - start); @@ -4705,9 +4712,9 @@ uvm_map_printit(struct vm_map *map, bool { struct vm_map_entry *entry; - (*pr)("MAP %p: [0x%lx->0x%lx]\n", map, vm_map_min(map), + (*pr)("MAP %p: [%#lx->%#lx]\n", map, vm_map_min(map), vm_map_max(map)); - (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=0x%x\n", + (*pr)("\t#ent=%d, sz=%d, ref=%d, version=%d, flags=%#x\n", map->nentries, map->size, map->ref_count, map->timestamp, map->flags); (*pr)("\tpmap=%p(resident=%ld, wired=%ld)\n", map->pmap, @@ -4716,7 +4723,7 @@ uvm_map_printit(struct vm_map *map, bool return; for (entry = map->header.next; entry != &map->header; entry = entry->next) { - (*pr)(" - %p: 0x%lx->0x%lx: obj=%p/0x%llx, amap=%p/%d\n", + (*pr)(" - %p: %#lx->%#lx: obj=%p/%#llx, amap=%p/%d\n", entry, entry->start, entry->end, entry->object.uvm_obj, (long long)entry->offset, entry->aref.ar_amap, entry->aref.ar_pageoff);