--- version 1.114, 2011/04/23 18:14:12
+++ version 1.114.2.1, 2011/06/23 14:20:34
Line 312 uao_set_swslot(struct uvm_object *uobj,

 UVMHIST_LOG(pdhist, "aobj %p pageidx %d slot %d",
     aobj, pageidx, slot, 0);

-KASSERT(mutex_owned(&uobj->vmobjlock) || uobj->uo_refs == 0);
+KASSERT(mutex_owned(uobj->vmobjlock) || uobj->uo_refs == 0);

 /*
  * if noswap flag is set, then we can't set a non-zero slot.
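
The change applied throughout this file is that vmobjlock is no longer a
kmutex_t embedded in the object but a pointer to a mutex that may be shared
between objects, so every caller drops the address-of operator.  A minimal
before/after sketch of the locking idiom (illustrative only; the member
declarations are inferred from the calls above, not copied from the headers):

	/* 1.114: struct uvm_object has "kmutex_t vmobjlock;" (embedded) */
	mutex_enter(&uobj->vmobjlock);
	/* ... operate on uobj ... */
	mutex_exit(&uobj->vmobjlock);

	/* 1.114.2.1: struct uvm_object has "kmutex_t *vmobjlock;" (a pointer,
	 * possibly shared with another object), so the & goes away */
	mutex_enter(uobj->vmobjlock);
	/* ... operate on uobj ... */
	mutex_exit(uobj->vmobjlock);
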
Line 391 uao_free(struct uvm_aobj *aobj)

 uao_dropswap_range1(aobj, 0, 0);
 #endif /* defined(VMSWAP) */

-mutex_exit(&aobj->u_obj.vmobjlock);
+mutex_exit(aobj->u_obj.vmobjlock);

 #if defined(VMSWAP)
 if (UAO_USES_SWHASH(aobj)) {
Line 415 uao_free(struct uvm_aobj *aobj)

  * finally free the aobj itself
  */

-UVM_OBJ_DESTROY(&aobj->u_obj);
+uvm_obj_destroy(&aobj->u_obj, true);
 kmem_free(aobj, sizeof(struct uvm_aobj));
 }
Line 436 struct uvm_object *

 uao_create(vsize_t size, int flags)
 {
 static struct uvm_aobj kernel_object_store;
+static kmutex_t kernel_object_lock;
 static int kobj_alloced = 0;
 pgoff_t pages = round_page(size) >> PAGE_SHIFT;
 struct uvm_aobj *aobj;
Line 497/498 uao_create(vsize_t size, int flags)

 }

 /*
- * init aobj fields
+ * Initialise UVM object.
  */

-UVM_OBJ_INIT(&aobj->u_obj, &aobj_pager, refs);
+const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
+uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
+if (__predict_false(kernobj)) {
+	/* Initialisation only once, for UAO_FLAG_KERNOBJ. */
+	mutex_init(&kernel_object_lock, MUTEX_DEFAULT, IPL_NONE);
+	uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
+}

 /*
  * now that aobj is ready, add it to the global list
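
Together with the uao_free() hunk above, where UVM_OBJ_DESTROY() becomes
uvm_obj_destroy(), this gives the aobj an explicit object/lock lifecycle:
uvm_obj_init() is asked to allocate a per-object lock for ordinary aobjs,
while the kernel object keeps one statically allocated mutex that is
installed with uvm_obj_setlock().  A condensed sketch of that lifecycle,
built only from the calls visible in this diff (the meaning of the boolean
arguments is inferred from context, not from the headers):

	/* creation: allocate a private lock unless this is the kernel object */
	const bool kernobj = (flags & UAO_FLAG_KERNOBJ) != 0;
	uvm_obj_init(&aobj->u_obj, &aobj_pager, !kernobj, refs);
	if (__predict_false(kernobj)) {
		/* kernel object: share one statically allocated mutex instead */
		mutex_init(&kernel_object_lock, MUTEX_DEFAULT, IPL_NONE);
		uvm_obj_setlock(&aobj->u_obj, &kernel_object_lock);
	}

	/* destruction (uao_free): tear down the object and, presumably, the
	 * lock that uvm_obj_init() allocated for it */
	uvm_obj_destroy(&aobj->u_obj, true);
	kmem_free(aobj, sizeof(struct uvm_aobj));
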
Line 552/559 uao_reference(struct uvm_object *uobj)

 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
 	return;

-mutex_enter(&uobj->vmobjlock);
+mutex_enter(uobj->vmobjlock);
 uao_reference_locked(uobj);
-mutex_exit(&uobj->vmobjlock);
+mutex_exit(uobj->vmobjlock);
 }

 /*
Line 601/608 uao_detach(struct uvm_object *uobj)

 if (UVM_OBJ_IS_KERN_OBJECT(uobj))
 	return;

-mutex_enter(&uobj->vmobjlock);
+mutex_enter(uobj->vmobjlock);
 uao_detach_locked(uobj);
 }
Line 626/633 uao_detach_locked(struct uvm_object *uob

 */

 if (UVM_OBJ_IS_KERN_OBJECT(uobj)) {
-	mutex_exit(&uobj->vmobjlock);
+	mutex_exit(uobj->vmobjlock);
 	return;
 }

 UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0);
 uobj->uo_refs--;
 if (uobj->uo_refs) {
-	mutex_exit(&uobj->vmobjlock);
+	mutex_exit(uobj->vmobjlock);
 	UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0);
 	return;
 }
Line 659/666 uao_detach_locked(struct uvm_object *uob

 if (pg->flags & PG_BUSY) {
 	pg->flags |= PG_WANTED;
 	mutex_exit(&uvm_pageqlock);
-	UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, false,
+	UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, false,
 	    "uao_det", 0);
-	mutex_enter(&uobj->vmobjlock);
+	mutex_enter(uobj->vmobjlock);
 	mutex_enter(&uvm_pageqlock);
 	continue;
 }
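
The two uao_detach_locked() hunks above use the usual busy-page retry idiom,
now with the pointer-valued lock: UVM_UNLOCK_AND_WAIT() receives the lock
pointer directly, and the caller re-takes it (plus uvm_pageqlock) before
looking at the page again.  A stripped-down sketch of that pattern; the loop
shape and surrounding context are assumed, only the calls are from the hunks,
and both locks are held on entry:

	while ((pg = TAILQ_FIRST(&uobj->memq)) != NULL) {
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			mutex_exit(&uvm_pageqlock);
			/* sleeps on pg and drops uobj->vmobjlock */
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, false,
			    "uao_det", 0);
			mutex_enter(uobj->vmobjlock);
			mutex_enter(&uvm_pageqlock);
			continue;	/* re-check the page state */
		}
		/* ... page is not busy: release it ... */
	}
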
Line 725/732 uao_put(struct uvm_object *uobj, voff_t

 voff_t curoff;
 UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);

-KASSERT(mutex_owned(&uobj->vmobjlock));
+KASSERT(mutex_owned(uobj->vmobjlock));

 curoff = 0;
 if (flags & PGO_ALLPAGES) {
Line 757/764 uao_put(struct uvm_object *uobj, voff_t

 */

 if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) {
-	mutex_exit(&uobj->vmobjlock);
+	mutex_exit(uobj->vmobjlock);
 	return 0;
 }
Line 813/820 uao_put(struct uvm_object *uobj, voff_t

 	TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
 }
 pg->flags |= PG_WANTED;
-UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
+UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
     "uao_put", 0);
-mutex_enter(&uobj->vmobjlock);
+mutex_enter(uobj->vmobjlock);
 if (by_list) {
 	nextpg = TAILQ_NEXT(&curmp, listq.queue);
 	TAILQ_REMOVE(&uobj->memq, &curmp,
Line 880/887 uao_put(struct uvm_object *uobj, voff_t

 if (by_list) {
 	TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
 }
-mutex_exit(&uobj->vmobjlock);
+mutex_exit(uobj->vmobjlock);
 return 0;
 }
 /* out of RAM? */
 if (ptmp == NULL) {
-	mutex_exit(&uobj->vmobjlock);
+	mutex_exit(uobj->vmobjlock);
 	UVMHIST_LOG(pdhist,
 	    "sleeping, ptmp == NULL\n",0,0,0,0);
 	uvm_wait("uao_getpage");
-	mutex_enter(&uobj->vmobjlock);
+	mutex_enter(uobj->vmobjlock);
 	continue;
 }
 UVMHIST_LOG(pdhist,
     "sleeping, ptmp->flags 0x%x\n",
     ptmp->flags,0,0,0);
-UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock,
+UVM_UNLOCK_AND_WAIT(ptmp, uobj->vmobjlock,
     false, "uao_get", 0);
-mutex_enter(&uobj->vmobjlock);
+mutex_enter(uobj->vmobjlock);
 continue;
 }
 * unlock object for i/o, relock when done.
 */

-mutex_exit(&uobj->vmobjlock);
+mutex_exit(uobj->vmobjlock);
 error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
-mutex_enter(&uobj->vmobjlock);
+mutex_enter(uobj->vmobjlock);

 /*
  * I/O done. check for errors.

 mutex_enter(&uvm_pageqlock);
 uvm_pagefree(ptmp);
 mutex_exit(&uvm_pageqlock);
-mutex_exit(&uobj->vmobjlock);
+mutex_exit(uobj->vmobjlock);
 return error;
 }
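
The swap-in path above follows the usual unlock-for-I/O discipline: the
object lock is dropped around the synchronous uvm_swap_get() call, re-taken
before the result is inspected, and on failure the page is freed and the
lock released before returning the error.  A compressed sketch of that
sequence; the error test and its placement are assumed, the variables come
from the surrounding function, and only the calls shown in the hunks are
used:

	mutex_exit(uobj->vmobjlock);		/* unlock object for I/O */
	error = uvm_swap_get(ptmp, swslot, PGO_SYNCIO);
	mutex_enter(uobj->vmobjlock);		/* relock when done */

	if (error != 0) {
		/* I/O failed: release the page and back out */
		mutex_enter(&uvm_pageqlock);
		uvm_pagefree(ptmp);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(uobj->vmobjlock);
		return error;
	}
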
 #else /* defined(VMSWAP) */

 */

 done:
-mutex_exit(&uobj->vmobjlock);
+mutex_exit(uobj->vmobjlock);
 UVMHIST_LOG(pdhist, "<- done (OK)",0,0,0,0);
 return 0;
 }
 * so this should be a rare case.
 */

-if (!mutex_tryenter(&aobj->u_obj.vmobjlock)) {
+if (!mutex_tryenter(aobj->u_obj.vmobjlock)) {
 	mutex_exit(&uao_list_lock);
 	/* XXX Better than yielding but inadequate. */
 	kpause("livelock", false, 1, NULL);
Line 1407/1414 uao_pagein_page(struct uvm_aobj *aobj, i

 * relock and finish up.
 */

-mutex_enter(&aobj->u_obj.vmobjlock);
+mutex_enter(aobj->u_obj.vmobjlock);
 switch (rv) {
 case 0:
 	break;
Line 1462/1469 uao_dropswap_range(struct uvm_object *uo

 {
 struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;

-KASSERT(mutex_owned(&uobj->vmobjlock));
+KASSERT(mutex_owned(uobj->vmobjlock));

 uao_dropswap_range1(aobj, start, end);
 }