--- version 1.15, 1998/10/18 23:49:59
+++ version 1.15.2.2, 1999/02/25 04:08:38
|
|
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
+#include <sys/kernel.h>
 #include <sys/pool.h>

 #include <vm/vm.h>
@@ -176 +177 @@ struct pool uvm_aobj_pool;
  * local functions
  */

-static void			uao_init __P((void));
 static struct uao_swhash_elt	*uao_find_swhash_elt __P((struct uvm_aobj *,
 					int, boolean_t));
 static int			uao_find_swslot __P((struct uvm_aobj *,
@@ -190 +190 @@ static int			uao_get __P((struct uvm_ob
 					vm_prot_t, int, int));
 static boolean_t		uao_releasepg __P((struct vm_page *,
 					struct vm_page **));
+static boolean_t		uao_pagein __P((struct uvm_aobj *, int, int));
+static boolean_t		uao_pagein_page __P((struct uvm_aobj *, int));
@@ -200 +202 @@ static boolean_t		uao_releasepg __P((st
  */

 struct uvm_pagerops aobj_pager = {
-	uao_init,		/* init */
+	NULL,			/* init */
 	NULL,			/* attach */
 	uao_reference,		/* reference */
 	uao_detach,		/* detach */
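
Note: with the init hook set to NULL, uao_init() is no longer reached
through the pager-ops table; it is made non-static further down so that
startup code can call it directly.  A minimal sketch of such a call site
(assumed, not part of this diff):

	uao_init();	/* hypothetical explicit call replacing the aobj_pager init hook */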
@@ -387 +389 @@ uao_set_swslot(uobj, pageidx, slot)
 			pool_put(&uao_swhash_elt_pool, elt);
 		}
 	}
|
|
 	} else {
 		/* we are using an array */
 		oldslot = aobj->u_swslots[pageidx];
|
|
 			{
 				int slot = elt->slots[j];

-				if (slot)
+				if (slot) {
 					uvm_swap_free(slot, 1);
+
+					/*
+					 * this page is no longer
+					 * only in swap.
+					 */
+					uvmexp.swpguniq--;
+				}
 			}

 			next = elt->list.le_next;
|
|
 		{
 			int slot = aobj->u_swslots[i];

-			if (slot)
+			if (slot) {
 				uvm_swap_free(slot, 1);
+
+				/* this page is no longer only in swap. */
+				uvmexp.swpguniq--;
+			}
 		}
 		FREE(aobj->u_swslots, M_UVMAOBJ);
 	}
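
Note: both branches of the slot-freeing path gain the same accounting fix:
uvmexp.swpguniq counts pages whose only copy lives on swap, so freeing such
a slot must decrement it.  The pattern, reduced to its core (names taken
from this diff):

	if (slot) {
		uvm_swap_free(slot, 1);
		uvmexp.swpguniq--;	/* one fewer page living only in swap */
	}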
@@ -477 +489 @@ uao_create(size, flags)
 	vsize_t size;
 	int flags;
 {
 	static struct uvm_aobj kernel_object_store; /* home of kernel_object */
 	static int kobj_alloced = 0;	/* not allocated yet */
 	int pages = round_page(size) >> PAGE_SHIFT;
 	struct uvm_aobj *aobj;

 	/*
 	 * malloc a new aobj unless we are asked for the kernel object
 	 */
 	if (flags & UAO_FLAG_KERNOBJ) {		/* want kernel object? */
 		if (kobj_alloced)
 			panic("uao_create: kernel object already allocated");
@@ -524 +536 @@ uao_create(size, flags)
 	    M_NOWAIT : M_WAITOK;

 	/* allocate hash table or array depending on object size */
 	if (UAO_USES_SWHASH(aobj)) {
 		aobj->u_swhash = hashinit(UAO_SWHASH_BUCKETS(aobj),
 		    M_UVMAOBJ, mflags, &aobj->u_swhashmask);
 		if (aobj->u_swhash == NULL)
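
Note: UAO_USES_SWHASH() picks the bookkeeping structure from the object's
size: small objects keep a flat u_swslots array, large ones a hash table of
slot clusters.  A sketch of the test, assuming a threshold macro along
these lines (the definitions are not shown in this diff):

	#define UAO_SWHASH_THRESHOLD	(UAO_SWHASH_CLUSTER_SIZE * 4)	/* assumed */
	#define UAO_USES_SWHASH(aobj)	((aobj)->u_pages > UAO_SWHASH_THRESHOLD)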
@@ -554 +566 @@ uao_create(size, flags)

 	/*
 	 * now that aobj is ready, add it to the global list
-	 * XXXCHS: uao_init hasn't been called'd in the KERNOBJ case,
-	 * do we really need the kernel object on this list anyway?
 	 */
 	simple_lock(&uao_list_lock);
 	LIST_INSERT_HEAD(&uao_list, aobj, u_list);
@@ -574 +584 @@ uao_create(size, flags)
  *
  * => called at boot time from uvm_pager_init()
  */
-static void
+void
 uao_init()
 {
 	static int uao_initialized;
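
Note: since uao_init() is now exported and can be reached from more than
one startup path, the static uao_initialized flag presumably guards against
double initialization, along these lines (the body is elided by the diff;
this is a sketch):

	if (uao_initialized)
		return;
	uao_initialized = TRUE;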
|
|
  * uao_reference: add a ref to an aobj
  *
  * => aobj must be unlocked (we will lock it)
+ * just lock and call the locked version
  */
 void
 uao_reference(uobj)
 	struct uvm_object *uobj;
 {
+	simple_lock(&uobj->vmobjlock);
+	uao_reference_locked(uobj);
+	simple_unlock(&uobj->vmobjlock);
+}
+
+/*
+ * uao_reference_locked: add a ref to an aobj that is already locked
+ *
+ * => aobj must be locked
+ */
+void
+uao_reference_locked(uobj)
+	struct uvm_object *uobj;
+{
 	UVMHIST_FUNC("uao_reference"); UVMHIST_CALLED(maphist);

 	/*
@@ -616 +641 @@ uao_reference(uobj)
 	if (uobj->uo_refs == UVM_OBJ_KERN)
 		return;

-	simple_lock(&uobj->vmobjlock);
 	uobj->uo_refs++;		/* bump! */
 	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
 	    uobj, uobj->uo_refs,0,0);
-	simple_unlock(&uobj->vmobjlock);
 }
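
Note: this is the usual locked/unlocked wrapper split: uao_reference()
serves callers that hold no locks, while code that already holds vmobjlock
calls uao_reference_locked() directly; uao_swap_off() below does exactly
that.  A caller sketch:

	simple_lock(&uobj->vmobjlock);	/* caller needs the object locked anyway */
	uao_reference_locked(uobj);	/* take a ref without dropping the lock */
	/* ... work on the object ... */
	simple_unlock(&uobj->vmobjlock);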
|
|
|
|
 /*
  * uao_detach: drop a reference to an aobj
  *
- * => aobj must be unlocked, we will lock it
+ * => aobj must be unlocked
  */
 void
 uao_detach(uobj)
 	struct uvm_object *uobj;
 {
+	simple_lock(&uobj->vmobjlock);
+	uao_detach_locked(uobj);
+}
+
+/*
+ * uao_detach_locked: drop a reference to an aobj
+ *
+ * => aobj must be locked, and is unlocked (or freed) upon return.
+ */
+void
+uao_detach_locked(uobj)
+	struct uvm_object *uobj;
+{
 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 	struct vm_page *pg;
 	boolean_t busybody;
@@ -640 +678 @@ uao_detach(uobj)
 	/*
 	 * detaching from kernel_object is a noop.
 	 */
-	if (uobj->uo_refs == UVM_OBJ_KERN)
+	if (uobj->uo_refs == UVM_OBJ_KERN) {
+		simple_unlock(&uobj->vmobjlock);
 		return;
+	}
-	simple_lock(&uobj->vmobjlock);

 	UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0);
 	uobj->uo_refs--;		/* drop ref! */
@@ -661 +699 @@ uao_detach(uobj)
 	simple_unlock(&uao_list_lock);

 	/*
-	 * free all the pages that aren't PG_BUSY, mark for release any that are.
+	 * free all the pages that aren't PG_BUSY,
+	 * mark for release any that are.
 	 */
 	busybody = FALSE;
 	for (pg = uobj->memq.tqh_first ; pg != NULL ; pg = pg->listq.tqe_next) {
 		int swslot;
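
Note: the loop body is elided by this hunk, but the comment implies its
shape: pages someone else holds PG_BUSY are only flagged PG_RELEASED (their
owner frees them later via uao_releasepg()), everything else is freed on
the spot.  A sketch of that shape (assumed, not shown in this diff):

	if (pg->flags & PG_BUSY) {
		pg->flags |= PG_RELEASED;	/* owner will free it */
		busybody = TRUE;
		continue;
	}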
@@ -761 +799 @@ uao_get(uobj, offset, pps, npagesp, cent
 	boolean_t done;
 	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);

-	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d", aobj, offset, flags,0);
+	UVMHIST_LOG(pdhist, "aobj=%p offset=%d, flags=%d",
+	    aobj, offset, flags,0);

 	/*
 	 * get number of pages
 	 */
 	maxpages = *npagesp;

 	/*
@@ -774 +812 @@ uao_get(uobj, offset, pps, npagesp, cent
 	 */
 	if (flags & PGO_LOCKED) {

 		/*
 		 * step 1a: get pages that are already resident. only do
 		 * this if the data structures are locked (i.e. the first
@@ -984 +1021 @@ uao_get(uobj, offset, pps, npagesp, cent
 			    rv,0,0,0);
 			if (ptmp->flags & PG_WANTED)
 				/* object lock still held */
-				thread_wakeup(ptmp);
+				wakeup(ptmp);
 			ptmp->flags &= ~(PG_WANTED|PG_BUSY);
 			UVM_PAGE_OWN(ptmp, NULL);
 			uvm_lock_pageq();
 			uvm_pagefree(ptmp);
 			uvm_unlock_pageq();

 			simple_unlock(&uobj->vmobjlock);
 			return (rv);
 		}
@@ -1037 +1075 @@ uao_get(uobj, offset, pps, npagesp, cent
  * => we kill the aobj if it is not referenced and we are suppose to
  *	kill it ("KILLME").
  */
-static boolean_t uao_releasepg(pg, nextpgp)
+static boolean_t
+uao_releasepg(pg, nextpgp)
 	struct vm_page *pg;
 	struct vm_page **nextpgp;	/* OUT */
 {
@@ -1048 +1087 @@ static boolean_t uao_releasepg(pg, nextp
 	if ((pg->flags & PG_RELEASED) == 0)
 		panic("uao_releasepg: page not released!");
 #endif

 	/*
 	 * dispose of the page [caller handles PG_WANTED] and swap slot.
 	 */
@@ -1056 +1095 @@ static boolean_t uao_releasepg(pg, nextp
 	slot = uao_set_swslot(&aobj->u_obj, pg->offset >> PAGE_SHIFT, 0);
 	if (slot)
 		uvm_swap_free(slot, 1);

 	uvm_lock_pageq();
 	if (nextpgp)
 		*nextpgp = pg->pageq.tqe_next;	/* next page for daemon */
 	uvm_pagefree(pg);
 	if (!nextpgp)
 		uvm_unlock_pageq();		/* keep locked for daemon */

 	/*
 	 * if we're not killing the object, we're done.
@@ -1092 +1132 @@ static boolean_t uao_releasepg(pg, nextp

 	return FALSE;
 }
|
|
|
|
|
|
|
+/*
+ * page in every page in every aobj that is paged-out to a range of swslots.
+ * returns TRUE if pagein was aborted due to lack of memory.
+ *
+ * nothing is locked.
+ */
+boolean_t
+uao_swap_off(startslot, endslot)
+	int startslot, endslot;
+{
+	struct uvm_aobj *aobj, *nextaobj;
+
+	/*
+	 * walk the list of all aobjs
+	 */
+
+restart:
+	simple_lock(&uao_list_lock);
+
+	for (aobj = uao_list.lh_first;
+	     aobj != NULL;
+	     aobj = nextaobj) {
+		int rv;
+
+		/*
+		 * try to get the object lock,
+		 * start all over if we fail.
+		 */
+		if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
+			simple_unlock(&uao_list_lock);
+			goto restart;
+		}
+
+		/*
+		 * add a ref to the aobj so it doesn't disappear
+		 * while we're working.
+		 */
+		uao_reference_locked(&aobj->u_obj);
+
+		/*
+		 * now it's safe to unlock the uao list.
+		 */
+		simple_unlock(&uao_list_lock);
+
+		/*
+		 * page in any pages in the swslot range.
+		 * if there's an error, abort and return the error.
+		 */
+		rv = uao_pagein(aobj, startslot, endslot);
+		if (rv) {
+			uao_detach_locked(&aobj->u_obj);
+			return rv;
+		}
+
+		/*
+		 * we're done with this aobj.
+		 * relock the list and drop our ref on the aobj.
+		 */
+		simple_lock(&uao_list_lock);
+		nextaobj = aobj->u_list.le_next;
+		uao_detach_locked(&aobj->u_obj);
+	}
+
+	/*
+	 * done with traversal, unlock the list
+	 */
+	simple_unlock(&uao_list_lock);
+	return FALSE;
+}
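
Note: uao_swap_off() exists for the swap-off path: before a swap device is
removed, every aobj page occupying a slot on that device must be brought
back into memory.  A hypothetical caller sketch (the call site and the
slot-range arithmetic are assumed, not part of this diff):

	/* drain aobj pages from the device's slot range [base, base + size) */
	if (uao_swap_off(base, base + size))
		return ENOMEM;	/* pagein aborted: not enough memory */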
|
|
|
|
|
+/*
+ * page in any pages from aobj in the given range.
+ * returns TRUE if pagein was aborted due to lack of memory.
+ *
+ * => aobj must be locked and is returned locked.
+ */
+static boolean_t
+uao_pagein(aobj, startslot, endslot)
+	struct uvm_aobj *aobj;
+	int startslot, endslot;
+{
+	int rv;
+
+	if (UAO_USES_SWHASH(aobj)) {
+		struct uao_swhash_elt *elt;
+		int bucket;
+
+restart:
+		for (bucket = aobj->u_swhashmask; bucket >= 0; bucket--)
+			for (elt = aobj->u_swhash[bucket].lh_first;
+			     elt != NULL;
+			     elt = elt->list.le_next) {
+				int i;
+
+				for (i = 0; i < UAO_SWHASH_CLUSTER_SIZE; i++) {
+					int slot = elt->slots[i];
+
+					/*
+					 * if the slot isn't in range, skip it.
+					 */
+					if (slot < startslot ||
+					    slot >= endslot) {
+						continue;
+					}
+
+					/*
+					 * process the page,
+					 * then start over on this object
+					 * since the swhash elt
+					 * may have been freed.
+					 */
+					rv = uao_pagein_page(aobj,
+					    UAO_SWHASH_ELT_PAGEIDX_BASE(elt) + i);
+					if (rv) {
+						return rv;
+					}
+					goto restart;
+				}
+			}
+	} else {
+		int i;
+
+		for (i = 0; i < aobj->u_pages; i++) {
+			int slot = aobj->u_swslots[i];
+
+			/*
+			 * if the slot isn't in range, skip it
+			 */
+			if (slot < startslot || slot >= endslot) {
+				continue;
+			}
+
+			/*
+			 * process the page.
+			 */
+			rv = uao_pagein_page(aobj, i);
+			if (rv) {
+				return rv;
+			}
+		}
+	}
+
+	return FALSE;
+}
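
Note: the goto restart after every successful pagein is load-bearing:
uao_pagein_page() calls uao_set_swslot(..., 0), which can pool_put() the
very uao_swhash_elt the walk is standing on, so simply continuing the inner
loop could touch freed memory.  A sketch of the hazard being avoided:

	rv = uao_pagein_page(aobj, pageidx);	/* may free 'elt' */
	slot = elt->slots[i + 1];		/* use-after-free; hence the restart */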
|
|
|
|
|
|
|
+/*
+ * page in a page from an aobj. used for swap_off.
+ * returns TRUE if pagein was aborted due to lack of memory.
+ *
+ * => aobj must be locked and is returned locked.
+ */
+static boolean_t
+uao_pagein_page(aobj, pageidx)
+	struct uvm_aobj *aobj;
+	int pageidx;
+{
+	struct vm_page *pg;
+	int slot, rv;
+	vaddr_t offset = pageidx << PAGE_SHIFT;
+	UVMHIST_FUNC("uao_pagein_page"); UVMHIST_CALLED(pdhist);
+
+	/*
+	 * check if page is resident.
+	 */
+restart:
+	while ((pg = uvm_pagelookup(&aobj->u_obj, offset)) != NULL) {
+
+		/*
+		 * page is already resident.
+		 * if the page is busy or released,
+		 * wait til it's not, then try again.
+		 */
+		if ((pg->flags & (PG_BUSY|PG_RELEASED)) != 0) {
+			pg->flags |= PG_WANTED;
+			UVM_UNLOCK_AND_WAIT(pg, &aobj->u_obj.vmobjlock, FALSE,
+			    "uao_pagein", 0);
+			simple_lock(&aobj->u_obj.vmobjlock);
+			continue;
+		}
+
+		/*
+		 * page is ours.
+		 * mark it as dirty,
+		 * mark the aobj as no longer having this page in swap,
+		 * and free the swap slot.
+		 */
+
+		pg->flags &= ~(PG_CLEAN);
+		slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
+		if (slot) {
+			uvm_swap_free(slot, 1);
+		}
+
+		return FALSE;
+	}
+
+	/*
+	 * page is not resident.
+	 * try to allocate a page, start over if we fail.
+	 */
+	while ((pg = uvm_pagealloc(&aobj->u_obj, offset, NULL)) == NULL) {
+		boolean_t nomem;
+
+		/*
+		 * if we are out of swap here, then we have to fail.
+		 */
+		/* XXX what locks these? */
+		nomem = uvmexp.swpages <= uvmexp.swpginuse;
+
+		if (nomem) {
+			simple_unlock(&aobj->u_obj.vmobjlock);
+			return TRUE;
+		}
+
+		/*
+		 * otherwise wait for free pages.
+		 */
+		simple_unlock(&aobj->u_obj.vmobjlock);
+		uvm_wait("uao_pagein_page");
+		simple_lock(&aobj->u_obj.vmobjlock);
+		goto restart;
+	}
+
+	pg->pqflags |= PQ_AOBJ;
+
+	/*
+	 * fetch the page from swap.
+	 * everything is unlocked.
+	 */
+	slot = uao_find_swslot(aobj, pageidx);
+	simple_unlock(&aobj->u_obj.vmobjlock);
+	rv = uvm_swap_get(pg, slot, PGO_SYNCIO);
+
+	switch (rv) {
+	case VM_PAGER_OK:
+		break;
+
+	case VM_PAGER_AGAIN:
+		/*
+		 * sleep a bit, then try again.
+		 */
+		tsleep(&lbolt, PVM, "uao_pagein", 0);
+		simple_lock(&aobj->u_obj.vmobjlock);
+		goto restart;
+	default:
+		panic("uao_pagein_page: uvm_swap_get -> %d\n", rv);
+	}
+
+	/*
+	 * relock to finish up.
+	 */
+	simple_lock(&aobj->u_obj.vmobjlock);
+
+	/*
+	 * handle wanted pages
+	 */
+	if (pg->flags & PG_WANTED) {
+		wakeup(pg);
+	}
+
+#ifdef DIAGNOSTIC
+	/*
+	 * this should never happen, since we have a reference on the aobj.
+	 */
+	if (pg->flags & PG_RELEASED) {
+		panic("uao_pagein_page: found PG_RELEASED page?\n");
+	}
+#endif
+
+	/*
+	 * ok, we've got the page now.
+	 * mark it as dirty, clear its swslot and un-busy it.
+	 */
+	slot = uao_set_swslot(&aobj->u_obj, pageidx, 0);
+	if (slot) {
+		uvm_swap_free(slot, 1);
+	}
+	pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE);
+	pmap_clear_reference(PMAP_PGARG(pg));
+	pg->flags &= ~(PG_BUSY|PG_CLEAN|PG_FAKE);
+	UVM_PAGE_OWN(pg, NULL);
+
+	/*
+	 * deactivate the page (to put it on a page queue).
+	 */
+	uvm_lock_pageq();
+	uvm_pagedeactivate(pg);
+	uvm_unlock_pageq();
+
+	return FALSE;
+}