--- version 1.114, 2008/01/02 11:49:16
+++ version 1.115, 2008/01/28 12:22:47
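In short: 1.115 retires the hand-rolled per-CPU u-area freelist, its
watermark-driven uvm_uarea_drain, and the freelist bookkeeping in
uvm_cpu_attach, replacing them with a pool_cache(9) whose constructor wires
each u-area before it is ever handed out. uvm_swapin and uvm_swapout are
refactored onto the same uarea_swapin/uarea_swapout helpers, so the
wire/unwire logic now lives in one place.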

 uvm_cpu_attach(struct cpu_info *ci)
 {
 
-        mutex_init(&ci->ci_data.cpu_uarea_lock, MUTEX_DEFAULT, IPL_NONE);
-        ci->ci_data.cpu_uarea_cnt = 0;
-        ci->ci_data.cpu_uarea_list = 0;
 }
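The three cpu_uarea_* fields initialized here were the per-CPU freelist
state. They can go because the pool_cache layer introduced below keeps its
own per-CPU caches of constructed objects; see the sketch after the
uvm_uarea_init hunk.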

-/*
- * uvm_uarea_alloc: allocate a u-area
- */
-
-bool
-uvm_uarea_alloc(vaddr_t *uaddrp)
-{
-        struct cpu_info *ci;
-        vaddr_t uaddr;
+static int
+uarea_swapin(vaddr_t addr)
+{
+
+        return uvm_fault_wire(kernel_map, addr, addr + USPACE,
+            VM_PROT_READ | VM_PROT_WRITE, 0);
+}
+
+static void
+uarea_swapout(vaddr_t addr)
+{
+
+        uvm_fault_unwire(kernel_map, addr, addr + USPACE);
+}
 
 #ifndef USPACE_ALIGN
 #define USPACE_ALIGN 0
 #endif
 
-        ci = curcpu();
-        if (ci->ci_data.cpu_uarea_cnt > 0) {
-                mutex_enter(&ci->ci_data.cpu_uarea_lock);
-                if (ci->ci_data.cpu_uarea_cnt == 0) {
-                        mutex_exit(&ci->ci_data.cpu_uarea_lock);
-                } else {
-                        uaddr = ci->ci_data.cpu_uarea_list;
-                        ci->ci_data.cpu_uarea_list = UAREA_NEXTFREE(uaddr);
-                        ci->ci_data.cpu_uarea_cnt--;
-                        mutex_exit(&ci->ci_data.cpu_uarea_lock);
-                        *uaddrp = uaddr;
-                        return true;
-                }
-        }
-
-        *uaddrp = uvm_km_alloc(kernel_map, USPACE, USPACE_ALIGN,
-            UVM_KMF_PAGEABLE);
-        return false;
-}
+static pool_cache_t uvm_uarea_cache;
+
+static int
+uarea_ctor(void *arg, void *obj, int flags)
+{
+
+        KASSERT((flags & PR_WAITOK) != 0);
+        return uarea_swapin((vaddr_t)obj);
+}
+
+static void *
+uarea_poolpage_alloc(struct pool *pp, int flags)
+{
+
+        return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
+            USPACE_ALIGN, UVM_KMF_PAGEABLE |
+            ((flags & PR_WAITOK) != 0 ? UVM_KMF_WAITVA :
+            (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
+}
+
+static void
+uarea_poolpage_free(struct pool *pp, void *addr)
+{
+
+        uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
+            UVM_KMF_PAGEABLE);
+}
+
+static struct pool_allocator uvm_uarea_allocator = {
+        .pa_alloc = uarea_poolpage_alloc,
+        .pa_free = uarea_poolpage_free,
+        .pa_pagesz = USPACE,
+};
+
+void
+uvm_uarea_init(void)
+{
+
+        uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0,
+#if USPACE_ALIGN == 0
+            PR_NOALIGN |
+#endif
+            PR_NOTOUCH,
+            "uarea", &uvm_uarea_allocator, IPL_NONE, uarea_ctor, NULL, NULL);
+}
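For readers unfamiliar with the API this hunk adopts, below is a minimal
sketch of the pool_cache(9) lifecycle. The names (struct example,
example_cache, example_init, example_use) are illustrative only; the
pool_cache_* calls themselves match the usage visible in this diff.

#include <sys/param.h>
#include <sys/pool.h>

struct example {
        int e_count;
};

static pool_cache_t example_cache;

/*
 * One-time setup: object size, alignment, alignment offset, pool flags,
 * wait-channel name, backing page allocator (NULL selects the default),
 * IPL, then constructor, destructor and an opaque argument for them.
 */
void
example_init(void)
{

        example_cache = pool_cache_init(sizeof(struct example), 0, 0, 0,
            "example", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * Hot path: pool_cache_get/pool_cache_put are satisfied from per-CPU
 * caches of already-constructed objects whenever possible, so the
 * constructor runs only when a raw object is first pulled in.
 */
void
example_use(void)
{
        struct example *e;

        e = pool_cache_get(example_cache, PR_WAITOK);
        e->e_count = 0;
        pool_cache_put(example_cache, e);
}

Applied to u-areas: uarea_ctor runs (and wires the area) only on a cache
miss, while a put/get cycle through the cache hands the area back still
wired, which is exactly the behaviour the old freelist provided by hand.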

 /*
- * uvm_uarea_free: free a u-area
+ * uvm_uarea_alloc: allocate a u-area
  */
 
-void
-uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
+bool
+uvm_uarea_alloc(vaddr_t *uaddrp)
 {
 
-        mutex_enter(&ci->ci_data.cpu_uarea_lock);
-        UAREA_NEXTFREE(uaddr) = ci->ci_data.cpu_uarea_list;
-        ci->ci_data.cpu_uarea_list = uaddr;
-        ci->ci_data.cpu_uarea_cnt++;
-        mutex_exit(&ci->ci_data.cpu_uarea_lock);
+        *uaddrp = (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
+        return true;
 }
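The return-value contract changes here. In 1.114 true meant a recycled,
already-wired u-area and false meant a freshly mapped one the caller still
had to wire; in 1.115 everything pool_cache_get returns has been through
uarea_ctor and is wired, so the function can always return true. From a
hypothetical call site (the real callers are not part of this diff):

        vaddr_t uaddr;

        if (!uvm_uarea_alloc(&uaddr)) {
                /*
                 * 1.114: a fresh, still-unwired u-area; the caller had
                 * to wire it before first use.  As of 1.115 this branch
                 * is never taken: every area comes back already wired.
                 */
        }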

 /*
- * uvm_uarea_drain: return memory of u-areas over limit
- * back to system
- *
- * => if asked to drain as much as possible, drain all cpus.
- * => if asked to drain to low water mark, drain local cpu only.
+ * uvm_uarea_free: free a u-area
  */
 
 void
-uvm_uarea_drain(bool empty)
+uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
 {
-        CPU_INFO_ITERATOR cii;
-        struct cpu_info *ci;
-        vaddr_t uaddr, nuaddr;
-        int count;
 
-        if (empty) {
-                for (CPU_INFO_FOREACH(cii, ci)) {
-                        mutex_enter(&ci->ci_data.cpu_uarea_lock);
-                        count = ci->ci_data.cpu_uarea_cnt;
-                        uaddr = ci->ci_data.cpu_uarea_list;
-                        ci->ci_data.cpu_uarea_cnt = 0;
-                        ci->ci_data.cpu_uarea_list = 0;
-                        mutex_exit(&ci->ci_data.cpu_uarea_lock);
-
-                        while (count != 0) {
-                                nuaddr = UAREA_NEXTFREE(uaddr);
-                                uvm_km_free(kernel_map, uaddr, USPACE,
-                                    UVM_KMF_PAGEABLE);
-                                uaddr = nuaddr;
-                                count--;
-                        }
-                }
-                return;
-        }
-
-        ci = curcpu();
-        if (ci->ci_data.cpu_uarea_cnt > UVM_NUAREA_HIWAT) {
-                mutex_enter(&ci->ci_data.cpu_uarea_lock);
-                while (ci->ci_data.cpu_uarea_cnt > UVM_NUAREA_LOWAT) {
-                        uaddr = ci->ci_data.cpu_uarea_list;
-                        ci->ci_data.cpu_uarea_list = UAREA_NEXTFREE(uaddr);
-                        ci->ci_data.cpu_uarea_cnt--;
-                        mutex_exit(&ci->ci_data.cpu_uarea_lock);
-                        uvm_km_free(kernel_map, uaddr, USPACE,
-                            UVM_KMF_PAGEABLE);
-                        mutex_enter(&ci->ci_data.cpu_uarea_lock);
-                }
-                mutex_exit(&ci->ci_data.cpu_uarea_lock);
-        }
+        pool_cache_put(uvm_uarea_cache, (void *)uaddr);
 }
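uvm_uarea_drain and the UVM_NUAREA_HIWAT/UVM_NUAREA_LOWAT watermarks are
deleted outright rather than rewritten: trimming idle cached u-areas is now
the pool subsystem's job, presumably via its standard drain path when the
pagedaemon needs memory back. Note also that uvm_uarea_free keeps its
struct cpu_info * parameter even though the new body no longer uses it.
The deleted loops carry their freelist link inside the free u-area itself
through UAREA_NEXTFREE; its definition is not shown in this diff, but the
intrusive shape the loops assume would look something like this
hypothetical reconstruction:

        /*
         * Hypothetical, for illustration only: a word inside a free
         * (and still wired) u-area doubles as the next-free link, so
         * the freelist costs no memory beyond the areas themselves.
         */
        #define UAREA_NEXTFREE(uaddr)   (*(vaddr_t *)(uaddr))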

@@ -472 +455 @@ int swapdebug = 0;
 void
 uvm_swapin(struct lwp *l)
 {
-        vaddr_t addr;
         int error;
 
         /* XXXSMP notyet KASSERT(mutex_owned(&l->l_swaplock)); */
         KASSERT(l != curlwp);
 
-        addr = USER_TO_UAREA(l->l_addr);
-        /* make L_INMEM true */
-        error = uvm_fault_wire(kernel_map, addr, addr + USPACE,
-            VM_PROT_READ | VM_PROT_WRITE, 0);
+        error = uarea_swapin(USER_TO_UAREA(l->l_addr));
         if (error) {
                 panic("uvm_swapin: rewiring stack failed: %d", error);
         }
@@ -758 +737 @@ uvm_swapout_threads(void)
 static void
 uvm_swapout(struct lwp *l)
 {
-        vaddr_t addr;
         struct proc *p = l->l_proc;
 
         KASSERT(mutex_owned(&l->l_swaplock));
@@ -796 +774 @@ uvm_swapout(struct lwp *l)
         /*
          * Unwire the to-be-swapped process's user struct and kernel stack.
          */
-        addr = USER_TO_UAREA(l->l_addr);
-        uvm_fault_unwire(kernel_map, addr, addr + USPACE);  /* !L_INMEM */
+        uarea_swapout(USER_TO_UAREA(l->l_addr));
         pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
 }