version 1.93, 2006/03/15 18:09:25 |
version 1.93.2.1, 2006/05/24 15:50:48 |
Line 95 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 95 __KERNEL_RCSID(0, "$NetBSD$"); |
|
/*
 * NOTE(review): flattened side-by-side diff residue (NetBSD uvm_glue.c
 * rev 1.93 vs rev 1.93.2.1).  Each declaration appears twice below:
 * the 1.93 form first, the 1.93.2.1 form second.  The trailing '|' on
 * every line is the diff column separator, not C source.
 */
static void uvm_swapout(struct lwp *); |
static void uvm_swapout(struct lwp *); |
 |
 |
/*
 * Presumably the cap on cached free u-areas — the constant's use is in
 * an elided span, not visible in this chunk; TODO confirm against the
 * full file.
 */
#define UVM_NUAREA_MAX 16 |
#define UVM_NUAREA_MAX 16 |
/*
 * Head of the free u-area list.  1.93 chained raw void * values;
 * 1.93.2.1 stores vaddr_t links and narrows linkage to static.
 */
void *uvm_uareas; |
static vaddr_t uvm_uareas; |
/* Count of u-areas currently cached on the free list (static in new rev). */
int uvm_nuarea; |
static int uvm_nuarea; |
/* Lock guarding uvm_uareas and uvm_nuarea (static in new rev). */
struct simplelock uvm_uareas_slock = SIMPLELOCK_INITIALIZER; |
static struct simplelock uvm_uareas_slock = SIMPLELOCK_INITIALIZER; |
 |
/*
 * New in 1.93.2.1: the free-list link is the first vaddr_t-sized word
 * of the user area (via UAREA_TO_USER); this macro names that access
 * so callers stop open-coding (void **)/(vaddr_t) casts — compare the
 * old vs new lines in uvm_uarea_free below.
 */
#define UAREA_NEXTFREE(uarea) (*(vaddr_t *)(UAREA_TO_USER(uarea))) |
 |
 |
static void uvm_uarea_free(vaddr_t); |
static void uvm_uarea_free(vaddr_t); |
|
|
|
|
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize, |
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize, |
void (*func)(void *), void *arg) |
void (*func)(void *), void *arg) |
{ |
{ |
struct user *up = l2->l_addr; |
|
int error; |
int error; |
|
|
/* |
/* |
Line 248 uvm_lwp_fork(struct lwp *l1, struct lwp |
|
Line 248 uvm_lwp_fork(struct lwp *l1, struct lwp |
|
*/ |
*/ |
|
|
if ((l2->l_flag & L_INMEM) == 0) { |
if ((l2->l_flag & L_INMEM) == 0) { |
error = uvm_fault_wire(kernel_map, (vaddr_t)up, |
vaddr_t uarea = USER_TO_UAREA(l2->l_addr); |
(vaddr_t)up + USPACE, VM_PROT_READ | VM_PROT_WRITE, 0); |
|
|
error = uvm_fault_wire(kernel_map, uarea, |
|
uarea + USPACE, VM_PROT_READ | VM_PROT_WRITE, 0); |
if (error) |
if (error) |
panic("uvm_lwp_fork: uvm_fault_wire failed: %d", error); |
panic("uvm_lwp_fork: uvm_fault_wire failed: %d", error); |
#ifdef PMAP_UAREA |
#ifdef PMAP_UAREA |
/* Tell the pmap this is a u-area mapping */ |
/* Tell the pmap this is a u-area mapping */ |
PMAP_UAREA((vaddr_t)up); |
PMAP_UAREA(uarea); |
#endif |
#endif |
l2->l_flag |= L_INMEM; |
l2->l_flag |= L_INMEM; |
} |
} |
Line 291 uvm_uarea_alloc(vaddr_t *uaddrp) |
|
Line 293 uvm_uarea_alloc(vaddr_t *uaddrp) |
|
|
|
simple_lock(&uvm_uareas_slock); |
simple_lock(&uvm_uareas_slock); |
if (uvm_nuarea > 0) { |
if (uvm_nuarea > 0) { |
uaddr = (vaddr_t)uvm_uareas; |
uaddr = uvm_uareas; |
uvm_uareas = *(void **)uvm_uareas; |
uvm_uareas = UAREA_NEXTFREE(uaddr); |
uvm_nuarea--; |
uvm_nuarea--; |
simple_unlock(&uvm_uareas_slock); |
simple_unlock(&uvm_uareas_slock); |
*uaddrp = uaddr; |
*uaddrp = uaddr; |
Line 313 static inline void |
|
Line 315 static inline void |
|
/*
 * NOTE(review): flattened side-by-side diff residue — every statement
 * below appears twice, first as in rev 1.93, then as in rev 1.93.2.1;
 * the trailing '|' is the diff column separator, not C source.  The
 * return type ("static inline void") sits on the elided context line
 * above this block.
 *
 * uvm_uarea_free: return the u-area at uaddr to the uvm_uareas free
 * list — under uvm_uareas_slock, push uaddr onto the list head and
 * increment uvm_nuarea.  The revision change replaces the ad-hoc
 * (void **) pointer punning with the UAREA_NEXTFREE() macro and plain
 * vaddr_t assignments (uvm_uareas is vaddr_t in the new rev).
 */
uvm_uarea_free(vaddr_t uaddr) |
uvm_uarea_free(vaddr_t uaddr) |
{ |
{ |
simple_lock(&uvm_uareas_slock); |
simple_lock(&uvm_uareas_slock); |
/* 1.93: store the next-free link through a (void **) cast */
*(void **)uaddr = uvm_uareas; |
/* 1.93.2.1: same store, spelled via the UAREA_NEXTFREE() macro */
UAREA_NEXTFREE(uaddr) = uvm_uareas; |
/* 1.93: head was void *, so the cast; 1.93.2.1: head is vaddr_t */
uvm_uareas = (void *)uaddr; |
uvm_uareas = uaddr; |
uvm_nuarea++; |
uvm_nuarea++; |
simple_unlock(&uvm_uareas_slock); |
simple_unlock(&uvm_uareas_slock); |
} |
} |
Line 335 uvm_uarea_drain(boolean_t empty) |
|
Line 337 uvm_uarea_drain(boolean_t empty) |
|
|
|
simple_lock(&uvm_uareas_slock); |
simple_lock(&uvm_uareas_slock); |
while(uvm_nuarea > leave) { |
while(uvm_nuarea > leave) { |
uaddr = (vaddr_t)uvm_uareas; |
uaddr = uvm_uareas; |
uvm_uareas = *(void **)uvm_uareas; |
uvm_uareas = UAREA_NEXTFREE(uaddr); |
uvm_nuarea--; |
uvm_nuarea--; |
simple_unlock(&uvm_uareas_slock); |
simple_unlock(&uvm_uareas_slock); |
uvm_km_free(kernel_map, uaddr, USPACE, UVM_KMF_PAGEABLE); |
uvm_km_free(kernel_map, uaddr, USPACE, UVM_KMF_PAGEABLE); |
Line 376 uvm_proc_exit(struct proc *p) |
|
Line 378 uvm_proc_exit(struct proc *p) |
|
void |
void |
uvm_lwp_exit(struct lwp *l) |
uvm_lwp_exit(struct lwp *l) |
{ |
{ |
vaddr_t va = (vaddr_t)l->l_addr; |
vaddr_t va = USER_TO_UAREA(l->l_addr); |
|
|
l->l_flag &= ~L_INMEM; |
l->l_flag &= ~L_INMEM; |
uvm_uarea_free(va); |
uvm_uarea_free(va); |
Line 425 uvm_swapin(struct lwp *l) |
|
Line 427 uvm_swapin(struct lwp *l) |
|
vaddr_t addr; |
vaddr_t addr; |
int s, error; |
int s, error; |
|
|
addr = (vaddr_t)l->l_addr; |
addr = USER_TO_UAREA(l->l_addr); |
/* make L_INMEM true */ |
/* make L_INMEM true */ |
error = uvm_fault_wire(kernel_map, addr, addr + USPACE, |
error = uvm_fault_wire(kernel_map, addr, addr + USPACE, |
VM_PROT_READ | VM_PROT_WRITE, 0); |
VM_PROT_READ | VM_PROT_WRITE, 0); |
Line 670 uvm_swapout(struct lwp *l) |
|
Line 672 uvm_swapout(struct lwp *l) |
|
/* |
/* |
* Unwire the to-be-swapped process's user struct and kernel stack. |
* Unwire the to-be-swapped process's user struct and kernel stack. |
*/ |
*/ |
addr = (vaddr_t)l->l_addr; |
addr = USER_TO_UAREA(l->l_addr); |
uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !L_INMEM */ |
uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !L_INMEM */ |
pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map)); |
pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map)); |
} |
} |