Annotation of src/sys/uvm/uvm_glue.c, Revision 1.36
1.36 ! simonb 1: /* $NetBSD: uvm_glue.c,v 1.35 2000/06/08 05:52:34 thorpej Exp $ */
1.1 mrg 2:
3: /*
4: * Copyright (c) 1997 Charles D. Cranor and Washington University.
5: * Copyright (c) 1991, 1993, The Regents of the University of California.
6: *
7: * All rights reserved.
8: *
9: * This code is derived from software contributed to Berkeley by
10: * The Mach Operating System project at Carnegie-Mellon University.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. All advertising materials mentioning features or use of this software
21: * must display the following acknowledgement:
22: * This product includes software developed by Charles D. Cranor,
23: * Washington University, the University of California, Berkeley and
24: * its contributors.
25: * 4. Neither the name of the University nor the names of its contributors
26: * may be used to endorse or promote products derived from this software
27: * without specific prior written permission.
28: *
29: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39: * SUCH DAMAGE.
40: *
41: * @(#)vm_glue.c 8.6 (Berkeley) 1/5/94
1.4 mrg 42: * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
1.1 mrg 43: *
44: *
45: * Copyright (c) 1987, 1990 Carnegie-Mellon University.
46: * All rights reserved.
47: *
48: * Permission to use, copy, modify and distribute this software and
49: * its documentation is hereby granted, provided that both the copyright
50: * notice and this permission notice appear in all copies of the
51: * software, derivative works or modified versions, and any portions
52: * thereof, and that both notices appear in supporting documentation.
53: *
54: * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
55: * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
56: * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
57: *
58: * Carnegie Mellon requests users of this software to return to
59: *
60: * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
61: * School of Computer Science
62: * Carnegie Mellon University
63: * Pittsburgh PA 15213-3890
64: *
65: * any improvements or extensions that they make and grant Carnegie the
66: * rights to redistribute these changes.
67: */
68:
1.5 mrg 69: #include "opt_uvmhist.h"
1.15 tron 70: #include "opt_sysv.h"
1.5 mrg 71:
1.1 mrg 72: /*
73: * uvm_glue.c: glue functions
74: */
75:
76: #include <sys/param.h>
77: #include <sys/systm.h>
78: #include <sys/proc.h>
79: #include <sys/resourcevar.h>
80: #include <sys/buf.h>
81: #include <sys/user.h>
82: #ifdef SYSVSHM
83: #include <sys/shm.h>
84: #endif
85:
86: #include <vm/vm.h>
87: #include <vm/vm_page.h>
88: #include <vm/vm_kern.h>
89:
90: #include <uvm/uvm.h>
91:
92: #include <machine/cpu.h>
93:
94: /*
95: * local prototypes
96: */
97:
98: static void uvm_swapout __P((struct proc *));
99:
100: /*
101: * XXXCDC: do these really belong here?
102: */
103:
104: unsigned maxdmap = MAXDSIZ; /* kern_resource.c: RLIMIT_DATA max */
105: unsigned maxsmap = MAXSSIZ; /* kern_resource.c: RLIMIT_STACK max */
106:
107: int readbuffers = 0; /* allow KGDB to read kern buffer pool */
108: /* XXX: see uvm_kernacc */
109:
1.28 thorpej 110:
111: /*
1.1 mrg 112: * uvm_kernacc: can the kernel access a region of memory
113: *
114: * - called from malloc [DIAGNOSTIC], and the /dev/kmem driver (mem.c)
115: */
116:
1.6 mrg 117: boolean_t
118: uvm_kernacc(addr, len, rw)
119: caddr_t addr;
1.11 kleink 120: size_t len;
121: int rw;
1.6 mrg 122: {
123: boolean_t rv;
1.13 eeh 124: vaddr_t saddr, eaddr;
1.6 mrg 125: vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
126:
1.31 kleink 127: saddr = trunc_page((vaddr_t)addr);
128: eaddr = round_page((vaddr_t)addr+len);
1.6 mrg 129: vm_map_lock_read(kernel_map);
130: rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
131: vm_map_unlock_read(kernel_map);
132:
133: /*
134: * XXX there are still some things (e.g. the buffer cache) that
135: * are managed behind the VM system's back, so even though an
136: * address is accessible in the mind of the VM system, there may
137: * not be physical pages where the VM thinks there are. This can
138: * lead to bogus allocation of pages in the kernel address space
139: * or worse, inconsistencies at the pmap level. We only worry
140: * about the buffer cache for now.
141: */
1.13 eeh 142: if (!readbuffers && rv && (eaddr > (vaddr_t)buffers &&
143: saddr < (vaddr_t)buffers + MAXBSIZE * nbuf))
1.6 mrg 144: rv = FALSE;
145: return(rv);
1.1 mrg 146: }
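
A minimal sketch of how a /dev/kmem-style caller might use uvm_kernacc() to
validate a region before touching it; mmrw_sketch, its EFAULT return (from
<sys/errno.h>), and the elided copy loop are illustrative assumptions, not
the actual mem.c code.

int
mmrw_sketch(addr, len, rw)
	caddr_t addr;		/* kernel address to access */
	size_t len;		/* length of the transfer */
	int rw;			/* B_READ or B_WRITE */
{

	/* refuse the transfer outright if the region isn't accessible */
	if (uvm_kernacc(addr, len, rw) == FALSE)
		return (EFAULT);
	/* ... perform the actual copy against addr here ... */
	return (0);
}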
147:
148: /*
149: * uvm_useracc: can the user access it?
150: *
151: * - called from physio() and sys___sysctl().
152: */
153:
1.6 mrg 154: boolean_t
155: uvm_useracc(addr, len, rw)
156: caddr_t addr;
1.11 kleink 157: size_t len;
158: int rw;
1.1 mrg 159: {
1.25 thorpej 160: vm_map_t map;
1.6 mrg 161: boolean_t rv;
162: vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
1.1 mrg 163:
1.25 thorpej 164: /* XXX curproc */
165: map = &curproc->p_vmspace->vm_map;
166:
167: vm_map_lock_read(map);
1.31 kleink 168: rv = uvm_map_checkprot(map, trunc_page((vaddr_t)addr),
169: round_page((vaddr_t)addr+len), prot);
1.25 thorpej 170: vm_map_unlock_read(map);
171:
1.6 mrg 172: return(rv);
1.1 mrg 173: }
174:
175: #ifdef KGDB
176: /*
177: * Change protections on kernel pages from addr to addr+len
178: * (presumably so the debugger can plant a breakpoint).
179: *
180: * We force the protection change at the pmap level. If we were
181: * to use vm_map_protect(), a change to allow writing would be
182: * lazily applied, meaning we would still take a protection fault,
183: * something we really don't want to do. It would also fragment the
184: * kernel map unnecessarily. We cannot use pmap_protect() since it
185: * also won't enforce a write-enable request. Using pmap_enter() is
186: * the only way we can ensure the change takes place properly.
187: */
1.6 mrg 188: void
189: uvm_chgkprot(addr, len, rw)
1.32 augustss 190: caddr_t addr;
1.11 kleink 191: size_t len;
192: int rw;
1.6 mrg 193: {
194: vm_prot_t prot;
1.13 eeh 195: paddr_t pa;
196: vaddr_t sva, eva;
1.6 mrg 197:
198: prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
1.31 kleink 199: eva = round_page((vaddr_t)addr + len);
200: for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
1.6 mrg 201: /*
202: * Extract physical address for the page.
203: * We use a cheesy hack to differentiate physical
204: * page 0 from an invalid mapping, not that it
205: * really matters...
206: */
1.27 thorpej 207: if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
1.6 mrg 208: panic("chgkprot: invalid page");
1.30 thorpej 209: pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
1.6 mrg 210: }
1.1 mrg 211: }
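
A hypothetical sketch of the breakpoint-planting pattern described above:
write-enable the text page, patch the instruction, then restore read-only
access. The zero opcode and the memcpy()-based patch are illustrative
placeholders, not a real debugger's code.

void
kgdb_plant_sketch(addr)
	caddr_t addr;		/* kernel text address to patch */
{
	static const u_int32_t bkpt_insn = 0;	/* illustrative opcode */

	uvm_chgkprot(addr, sizeof(bkpt_insn), B_WRITE);
	memcpy(addr, &bkpt_insn, sizeof(bkpt_insn));	/* plant it */
	uvm_chgkprot(addr, sizeof(bkpt_insn), B_READ);	/* re-protect */
}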
212: #endif
213:
214: /*
215: * uvm_vslock: wire user memory for I/O
216: *
217: * - called from physio and sys___sysctl
218: * - XXXCDC: consider nuking this (or making it a macro?)
219: */
220:
1.26 thorpej 221: int
1.22 thorpej 222: uvm_vslock(p, addr, len, access_type)
1.9 thorpej 223: struct proc *p;
1.6 mrg 224: caddr_t addr;
1.11 kleink 225: size_t len;
1.22 thorpej 226: vm_prot_t access_type;
1.1 mrg 227: {
1.26 thorpej 228: vm_map_t map;
229: vaddr_t start, end;
230: int rv;
231:
232: map = &p->p_vmspace->vm_map;
1.31 kleink 233: start = trunc_page((vaddr_t)addr);
234: end = round_page((vaddr_t)addr + len);
1.26 thorpej 235:
236: rv = uvm_fault_wire(map, start, end, access_type);
1.21 thorpej 237:
1.26 thorpej 238: return (rv);
1.1 mrg 239: }
240:
241: /*
242: * uvm_vsunlock: unwire user memory wired by uvm_vslock()
243: *
244: * - called from physio and sys___sysctl
245: * - XXXCDC: consider nuking this (or making it a macro?)
246: */
247:
1.6 mrg 248: void
1.9 thorpej 249: uvm_vsunlock(p, addr, len)
250: struct proc *p;
1.6 mrg 251: caddr_t addr;
1.11 kleink 252: size_t len;
1.1 mrg 253: {
1.31 kleink 254: uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
255: round_page((vaddr_t)addr+len));
1.1 mrg 256: }
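
A hedged sketch of the wire/unwire bracket that physio-style callers are
expected to place around an I/O operation; userio_sketch, the EFAULT mapping
(from <sys/errno.h>), and the elided transfer are assumptions for
illustration. A real caller would pass an access_type matching the
transfer direction rather than wiring for both.

int
userio_sketch(p, addr, len)
	struct proc *p;
	caddr_t addr;		/* user buffer */
	size_t len;
{
	int rv;

	/* wire the user pages so the device can safely touch them */
	rv = uvm_vslock(p, addr, len, VM_PROT_READ | VM_PROT_WRITE);
	if (rv != KERN_SUCCESS)
		return (EFAULT);
	/* ... start the transfer and wait for it to complete ... */
	uvm_vsunlock(p, addr, len);	/* always unwire what was wired */
	return (0);
}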
257:
258: /*
259: * uvm_fork: fork a virtual address space
260: *
261: * - the address space is copied as per the parent map's inherit values
262: * - a new "user" structure is allocated for the child process
263: * [filled in by MD layer...]
1.20 thorpej 264: * - if specified, the child gets a new user stack described by
265: * stack and stacksize
1.1 mrg 266: * - NOTE: the kernel stack may be at a different location in the child
267: * process, and thus addresses of automatic variables may be invalid
268: * after cpu_fork returns in the child process. We do nothing here
269: * after cpu_fork returns.
270: * - XXXCDC: we need a way for this to return a failure value rather
271: * than just hang
272: */
1.6 mrg 273: void
1.34 thorpej 274: uvm_fork(p1, p2, shared, stack, stacksize, func, arg)
1.6 mrg 275: struct proc *p1, *p2;
276: boolean_t shared;
1.20 thorpej 277: void *stack;
278: size_t stacksize;
1.34 thorpej 279: void (*func) __P((void *));
280: void *arg;
1.6 mrg 281: {
1.7 thorpej 282: struct user *up = p2->p_addr;
1.6 mrg 283: int rv;
284:
285: if (shared == TRUE)
286: uvmspace_share(p1, p2); /* share vmspace */
287: else
288: p2->p_vmspace = uvmspace_fork(p1->p_vmspace); /* fork vmspace */
1.1 mrg 289:
1.6 mrg 290: /*
1.7 thorpej 291: * Wire down the U-area for the process, which contains the PCB
292: * and the kernel stack. Wired state is stored in p->p_flag's
293: * P_INMEM bit rather than in the vm_map_entry's wired count
294: * to prevent kernel_map fragmentation.
1.21 thorpej 295: *
296: * Note the kernel stack gets read/write accesses right off
297: * the bat.
1.6 mrg 298: */
1.13 eeh 299: rv = uvm_fault_wire(kernel_map, (vaddr_t)up,
1.21 thorpej 300: (vaddr_t)up + USPACE, VM_PROT_READ | VM_PROT_WRITE);
1.6 mrg 301: if (rv != KERN_SUCCESS)
1.8 thorpej 302: panic("uvm_fork: uvm_fault_wire failed: %d", rv);
1.6 mrg 303:
304: /*
1.19 thorpej 305: * p_stats currently points at a field in the user struct. Copy
306: * parts of p_stats, and zero out the rest.
1.6 mrg 307: */
308: p2->p_stats = &up->u_stats;
1.12 perry 309: memset(&up->u_stats.pstat_startzero, 0,
1.1 mrg 310: (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
311: (caddr_t)&up->u_stats.pstat_startzero));
1.12 perry 312: memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,
1.1 mrg 313: ((caddr_t)&up->u_stats.pstat_endcopy -
314: (caddr_t)&up->u_stats.pstat_startcopy));
1.6 mrg 315:
316: /*
1.34 thorpej 317: * cpu_fork() copies and updates the PCB, and makes the child ready
318: * to run. If this is a normal user fork, the child will exit
319: * directly to user mode via child_return() on its first time
320: * slice and will not return here. If this is a kernel thread,
321: * the specified entry point will be executed.
1.6 mrg 322: */
1.34 thorpej 323: cpu_fork(p1, p2, stack, stacksize, func, arg);
1.14 thorpej 324: }
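
To make the cpu_fork() callback contract concrete, here is a hedged sketch
of how a fork path might invoke uvm_fork(): a normal user fork hands over
child_return (the MD trampoline target in kern_fork.c) as the entry point,
while a kernel thread would pass its own function. The wrapper itself is
hypothetical.

extern void child_return __P((void *));	/* from kern_fork.c */

void
fork_glue_sketch(p1, p2, shared, stack, stacksize)
	struct proc *p1, *p2;
	boolean_t shared;
	void *stack;
	size_t stacksize;
{

	/* the child resumes in child_return(p2) on its first time slice */
	uvm_fork(p1, p2, shared, stack, stacksize, child_return,
	    (void *)p2);
}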
325:
326: /*
327: * uvm_exit: exit a virtual address space
328: *
329: * - the process passed to us is a dead (pre-zombie) process; we
330: * are running on a different context now (the reaper).
331: * - we must run in a separate thread because freeing the vmspace
332: * of the dead process may block.
333: */
334: void
335: uvm_exit(p)
336: struct proc *p;
337: {
338:
339: uvmspace_free(p->p_vmspace);
340: uvm_km_free(kernel_map, (vaddr_t)p->p_addr, USPACE);
1.36 ! simonb 341: p->p_addr = NULL;
1.1 mrg 342: }
343:
344: /*
345: * uvm_init_limits: init per-process VM limits
346: *
347: * - called for process 0 and then inherited by all others.
348: */
1.6 mrg 349: void
350: uvm_init_limits(p)
351: struct proc *p;
352: {
353:
354: /*
355: * Set up the initial limits on process VM. Set the maximum
356: * resident set size to be all of (reasonably) available memory.
357: * This causes any single, large process to start random page
358: * replacement once it fills memory.
359: */
360:
361: p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
362: p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
363: p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
364: p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
365: p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
1.1 mrg 366: }
367:
368: #ifdef DEBUG
369: int enableswap = 1;
370: int swapdebug = 0;
371: #define SDB_FOLLOW 1
372: #define SDB_SWAPIN 2
373: #define SDB_SWAPOUT 4
374: #endif
375:
376: /*
377: * uvm_swapin: swap in a process's u-area.
378: */
379:
1.6 mrg 380: void
381: uvm_swapin(p)
382: struct proc *p;
383: {
1.13 eeh 384: vaddr_t addr;
1.6 mrg 385: int s;
386:
1.13 eeh 387: addr = (vaddr_t)p->p_addr;
1.6 mrg 388: /* make P_INMEM true */
1.21 thorpej 389: uvm_fault_wire(kernel_map, addr, addr + USPACE,
390: VM_PROT_READ | VM_PROT_WRITE);
1.6 mrg 391:
392: /*
393: * Some architectures need to be notified when the user area has
394: * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
395: */
396: cpu_swapin(p);
397: s = splstatclock();
398: if (p->p_stat == SRUN)
399: setrunqueue(p);
400: p->p_flag |= P_INMEM;
401: splx(s);
402: p->p_swtime = 0;
403: ++uvmexp.swapins;
1.1 mrg 404: }
405:
406: /*
407: * uvm_scheduler: process zero main loop
408: *
409: * - attempt to swap in every swapped-out, runnable process in order of
410: * priority.
411: * - if not enough memory, wake the pagedaemon and let it clear space.
412: */
413:
1.6 mrg 414: void
415: uvm_scheduler()
1.1 mrg 416: {
1.32 augustss 417: struct proc *p;
418: int pri;
1.6 mrg 419: struct proc *pp;
420: int ppri;
421: UVMHIST_FUNC("uvm_scheduler"); UVMHIST_CALLED(maphist);
1.1 mrg 422:
423: loop:
424: #ifdef DEBUG
1.6 mrg 425: while (!enableswap)
426: tsleep((caddr_t)&proc0, PVM, "noswap", 0);
1.1 mrg 427: #endif
1.6 mrg 428: pp = NULL; /* process to choose */
429: ppri = INT_MIN; /* its priority */
1.29 thorpej 430: proclist_lock_read();
1.6 mrg 431: for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
432:
433: /* is it a runnable swapped out process? */
434: if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
435: pri = p->p_swtime + p->p_slptime -
436: (p->p_nice - NZERO) * 8;
437: if (pri > ppri) { /* higher priority? remember it. */
438: pp = p;
439: ppri = pri;
440: }
441: }
442: }
1.28 thorpej 443: proclist_unlock_read();
1.1 mrg 444:
445: #ifdef DEBUG
1.6 mrg 446: if (swapdebug & SDB_FOLLOW)
447: printf("scheduler: running, procp %p pri %d\n", pp, ppri);
1.1 mrg 448: #endif
1.6 mrg 449: /*
450: * Nothing to do, back to sleep
451: */
452: if ((p = pp) == NULL) {
453: tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
454: goto loop;
455: }
456:
457: /*
458: * we have found a swapped-out process which we would like to bring
459: * back in.
460: *
461: * XXX: this part is really bogus because we could deadlock on memory
462: * despite our feeble check
463: */
464: if (uvmexp.free > atop(USPACE)) {
1.1 mrg 465: #ifdef DEBUG
1.6 mrg 466: if (swapdebug & SDB_SWAPIN)
467: printf("swapin: pid %d(%s)@%p, pri %d free %d\n",
1.1 mrg 468: p->p_pid, p->p_comm, p->p_addr, ppri, uvmexp.free);
469: #endif
1.6 mrg 470: uvm_swapin(p);
471: goto loop;
472: }
473: /*
474: * not enough memory, jab the pageout daemon and wait until the coast
475: * is clear
476: */
1.1 mrg 477: #ifdef DEBUG
1.6 mrg 478: if (swapdebug & SDB_FOLLOW)
479: printf("scheduler: no room for pid %d(%s), free %d\n",
1.1 mrg 480: p->p_pid, p->p_comm, uvmexp.free);
481: #endif
1.6 mrg 482: (void) splhigh();
483: uvm_wait("schedpwait");
484: (void) spl0();
1.1 mrg 485: #ifdef DEBUG
1.6 mrg 486: if (swapdebug & SDB_FOLLOW)
487: printf("scheduler: room again, free %d\n", uvmexp.free);
1.1 mrg 488: #endif
1.6 mrg 489: goto loop;
1.1 mrg 490: }
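
To make the swap-in priority formula above concrete with illustrative
numbers: a process swapped out for 20 seconds that has also slept 4 seconds
at nice 0 (NZERO) scores pri = 20 + 4 - 0 * 8 = 24, while one swapped out
for 10 seconds at nice NZERO+5 scores pri = 10 + 0 - 5 * 8 = -30, so the
first is brought back in first.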
491:
492: /*
493: * swappable: is process "p" swappable?
494: */
495:
496: #define swappable(p) \
497: (((p)->p_flag & (P_SYSTEM | P_INMEM | P_WEXIT)) == P_INMEM && \
498: (p)->p_holdcnt == 0)
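/*
 * Note: the single mask test above is true only when P_INMEM is set
 * and both P_SYSTEM and P_WEXIT are clear, i.e. the process is
 * resident, is not a system process, is not already exiting, and
 * (per the second clause) its u-area is not held (p_holdcnt == 0).
 */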
499:
500: /*
501: * uvm_swapout_threads: find threads that can be swapped out and
502: * unwire their u-areas.
503: *
504: * - called by the pagedaemon
505: * - try to swap out at least one process
506: * - processes that are sleeping or stopped for maxslp or more seconds
507: * are swapped... otherwise the longest-sleeping or stopped process
508: * is swapped, otherwise the longest resident process...
509: */
1.6 mrg 510: void
511: uvm_swapout_threads()
1.1 mrg 512: {
1.32 augustss 513: struct proc *p;
1.6 mrg 514: struct proc *outp, *outp2;
515: int outpri, outpri2;
516: int didswap = 0;
517: extern int maxslp;
518: /* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */
1.1 mrg 519:
520: #ifdef DEBUG
1.6 mrg 521: if (!enableswap)
522: return;
1.1 mrg 523: #endif
524:
1.6 mrg 525: /*
526: * outp/outpri : stop/sleep process with largest sleeptime < maxslp
527: * outp2/outpri2: the longest resident process (its swap time)
528: */
529: outp = outp2 = NULL;
530: outpri = outpri2 = 0;
1.29 thorpej 531: proclist_lock_read();
1.6 mrg 532: for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
533: if (!swappable(p))
534: continue;
535: switch (p->p_stat) {
536: case SRUN:
1.33 thorpej 537: case SONPROC:
1.6 mrg 538: if (p->p_swtime > outpri2) {
539: outp2 = p;
540: outpri2 = p->p_swtime;
541: }
542: continue;
1.1 mrg 543:
1.6 mrg 544: case SSLEEP:
545: case SSTOP:
546: if (p->p_slptime >= maxslp) {
547: uvm_swapout(p); /* zap! */
548: didswap++;
549: } else if (p->p_slptime > outpri) {
550: outp = p;
551: outpri = p->p_slptime;
552: }
553: continue;
554: }
555: }
1.28 thorpej 556: proclist_unlock_read();
1.6 mrg 557:
558: /*
559: * If we didn't get rid of any real duds, toss out the next most
560: * likely sleeping/stopped or running candidate. We only do this
561: * if we are really low on memory since we don't gain much by doing
562: * it (USPACE bytes).
563: */
564: if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
565: if ((p = outp) == NULL)
566: p = outp2;
1.1 mrg 567: #ifdef DEBUG
1.6 mrg 568: if (swapdebug & SDB_SWAPOUT)
569: printf("swapout_threads: no duds, try procp %p\n", p);
1.1 mrg 570: #endif
1.6 mrg 571: if (p)
572: uvm_swapout(p);
573: }
1.1 mrg 574: }
575:
576: /*
577: * uvm_swapout: swap out process "p"
578: *
579: * - currently "swapout" means "unwire U-area" and "pmap_collect()"
580: * the pmap.
581: * - XXXCDC: should deactivate all process' private anonymous memory
582: */
583:
1.6 mrg 584: static void
585: uvm_swapout(p)
1.32 augustss 586: struct proc *p;
1.1 mrg 587: {
1.13 eeh 588: vaddr_t addr;
1.6 mrg 589: int s;
1.1 mrg 590:
591: #ifdef DEBUG
1.6 mrg 592: if (swapdebug & SDB_SWAPOUT)
593: printf("swapout: pid %d(%s)@%p, stat %x pri %d free %d\n",
1.1 mrg 594: p->p_pid, p->p_comm, p->p_addr, p->p_stat,
595: p->p_slptime, uvmexp.free);
596: #endif
597:
1.6 mrg 598: /*
599: * Do any machine-specific actions necessary before swapout.
600: * This can include saving floating point state, etc.
601: */
602: cpu_swapout(p);
603:
604: /*
605: * Unwire the to-be-swapped process's user struct and kernel stack.
606: */
1.13 eeh 607: addr = (vaddr_t)p->p_addr;
1.23 thorpej 608: uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !P_INMEM */
1.6 mrg 609: pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
610:
611: /*
612: * Mark it as (potentially) swapped out.
613: */
614: s = splstatclock();
615: p->p_flag &= ~P_INMEM;
616: if (p->p_stat == SRUN)
617: remrunqueue(p);
618: splx(s);
619: p->p_swtime = 0;
620: ++uvmexp.swapouts;
1.1 mrg 621: }
622: