/*	$NetBSD: uvm_glue.c,v 1.18 1999/03/26 21:58:39 mycroft Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_sysv.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>

/*
 * local prototypes
 */

static void uvm_swapout __P((struct proc *));

/*
 * XXXCDC: do these really belong here?
 */

unsigned maxdmap = MAXDSIZ;	/* kern_resource.c: RLIMIT_DATA max */
unsigned maxsmap = MAXSSIZ;	/* kern_resource.c: RLIMIT_STACK max */

int readbuffers = 0;		/* allow KGDB to read kern buffer pool */
				/* XXX: see uvm_kernacc */


/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
 */

boolean_t
uvm_kernacc(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	boolean_t rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr+len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there is.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	if (!readbuffers && rv && (eaddr > (vaddr_t)buffers &&
	    saddr < (vaddr_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return(rv);
}
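
#if 0
/*
 * Illustrative sketch (not compiled in): how a caller such as the
 * /dev/kmem read path might gate a copy-out with uvm_kernacc().
 * "example_kmem_read", "kva" and "cnt" are hypothetical names for
 * the kernel region the caller wants to touch; they are not part
 * of this file.
 */
int
example_kmem_read(kva, cnt, uio)
	caddr_t kva;
	size_t cnt;
	struct uio *uio;
{

	if (!uvm_kernacc(kva, cnt, B_READ))
		return (EFAULT);	/* region not readable; don't touch it */
	return (uiomove(kva, cnt, uio));
}
#endif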

/*
 * uvm_useracc: can the user access it?
 *
 * - called from physio() and sys___sysctl().
 */

boolean_t
uvm_useracc(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

#if defined(i386) || defined(pc532)
	/*
	 * XXX - specially disallow access to user page tables - they are
	 * in the map.  This is here until i386 & pc532 pmaps are fixed...
	 */
	if ((vaddr_t) addr >= VM_MAXUSER_ADDRESS
	    || (vaddr_t) addr + len > VM_MAXUSER_ADDRESS
	    || (vaddr_t) addr + len <= (vaddr_t) addr)
		return (FALSE);
#endif

	rv = uvm_map_checkprot(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr+len), prot);
	return(rv);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily
 * applied, meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(addr, len, rw)
	register caddr_t addr;
	size_t len;
	int rw;
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page(addr + len);
	for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 * We use a cheezy hack to differentiate physical
		 * page 0 from an invalid mapping, not that it
		 * really matters...
		 */
		pa = pmap_extract(pmap_kernel(), sva|1);
		if (pa == 0)
			panic("chgkprot: invalid page");
		pmap_enter(pmap_kernel(), sva, pa&~1, prot, TRUE, 0);
	}
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vslock(p, addr, len)
	struct proc *p;
	caddr_t addr;
	size_t len;
{
	uvm_fault_wire(&p->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr+len));
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(p, addr, len)
	struct proc *p;
	caddr_t addr;
	size_t len;
{
	uvm_fault_unwire(p->p_vmspace->vm_map.pmap, trunc_page(addr),
	    round_page(addr+len));
}
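
#if 0
/*
 * Illustrative sketch (not compiled in): the bracket a physio-style
 * caller would place around raw I/O on a user buffer -- check access,
 * wire the pages so they stay resident for the transfer, then unwire.
 * "do_transfer" is a hypothetical stand-in for the actual device I/O;
 * "p", "buf", "len" and "error" are assumed caller locals.
 */
	if (!uvm_useracc(buf, len, B_WRITE))
		return (EFAULT);	/* user buffer not writable */
	uvm_vslock(p, buf, len);	/* fault in and wire the pages */
	error = do_transfer(buf, len);	/* pages cannot be paged out here */
	uvm_vsunlock(p, buf, len);	/* unwire now that the I/O is done */
#endif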

/*
 * uvm_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_fork returns in the child process.  We do nothing here
 *	after cpu_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_fork(p1, p2, shared)
	struct proc *p1, *p2;
	boolean_t shared;
{
	struct user *up = p2->p_addr;
	int rv;

	if (shared == TRUE)
		uvmspace_share(p1, p2);			/* share vmspace */
	else
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace); /* fork vmspace */

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in p->p_flag's
	 * P_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.
	 */
	rv = uvm_fault_wire(kernel_map, (vaddr_t)up,
	    (vaddr_t)up + USPACE);
	if (rv != KERN_SUCCESS)
		panic("uvm_fork: uvm_fault_wire failed: %d", rv);

	/*
	 * p_stats currently points at a field in the user struct.  Copy
	 * parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = &up->u_stats;
	memset(&up->u_stats.pstat_startzero, 0,
	    (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
		(caddr_t)&up->u_stats.pstat_startzero));
	memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
		(caddr_t)&up->u_stats.pstat_startcopy));
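
	/*
	 * (Aside: pstat_startzero/pstat_endzero and pstat_startcopy/
	 * pstat_endcopy are marker names defined in <sys/resourcevar.h>
	 * that bracket the runs of struct pstats members to be cleared
	 * in the child versus inherited from the parent, so the two
	 * address ranges above track the structure layout automatically.)
	 */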

	/*
	 * cpu_fork will copy and update the kernel stack and pcb, and make
	 * the child ready to run.  The child will exit directly to user
	 * mode on its first time slice, and will not return here.
	 */
	cpu_fork(p1, p2);
}

/*
 * uvm_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - we must run in a separate thread because freeing the vmspace
 *   of the dead process may block.
 */
void
uvm_exit(p)
	struct proc *p;
{

	uvmspace_free(p->p_vmspace);
	uvm_km_free(kernel_map, (vaddr_t)p->p_addr, USPACE);
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */
void
uvm_init_limits(p)
	struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in a process's u-area.
 */

void
uvm_swapin(p)
	struct proc *p;
{
	vaddr_t addr;
	int s;

	addr = (vaddr_t)p->p_addr;
	/* make P_INMEM true */
	uvm_fault_wire(kernel_map, addr, addr + USPACE);

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(p);
	s = splstatclock();
	if (p->p_stat == SRUN)
		setrunqueue(p);
	p->p_flag |= P_INMEM;
	splx(s);
	p->p_swtime = 0;
	++uvmexp.swapins;
}

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order of
 *	priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	UVMHIST_FUNC("uvm_scheduler"); UVMHIST_CALLED(maphist);

loop:
#ifdef DEBUG
	while (!enableswap)
		tsleep((caddr_t)&proc0, PVM, "noswap", 0);
#endif
	pp = NULL;		/* process to choose */
	ppri = INT_MIN;		/* its priority */
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {

		/* is it a runnable swapped out process? */
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			pri = p->p_swtime + p->p_slptime -
			    (p->p_nice - NZERO) * 8;
			if (pri > ppri) {   /* higher priority?  remember it. */
				pp = p;
				ppri = pri;
			}
		}
	}
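
	/*
	 * Worked example of the pri computation above: a process with
	 * p_swtime 10, p_slptime 3, and the default nice value (NZERO)
	 * gets pri = 10 + 3 - 0 * 8 = 13; renicing it to NZERO + 4
	 * lowers that to 13 - 32 = -19.  So long-swapped processes are
	 * preferred, and "nicer" ones are brought back in later.
	 */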

#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: running, procp %p pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
		goto loop;
	}
437: /*
438: * we have found swapped out process which we would like to bring
439: * back in.
440: *
441: * XXX: this part is really bogus cuz we could deadlock on memory
442: * despite our feeble check
443: */
444: if (uvmexp.free > atop(USPACE)) {
1.1 mrg 445: #ifdef DEBUG
1.6 mrg 446: if (swapdebug & SDB_SWAPIN)
447: printf("swapin: pid %d(%s)@%p, pri %d free %d\n",
1.1 mrg 448: p->p_pid, p->p_comm, p->p_addr, ppri, uvmexp.free);
449: #endif
1.6 mrg 450: uvm_swapin(p);
451: goto loop;
452: }
453: /*
454: * not enough memory, jab the pageout daemon and wait til the coast
455: * is clear
456: */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: no room for pid %d(%s), free %d\n",
		    p->p_pid, p->p_comm, uvmexp.free);
#endif
	(void) splhigh();
	uvm_wait("schedpwait");
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: room again, free %d\n", uvmexp.free);
#endif
	goto loop;
}

/*
 * swappable: is process "p" swappable?
 */

#define	swappable(p)							\
	(((p)->p_flag & (P_SYSTEM | P_INMEM | P_WEXIT)) == P_INMEM &&	\
	 (p)->p_holdcnt == 0)
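
/*
 * Note the equality test against P_INMEM above: a process qualifies
 * only if it is resident (P_INMEM set) and is neither a system process
 * (P_SYSTEM, e.g. the pagedaemon) nor already exiting (P_WEXIT).  A
 * non-zero p_holdcnt means someone (physio, for example) has pinned
 * the u-area in core, so such processes are skipped as well.
 */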

/*
 * swapout_threads: find threads that can be swapped and unwire their
 *	u-areas.
 *
 * - called by the pagedaemon
 * - try to swap at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */
void
uvm_swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outp/outpri  : stop/sleep process with largest sleeptime < maxslp
	 * outp2/outpri2: the longest resident process (its swap time)
	 */
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			if (p->p_swtime > outpri2) {
				outp2 = p;
				outpri2 = p->p_swtime;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_slptime >= maxslp) {
				uvm_swapout(p);		/* zap! */
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are really low on memory since we don't gain much by
	 * doing it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((p = outp) == NULL)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %p\n", p);
#endif
		if (p)
			uvm_swapout(p);
	}
}

/*
 * uvm_swapout: swap out process "p"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - XXXCDC: should deactivate all of the process's private anonymous
 *   memory
 */

static void
uvm_swapout(p)
	register struct proc *p;
{
	vaddr_t addr;
	int s;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%p, stat %x pri %d free %d\n",
		    p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		    p->p_slptime, uvmexp.free);
#endif

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(p);

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	addr = (vaddr_t)p->p_addr;
	uvm_fault_unwire(kernel_map->pmap, addr, addr + USPACE); /* !P_INMEM */
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));

	/*
	 * Mark it as (potentially) swapped out.
	 */
	s = splstatclock();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrunqueue(p);
	splx(s);
	p->p_swtime = 0;
	++uvmexp.swapouts;
}