Annotation of src/sys/uvm/uvm_glue.c, Revision 1.150
1.150 ! rmind 1: /* $NetBSD$ */
1.1 mrg 2:
1.48 chs 3: /*
1.1 mrg 4: * Copyright (c) 1997 Charles D. Cranor and Washington University.
1.48 chs 5: * Copyright (c) 1991, 1993, The Regents of the University of California.
1.1 mrg 6: *
7: * All rights reserved.
8: *
9: * This code is derived from software contributed to Berkeley by
10: * The Mach Operating System project at Carnegie-Mellon University.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
1.147 chuck 20: * 3. Neither the name of the University nor the names of its contributors
1.1 mrg 21: * may be used to endorse or promote products derived from this software
22: * without specific prior written permission.
23: *
24: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34: * SUCH DAMAGE.
35: *
36: * @(#)vm_glue.c 8.6 (Berkeley) 1/5/94
1.4 mrg 37: * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
1.1 mrg 38: *
39: *
40: * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41: * All rights reserved.
1.48 chs 42: *
1.1 mrg 43: * Permission to use, copy, modify and distribute this software and
44: * its documentation is hereby granted, provided that both the copyright
45: * notice and this permission notice appear in all copies of the
46: * software, derivative works or modified versions, and any portions
47: * thereof, and that both notices appear in supporting documentation.
1.48 chs 48: *
49: * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50: * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
1.1 mrg 51: * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
1.48 chs 52: *
1.1 mrg 53: * Carnegie Mellon requests users of this software to return to
54: *
55: * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
56: * School of Computer Science
57: * Carnegie Mellon University
58: * Pittsburgh PA 15213-3890
59: *
60: * any improvements or extensions that they make and grant Carnegie the
61: * rights to redistribute these changes.
62: */
1.55 lukem 63:
64: #include <sys/cdefs.h>
1.150 ! rmind 65: __KERNEL_RCSID(0, "$NetBSD$");
1.1 mrg 66:
1.49 lukem 67: #include "opt_kgdb.h"
1.59 yamt 68: #include "opt_kstack.h"
1.5 mrg 69: #include "opt_uvmhist.h"
70:
1.1 mrg 71: /*
72: * uvm_glue.c: glue functions
73: */
74:
75: #include <sys/param.h>
1.145 rmind 76: #include <sys/kernel.h>
77:
1.1 mrg 78: #include <sys/systm.h>
79: #include <sys/proc.h>
80: #include <sys/resourcevar.h>
81: #include <sys/buf.h>
1.106 yamt 82: #include <sys/syncobj.h>
1.111 ad 83: #include <sys/cpu.h>
1.114 ad 84: #include <sys/atomic.h>
1.146 rmind 85: #include <sys/lwp.h>
1.1 mrg 86:
87: #include <uvm/uvm.h>
88:
89: /*
1.150 ! rmind 90: * uvm_kernacc: test whether the kernel can access a memory region.
1.1 mrg 91: *
1.150 ! rmind 92: * => Currently used only by the /dev/kmem driver (dev/mm.c).
1.1 mrg 93: */
1.102 thorpej 94: bool
1.150 ! rmind 95: uvm_kernacc(void *addr, size_t len, vm_prot_t prot)
1.6 mrg 96: {
1.150 ! rmind 97: vaddr_t saddr = trunc_page((vaddr_t)addr);
! 98: vaddr_t eaddr = round_page(saddr + len);
1.102 thorpej 99: bool rv;
1.6 mrg 100:
101: vm_map_lock_read(kernel_map);
102: rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
103: vm_map_unlock_read(kernel_map);
104:
1.150 ! rmind 105: return rv;
1.1 mrg 106: }
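/*
 * Illustrative sketch (not part of this file): a /dev/kmem-style read
 * routine would typically verify access before copying, e.g.
 *
 *	if (!uvm_kernacc(kva, len, VM_PROT_READ))
 *		return EFAULT;
 *	error = uiomove(kva, len, uio);
 *
 * where kva, len, uio and error are the caller's own (hypothetical) locals.
 */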
107:
108: #ifdef KGDB
109: /*
110: * Change protections on kernel pages from addr to addr+len
111: * (presumably so a debugger can plant a breakpoint).
112: *
113: * We force the protection change at the pmap level. If we were
114: * to use vm_map_protect(), a change to allow writing would be lazily
115: * applied, meaning we would still take a protection fault, which is
116: * exactly what we want to avoid. It would also fragment the kernel
117: * map unnecessarily. We cannot use pmap_protect() since it likewise
118: * will not enforce a write-enable request. Using pmap_enter() is the
119: * only way we can ensure the change takes place properly.
120: */
1.6 mrg 121: void
1.104 christos 122: uvm_chgkprot(void *addr, size_t len, int rw)
1.6 mrg 123: {
124: vm_prot_t prot;
1.13 eeh 125: paddr_t pa;
126: vaddr_t sva, eva;
1.6 mrg 127:
128: prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
1.31 kleink 129: eva = round_page((vaddr_t)addr + len);
130: for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
1.6 mrg 131: /*
132: * Extract physical address for the page.
133: */
1.103 thorpej 134: if (pmap_extract(pmap_kernel(), sva, &pa) == false)
1.123 christos 135: panic("%s: invalid page", __func__);
1.30 thorpej 136: pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
1.6 mrg 137: }
1.51 chris 138: pmap_update(pmap_kernel());
1.1 mrg 139: }
140: #endif
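/*
 * Illustrative sketch (hypothetical debugger helper, KGDB only): to plant
 * a breakpoint in kernel text, writes are enabled, the instruction is
 * patched, and the original protection is restored, e.g.
 *
 *	uvm_chgkprot(addr, sizeof(bkpt_insn), B_WRITE);
 *	memcpy(addr, &bkpt_insn, sizeof(bkpt_insn));
 *	uvm_chgkprot(addr, sizeof(bkpt_insn), B_READ);
 *
 * where bkpt_insn is an assumed MD breakpoint instruction constant.
 */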
141:
142: /*
1.52 chs 143: * uvm_vslock: wire user memory for I/O
1.1 mrg 144: *
145: * - called from physio and sys___sysctl
146: * - XXXCDC: consider nuking this (or making it a macro?)
147: */
148:
1.26 thorpej 149: int
1.97 chs 150: uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
1.1 mrg 151: {
1.50 chs 152: struct vm_map *map;
1.26 thorpej 153: vaddr_t start, end;
1.45 chs 154: int error;
1.26 thorpej 155:
1.97 chs 156: map = &vs->vm_map;
1.31 kleink 157: start = trunc_page((vaddr_t)addr);
158: end = round_page((vaddr_t)addr + len);
1.93 drochner 159: error = uvm_fault_wire(map, start, end, access_type, 0);
1.45 chs 160: return error;
1.1 mrg 161: }
162:
163: /*
1.52 chs 164: * uvm_vsunlock: unwire user memory wired by uvm_vslock()
1.1 mrg 165: *
166: * - called from physio and sys___sysctl
167: * - XXXCDC: consider nuking this (or making it a macro?)
168: */
169:
1.6 mrg 170: void
1.97 chs 171: uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
1.1 mrg 172: {
1.97 chs 173: uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
1.43 chs 174: round_page((vaddr_t)addr + len));
1.1 mrg 175: }
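/*
 * Illustrative sketch (not part of this file): a physio-style caller
 * would wire the user buffer for the duration of the transfer, e.g.
 *
 *	error = uvm_vslock(p->p_vmspace, buf, len,
 *	    reading ? VM_PROT_WRITE : VM_PROT_READ);
 *	if (error)
 *		return error;
 *	... start the transfer and wait for completion ...
 *	uvm_vsunlock(p->p_vmspace, buf, len);
 *
 * where p, buf, len and "reading" (true if the device writes into the
 * buffer) are the caller's own (hypothetical) variables.
 */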
176:
177: /*
1.62 thorpej 178: * uvm_proc_fork: fork a virtual address space
1.1 mrg 179: *
180: * - the address space is copied according to the parent map's inherit values
1.62 thorpej 181: */
182: void
1.102 thorpej 183: uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
1.62 thorpej 184: {
185:
1.103 thorpej 186: if (shared == true) {
1.62 thorpej 187: p2->p_vmspace = NULL;
188: uvmspace_share(p1, p2);
189: } else {
190: p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
191: }
192:
193: cpu_proc_fork(p1, p2);
194: }
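/*
 * Illustrative sketch (assumed caller behaviour): fork1() selects the
 * sharing mode, so a vfork(2)-style fork shares the parent's address
 * space while a plain fork(2) copies it, e.g.
 *
 *	uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) != 0);
 */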
195:
196: /*
197: * uvm_lwp_fork: fork a thread
198: *
1.146 rmind 199: * - a new PCB structure is allocated for the child LWP and
200: * filled in by the MD layer
1.20 thorpej 201: * - if specified, the child gets a new user stack described by
202: * stack and stacksize
1.1 mrg 203: * - NOTE: the kernel stack may be at a different location in the child
204: * process, and thus addresses of automatic variables may be invalid
1.62 thorpej 205: * after cpu_lwp_fork returns in the child process. We do nothing here
206: * after cpu_lwp_fork returns.
1.1 mrg 207: */
1.6 mrg 208: void
1.89 thorpej 209: uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
210: void (*func)(void *), void *arg)
1.6 mrg 211: {
212:
1.137 rmind 213: /* Fill stack with magic number. */
1.63 yamt 214: kstack_setup_magic(l2);
1.6 mrg 215:
216: /*
1.62 thorpej 217: * cpu_lwp_fork() copies and updates the PCB, and makes the child ready
218: * to run. If this is a normal user fork, the child will exit
1.34 thorpej 219: * directly to user mode via child_return() on its first time
220: * slice and will not return here. If this is a kernel thread,
221: * the specified entry point will be executed.
1.6 mrg 222: */
1.62 thorpej 223: cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);
1.138 rmind 224:
225: /* Inactive emap for new LWP. */
226: l2->l_emap_gen = UVM_EMAP_INACTIVE;
1.14 thorpej 227: }
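/*
 * Illustrative sketch (assumed caller behaviour): lwp_create() passes
 * the new thread's entry point and argument through to cpu_lwp_fork(),
 * e.g. for a kernel thread
 *
 *	uvm_lwp_fork(l1, l2, NULL, 0, start_routine, arg);
 *
 * where start_routine() is the (hypothetical) thread body that never
 * returns to this code.
 */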
228:
1.60 chs 229: #ifndef USPACE_ALIGN
1.115 yamt 230: #define USPACE_ALIGN 0
1.60 chs 231: #endif
232:
1.115 yamt 233: static pool_cache_t uvm_uarea_cache;
1.148 matt 234: #if defined(__HAVE_CPU_UAREA_ROUTINES)
235: static pool_cache_t uvm_uarea_system_cache;
236: #else
237: #define uvm_uarea_system_cache uvm_uarea_cache
238: #endif
1.115 yamt 239:
240: static void *
241: uarea_poolpage_alloc(struct pool *pp, int flags)
242: {
1.141 rmind 243: #if defined(PMAP_MAP_POOLPAGE)
1.139 matt 244: if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
245: struct vm_page *pg;
246: vaddr_t va;
247:
248: pg = uvm_pagealloc(NULL, 0, NULL,
249: ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0));
250: if (pg == NULL)
251: return NULL;
252: va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
253: if (va == 0)
254: uvm_pagefree(pg);
255: return (void *)va;
256: }
257: #endif
1.148 matt 258: #if defined(__HAVE_CPU_UAREA_ROUTINES)
259: void *va = cpu_uarea_alloc(false);
260: if (va)
261: return (void *)va;
262: #endif
1.115 yamt 263: return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
1.141 rmind 264: USPACE_ALIGN, UVM_KMF_WIRED |
265: ((flags & PR_WAITOK) ? UVM_KMF_WAITVA :
1.115 yamt 266: (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
267: }
1.109 ad 268:
1.115 yamt 269: static void
270: uarea_poolpage_free(struct pool *pp, void *addr)
271: {
1.141 rmind 272: #if defined(PMAP_MAP_POOLPAGE)
1.139 matt 273: if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
274: paddr_t pa;
275:
276: pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr);
277: KASSERT(pa != 0);
278: uvm_pagefree(PHYS_TO_VM_PAGE(pa));
279: return;
280: }
281: #endif
1.148 matt 282: #if defined(__HAVE_CPU_UAREA_ROUTINES)
283: if (cpu_uarea_free(addr))
284: return;
285: #endif
1.115 yamt 286: uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
1.141 rmind 287: UVM_KMF_WIRED);
1.115 yamt 288: }
289:
290: static struct pool_allocator uvm_uarea_allocator = {
291: .pa_alloc = uarea_poolpage_alloc,
292: .pa_free = uarea_poolpage_free,
293: .pa_pagesz = USPACE,
294: };
295:
1.148 matt 296: #if defined(__HAVE_CPU_UAREA_ROUTINES)
297: static void *
298: uarea_system_poolpage_alloc(struct pool *pp, int flags)
299: {
300: void * const va = cpu_uarea_alloc(true);
301: KASSERT(va != NULL);
302: return va;
303: }
304:
305: static void
306: uarea_system_poolpage_free(struct pool *pp, void *addr)
307: {
308: if (!cpu_uarea_free(addr))
309: panic("%s: failed to free uarea %p", __func__, addr);
310: }
311:
312: static struct pool_allocator uvm_uarea_system_allocator = {
313: .pa_alloc = uarea_system_poolpage_alloc,
314: .pa_free = uarea_system_poolpage_free,
315: .pa_pagesz = USPACE,
316: };
317: #endif /* __HAVE_CPU_UAREA_ROUTINES */
318:
1.115 yamt 319: void
320: uvm_uarea_init(void)
321: {
1.117 yamt 322: int flags = PR_NOTOUCH;
1.115 yamt 323:
1.116 yamt 324: /*
325: * specify PR_NOALIGN unless the alignment provided by
326: * the backend (USPACE_ALIGN) is sufficient to provide
327: * pool page size (UPSACE) alignment.
328: */
329:
1.117 yamt 330: if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
331: (USPACE_ALIGN % USPACE) != 0) {
332: flags |= PR_NOALIGN;
333: }
334:
335: uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
1.141 rmind 336: "uarea", &uvm_uarea_allocator, IPL_NONE, NULL, NULL, NULL);
1.149 drochner 337: #if defined(__HAVE_CPU_UAREA_ROUTINES)
338: uvm_uarea_system_cache = pool_cache_init(USPACE, USPACE_ALIGN,
339: 0, flags, "uareasys", &uvm_uarea_system_allocator,
340: IPL_NONE, NULL, NULL, NULL);
341: #endif
1.60 chs 342: }
343:
344: /*
1.115 yamt 345: * uvm_uarea_alloc: allocate a u-area
1.75 jdolecek 346: */
347:
1.141 rmind 348: vaddr_t
349: uvm_uarea_alloc(void)
1.75 jdolecek 350: {
1.109 ad 351:
1.141 rmind 352: return (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
1.75 jdolecek 353: }
354:
1.148 matt 355: vaddr_t
356: uvm_uarea_system_alloc(void)
357: {
358:
359: return (vaddr_t)pool_cache_get(uvm_uarea_system_cache, PR_WAITOK);
360: }
361:
1.75 jdolecek 362: /*
1.115 yamt 363: * uvm_uarea_free: free a u-area
1.60 chs 364: */
365:
366: void
1.141 rmind 367: uvm_uarea_free(vaddr_t uaddr)
1.60 chs 368: {
369:
1.115 yamt 370: pool_cache_put(uvm_uarea_cache, (void *)uaddr);
1.60 chs 371: }
372:
1.148 matt 373: void
374: uvm_uarea_system_free(vaddr_t uaddr)
375: {
376:
377: pool_cache_put(uvm_uarea_system_cache, (void *)uaddr);
378: }
379:
1.142 rmind 380: vaddr_t
381: uvm_lwp_getuarea(lwp_t *l)
382: {
383:
1.146 rmind 384: return (vaddr_t)l->l_addr - UAREA_PCB_OFFSET;
1.142 rmind 385: }
386:
387: void
388: uvm_lwp_setuarea(lwp_t *l, vaddr_t addr)
389: {
390:
1.146 rmind 391: l->l_addr = (void *)(addr + UAREA_PCB_OFFSET);
1.142 rmind 392: }
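/*
 * Illustrative sketch (hypothetical MI usage): MI code allocates the
 * u-area and attaches it to the LWP; MD code then reaches the PCB,
 * which lives UAREA_PCB_OFFSET bytes into the u-area, through l_addr:
 *
 *	vaddr_t uaddr = uvm_uarea_alloc();
 *	uvm_lwp_setuarea(l2, uaddr);
 *	... l2->l_addr now points at the new PCB ...
 */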
393:
1.60 chs 394: /*
1.118 yamt 395: * uvm_proc_exit: exit a virtual address space
1.80 pk 396: *
397: * - borrow proc0's address space because freeing the vmspace
398: * of the dead process may block.
399: */
400:
401: void
1.89 thorpej 402: uvm_proc_exit(struct proc *p)
1.80 pk 403: {
404: struct lwp *l = curlwp; /* XXX */
405: struct vmspace *ovm;
406:
407: KASSERT(p == l->l_proc);
408: ovm = p->p_vmspace;
409:
410: /*
411: * borrow proc0's address space.
412: */
1.129 ad 413: KPREEMPT_DISABLE(l);
1.80 pk 414: pmap_deactivate(l);
415: p->p_vmspace = proc0.p_vmspace;
416: pmap_activate(l);
1.129 ad 417: KPREEMPT_ENABLE(l);
1.80 pk 418:
419: uvmspace_free(ovm);
420: }
421:
422: void
423: uvm_lwp_exit(struct lwp *l)
424: {
1.143 rmind 425: vaddr_t va = uvm_lwp_getuarea(l);
1.148 matt 426: bool system = (l->l_flag & LW_SYSTEM) != 0;
1.80 pk 427:
1.148 matt 428: if (system)
429: uvm_uarea_system_free(va);
430: else
431: uvm_uarea_free(va);
1.143 rmind 432: #ifdef DIAGNOSTIC
433: uvm_lwp_setuarea(l, (vaddr_t)NULL);
434: #endif
1.80 pk 435: }
436:
437: /*
1.1 mrg 438: * uvm_init_limit: init per-process VM limits
439: *
440: * - called for process 0 and then inherited by all others.
441: */
1.60 chs 442:
1.6 mrg 443: void
1.89 thorpej 444: uvm_init_limits(struct proc *p)
1.6 mrg 445: {
446:
447: /*
448: * Set up the initial limits on process VM. Set the maximum
449: * resident set size to be all of (reasonably) available memory.
450: * This causes any single, large process to start random page
451: * replacement once it fills memory.
452: */
453:
454: p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
1.79 pk 455: p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
1.6 mrg 456: p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
1.79 pk 457: p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
1.136 mrg 458: p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
459: p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
1.144 jym 460: p->p_rlimit[RLIMIT_RSS].rlim_cur = MIN(
461: VM_MAXUSER_ADDRESS, ctob((rlim_t)uvmexp.free));
1.1 mrg 462: }
463:
1.99 ad 464: /*
1.141 rmind 465: * uvm_scheduler: process zero main loop.
1.1 mrg 466: */
1.145 rmind 467:
468: extern struct loadavg averunnable;
469:
1.6 mrg 470: void
1.89 thorpej 471: uvm_scheduler(void)
1.1 mrg 472: {
1.141 rmind 473: lwp_t *l = curlwp;
1.1 mrg 474:
1.99 ad 475: lwp_lock(l);
1.113 ad 476: l->l_priority = PRI_VM;
477: l->l_class = SCHED_FIFO;
1.99 ad 478: lwp_unlock(l);
479:
480: for (;;) {
1.145 rmind 481: sched_pstats();
482: (void)kpause("uvm", false, hz, NULL);
1.114 ad 483: }
1.107 ad 484: }