Annotation of src/sys/arch/vax/vax/pmap.c, Revision 1.140
1.140 ! matt 1: /* $NetBSD: pmap.c,v 1.139 2006/10/02 02:59:38 chs Exp $ */
1.1 ragge 2: /*
1.124 ragge 3: * Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Luleå, Sweden.
1.1 ragge 4: * All rights reserved.
5: *
6: * Redistribution and use in source and binary forms, with or without
7: * modification, are permitted provided that the following conditions
8: * are met:
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
14: * 3. All advertising materials mentioning features or use of this software
15: * must display the following acknowledgement:
16: * This product includes software developed at Ludd, University of Luleå.
17: * 4. The name of the author may not be used to endorse or promote products
18: * derived from this software without specific prior written permission
19: *
20: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30: */
1.128 lukem 31:
32: #include <sys/cdefs.h>
1.140 ! matt 33: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.139 2006/10/02 02:59:38 chs Exp $");
1.1 ragge 34:
1.69 ragge 35: #include "opt_ddb.h"
1.104 ragge 36: #include "opt_cputype.h"
1.101 ragge 37: #include "opt_multiprocessor.h"
38: #include "opt_lockdebug.h"
1.114 ragge 39: #include "opt_pipe.h"
1.69 ragge 40:
1.20 mycroft 41: #include <sys/types.h>
42: #include <sys/param.h>
43: #include <sys/queue.h>
44: #include <sys/malloc.h>
1.69 ragge 45: #include <sys/extent.h>
1.20 mycroft 46: #include <sys/proc.h>
47: #include <sys/user.h>
1.26 ragge 48: #include <sys/systm.h>
1.27 ragge 49: #include <sys/device.h>
1.104 ragge 50: #include <sys/buf.h>
1.18 ragge 51:
1.84 mrg 52: #include <uvm/uvm_extern.h>
1.2 ragge 53:
1.48 ragge 54: #ifdef PMAPDEBUG
55: #include <dev/cons.h>
56: #endif
57:
1.46 ragge 58: #include <uvm/uvm.h>
59:
1.20 mycroft 60: #include <machine/pte.h>
61: #include <machine/pcb.h>
62: #include <machine/mtpr.h>
63: #include <machine/macros.h>
64: #include <machine/sid.h>
65: #include <machine/cpu.h>
66: #include <machine/scb.h>
1.79 ragge 67: #include <machine/rpb.h>
1.48 ragge 68:
1.47 ragge 69: /* QDSS console mapping hack */
70: #include "qd.h"
1.63 ragge 71: void qdearly(void);
1.47 ragge 72:
1.125 thorpej 73: #define ISTACK_SIZE (PAGE_SIZE*2)
1.110 ragge 74: vaddr_t istack;
1.40 ragge 75: /*
76: * This code uses bitfield operators for most page table entries.
77: */
1.46 ragge 78: #define PROTSHIFT 27
79: #define PROT_KW (PG_KW >> PROTSHIFT)
80: #define PROT_KR (PG_KR >> PROTSHIFT)
81: #define PROT_RW (PG_RW >> PROTSHIFT)
82: #define PROT_RO (PG_RO >> PROTSHIFT)
83: #define PROT_URKW (PG_URKW >> PROTSHIFT)
1.5 ragge 84:
1.80 ragge 85: /*
86: * Scratch pages usage:
87: * Page 1: initial frame pointer during autoconfig. Stack and pcb for
1.133 wiz 88: * processes during exit on boot CPU only.
89: * Page 2: cpu_info struct for any CPU.
1.80 ragge 90: * Page 3: unused
91: * Page 4: unused
92: */
93: long scratch;
1.110 ragge 94: #define SCRATCHPAGES 4
1.80 ragge 95:
96:
1.1 ragge 97: struct pmap kernel_pmap_store;
98:
1.48 ragge 99: struct pte *Sysmap; /* System page table */
1.40 ragge 100: struct pv_entry *pv_table; /* array of entries, one per LOGICAL page */
1.70 ragge 101: int pventries;
1.110 ragge 102: vaddr_t iospace;
1.40 ragge 103:
1.54 ragge 104: vaddr_t ptemapstart, ptemapend;
1.69 ragge 105: struct extent *ptemap;
1.110 ragge 106: #define PTMAPSZ EXTENT_FIXED_STORAGE_SIZE(100)
1.69 ragge 107: char ptmapstorage[PTMAPSZ];
1.1 ragge 108:
1.38 leo 109: extern caddr_t msgbufaddr;
110:
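/* A physical address with any of the three topmost bits set is I/O space. */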
1.110 ragge 111: #define IOSPACE(p) (((u_long)(p)) & 0xe0000000)
112: #define NPTEPROCSPC 0x1000 /* # of virtual PTEs per process space */
113: #define NPTEPG 0x80 /* # of PTEs per page (logical or physical) */
114: #define PPTESZ sizeof(struct pte)
115: #define NOVADDR 0xffffffff /* Illegal virtual address */
1.111 ragge 116: #define WAITOK M_WAITOK
117: #define NOWAIT M_NOWAIT
118: #define NPTEPERREG 0x200000
1.92 ragge 119:
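/*
 * The two topmost bits of a virtual address select the VAX segment:
 * P0 (user text and data) starts at 0x00000000, P1 (user stack) runs
 * from 0x40000000 up to 0x80000000, and system space starts at
 * 0x80000000 (KERNBASE).
 */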
1.124 ragge 120: #define SEGTYPE(x) (((unsigned int)(x)) >> 30)
121: #define P0SEG 0
122: #define P1SEG 1
123: #define SYSSEG 2
124:
125: /*
 126: * Map in a logical virtual page. One 4 KB logical page is backed by
 * eight consecutive 512-byte VAX hardware pages, hence the eight PTEs.
127: */
128: static inline void
129: mapin8(int *ptep, long pte)
130: {
131: ptep[0] = pte;
132: ptep[1] = pte+1;
133: ptep[2] = pte+2;
134: ptep[3] = pte+3;
135: ptep[4] = pte+4;
136: ptep[5] = pte+5;
137: ptep[6] = pte+6;
138: ptep[7] = pte+7;
139: }
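/*
 * Illustrative use, a sketch only: given a physical address pa and
 * protection bits, a call such as
 *	mapin8(pteptr, PG_V | PG_KW | PG_PFNUM(pa));
 * fills in all eight hardware PTEs; pte+1 ... pte+7 simply address
 * the following 512-byte subpages of the same logical page.
 */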
140:
141: /*
 142: * Check if a page table page is in use. PTEs are always written in
 * clusters of eight, so testing the first PTE of each cluster suffices.
143: */
144: static inline int
145: ptpinuse(void *pte)
146: {
147: int *pve = (int *)vax_trunc_page(pte);
148: int i;
149:
150: for (i = 0; i < NPTEPG; i += 8)
151: if (pve[i] != 0)
152: return 1;
153: return 0;
154: }
155:
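/*
 * RECURSESTART/RECURSEEND bracket code that must not be re-entered;
 * with PMAPDEBUG they record the current line number so an unexpected
 * recursion into the pmap module can be reported.
 */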
1.25 ragge 156: #ifdef PMAPDEBUG
1.69 ragge 157: volatile int recurse;
158: #define RECURSESTART { \
159: if (recurse) \
160: printf("enter at %d, previous %d\n", __LINE__, recurse);\
161: recurse = __LINE__; \
162: }
163: #define RECURSEEND {recurse = 0; }
1.110 ragge 164: #define PMDEBUG(x) if (startpmapdebug)printf x
1.69 ragge 165: #else
166: #define RECURSESTART
167: #define RECURSEEND
1.110 ragge 168: #define PMDEBUG(x)
1.69 ragge 169: #endif
170:
1.101 ragge 171: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
172: static struct simplelock pvtable_lock;
1.110 ragge 173: #define PVTABLE_LOCK simple_lock(&pvtable_lock);
1.101 ragge 174: #define PVTABLE_UNLOCK simple_unlock(&pvtable_lock);
175: #else
176: #define PVTABLE_LOCK
177: #define PVTABLE_UNLOCK
178: #endif
179:
1.69 ragge 180: #ifdef PMAPDEBUG
1.18 ragge 181: int startpmapdebug = 0;
182: #endif
183:
1.118 matt 184: paddr_t avail_start, avail_end;
1.127 thorpej 185: vaddr_t virtual_avail, virtual_end; /* Available virtual memory */
1.49 thorpej 186:
1.101 ragge 187: struct pv_entry *get_pventry(void);
188: void free_pventry(struct pv_entry *);
189: void more_pventries(void);
1.124 ragge 190: vaddr_t get_ptp(int);
191: void free_ptp(paddr_t);
1.104 ragge 192:
193: /*
194: * Calculation of the System Page Table is somewhat of a pain, because it
195: * must be in contiguous physical memory and all size calculations must
196: * be done before memory management is turned on.
1.109 ragge 197: * Arg is usrptsize in ptes.
1.104 ragge 198: */
199: static vsize_t
1.109 ragge 200: calc_kvmsize(vsize_t usrptsize)
1.104 ragge 201: {
1.132 pk 202: vsize_t kvmsize, bufsz;
1.104 ragge 203:
1.139 chs 204: /*
205: * Compute the number of pages kmem_map will have.
206: */
207: kmeminit_nkmempages();
208:
1.104 ragge 209: /* All physical memory */
210: kvmsize = avail_end;
211: /* User Page table area. This may be large */
1.109 ragge 212: kvmsize += (usrptsize * sizeof(struct pte));
1.104 ragge 213: /* Kernel stacks per process */
214: kvmsize += (USPACE * maxproc);
215: /* kernel malloc arena */
1.139 chs 216: kvmsize += nkmempages * PAGE_SIZE;
1.104 ragge 217: /* IO device register space */
218: kvmsize += (IOSPSZ * VAX_NBPG);
219: /* Pager allocations */
220: kvmsize += (PAGER_MAP_SIZE + MAXBSIZE);
221: /* Anon pool structures */
222: kvmsize += (physmem * sizeof(struct vm_anon));
223:
1.132 pk 224: /* Buffer space - get size of buffer cache and set an upper limit */
225: bufsz = buf_memcalc();
226: buf_setvalimit(bufsz);
227: kvmsize += bufsz;
1.104 ragge 228:
1.109 ragge 229: /* UBC submap space */
230: kvmsize += (UBC_NWINS << UBC_WINSHIFT);
231:
1.104 ragge 232: /* Exec arg space */
233: kvmsize += NCARGS;
234: #if VAX46 || VAX48 || VAX49 || VAX53 || VAXANY
235: /* Physmap */
236: kvmsize += VM_PHYS_SIZE;
237: #endif
1.129 ragge 238: #if VAX46 || VAX49
239: kvmsize += 0x800000; /* 8 MB framebuffer */
240: #endif
1.104 ragge 241: #ifdef LKM
242: /* LKMs are allocated out of kernel_map */
1.110 ragge 243: #define MAXLKMSIZ 0x100000 /* XXX */
1.104 ragge 244: kvmsize += MAXLKMSIZ;
1.114 ragge 245: #endif
246:
247: /* The swapper uses many anons; set an arbitrary size */
248: #ifndef SWAPSIZE
249: #define SWAPSIZE (200*1024*1024) /* Assume 200MB swap */
250: #endif
251: kvmsize += ((SWAPSIZE/PAGE_SIZE)*sizeof(struct vm_anon));
252:
253: /* New pipes may steal some amount of memory. Calculate 10 pipes */
254: #ifndef PIPE_SOCKETPAIR
255: kvmsize += PIPE_DIRECT_CHUNK*10;
1.104 ragge 256: #endif
257: return kvmsize;
258: }
1.28 ragge 259:
260: /*
1.14 ragge 261: * pmap_bootstrap().
1.1 ragge 262: * Called as part of vm bootstrap, allocates internal pmap structures.
1.3 ragge 263: * Assumes that nothing is mapped, and that kernel stack is located
264: * immediately after end.
1.1 ragge 265: */
1.18 ragge 266: void
1.6 ragge 267: pmap_bootstrap()
1.3 ragge 268: {
1.118 matt 269: extern unsigned int etext;
270: extern struct user *proc0paddr;
1.56 ragge 271: unsigned int sysptsize, i;
1.40 ragge 272: struct pcb *pcb = (struct pcb *)proc0paddr;
1.42 thorpej 273: pmap_t pmap = pmap_kernel();
1.109 ragge 274: vsize_t kvmsize, usrptsize;
1.18 ragge 275:
1.90 chs 276: /* Set logical page size */
277: uvmexp.pagesize = NBPG;
278: uvm_setpagesize();
279:
1.75 ragge 280: physmem = btoc(avail_end);
1.31 ragge 281:
1.123 ragge 282: usrptsize = (1024*1024*1024)/VAX_NBPG; /* 1GB total VM */
1.110 ragge 283: if (vax_btop(usrptsize)* PPTESZ > avail_end/20)
284: usrptsize = (avail_end/(20 * PPTESZ)) * VAX_NBPG;
285:
1.109 ragge 286: kvmsize = calc_kvmsize(usrptsize);
1.104 ragge 287: sysptsize = kvmsize >> VAX_PGSHIFT;
1.18 ragge 288: /*
289: * Virtual_* and avail_* are used for mapping of the system page table.
1.19 ragge 290: * The need for kernel virtual memory also depends linearly on
 291: * the amount of physical memory; therefore sysptsize is
 292: * a variable here that is adjusted according to the physical
 293: * memory size.
1.18 ragge 294: */
1.40 ragge 295: virtual_avail = avail_end + KERNBASE;
1.56 ragge 296: virtual_end = KERNBASE + sysptsize * VAX_NBPG;
1.54 ragge 297: memset(Sysmap, 0, sysptsize * 4); /* clear SPT before using it */
1.43 ragge 298:
1.18 ragge 299: /*
1.40 ragge 300: * The first part of Kernel Virtual memory is the physical
301: * memory mapped in. This makes some mm routines both simpler
302: * and faster, but takes ~0.75% more memory.
303: */
304: pmap_map(KERNBASE, 0, avail_end, VM_PROT_READ|VM_PROT_WRITE);
305: /*
306: * Kernel code is always readable from user mode; it must be, because
307: * of the instruction emulation code that lives somewhere in there.
308: * And it doesn't hurt: /netbsd is publicly readable anyway.
1.18 ragge 309: * There are also a couple of other things that must be in
310: * physical memory and that aren't managed by the vm system.
311: */
1.105 matt 312: for (i = 0; i < ((unsigned)&etext ^ KERNBASE) >> VAX_PGSHIFT; i++)
1.40 ragge 313: Sysmap[i].pg_prot = PROT_URKW;
1.6 ragge 314:
1.18 ragge 315: /* Map System Page Table and zero it, Sysmap already set. */
1.40 ragge 316: mtpr((unsigned)Sysmap - KERNBASE, PR_SBR);
1.18 ragge 317:
318: /* Map Interrupt stack and set red zone */
1.90 chs 319: istack = (unsigned)Sysmap + round_page(sysptsize * 4);
1.18 ragge 320: mtpr(istack + ISTACK_SIZE, PR_ISP);
321: kvtopte(istack)->pg_v = 0;
322:
1.40 ragge 323: /* Some scratch pages */
1.90 chs 324: scratch = istack + ISTACK_SIZE;
1.40 ragge 325:
326: /* Physical-to-virtual translation table */
1.90 chs 327: pv_table = (struct pv_entry *)(scratch + 4 * VAX_NBPG);
1.40 ragge 328:
1.90 chs 329: avail_start = (vaddr_t)pv_table + (round_page(avail_end >> PGSHIFT)) *
1.40 ragge 330: sizeof(struct pv_entry) - KERNBASE;
1.18 ragge 331:
332: /* Kernel message buffer */
1.40 ragge 333: avail_end -= MSGBUFSIZE;
334: msgbufaddr = (void *)(avail_end + KERNBASE);
1.18 ragge 335:
336: /* zero all mapped physical memory from Sysmap to here */
1.54 ragge 337: memset((void *)istack, 0, (avail_start + KERNBASE) - istack);
1.43 ragge 338:
1.110 ragge 339: /* QDSS console mapping hack */
1.47 ragge 340: #if NQD > 0
1.63 ragge 341: qdearly();
1.46 ragge 342: #endif
1.18 ragge 343:
1.69 ragge 344: /* User page table map. This is big. */
1.109 ragge 345: MAPVIRT(ptemapstart, vax_btoc(usrptsize * sizeof(struct pte)));
1.69 ragge 346: ptemapend = virtual_avail;
347:
1.59 ragge 348: MAPVIRT(iospace, IOSPSZ); /* Device iospace mapping area */
349:
1.57 ragge 350: /* Init SCB and set up stray vectors. */
351: avail_start = scb_init(avail_start);
1.138 matt 352: *(struct rpb *) 0 = *(struct rpb *) ((caddr_t)proc0paddr + REDZONEADDR);
1.57 ragge 353:
1.59 ragge 354: if (dep_call->cpu_steal_pages)
355: (*dep_call->cpu_steal_pages)();
1.57 ragge 356:
1.90 chs 357: avail_start = round_page(avail_start);
358: virtual_avail = round_page(virtual_avail);
359: virtual_end = trunc_page(virtual_end);
1.28 ragge 360:
1.56 ragge 361:
1.110 ragge 362: #if 0 /* Breaks cninit() on some machines */
1.40 ragge 363: cninit();
1.104 ragge 364: printf("Sysmap %p, istack %lx, scratch %lx\n",Sysmap,istack,scratch);
365: printf("etext %p, kvmsize %lx\n", &etext, kvmsize);
1.124 ragge 366: printf("SYSPTSIZE %x usrptsize %lx\n",
367: sysptsize, usrptsize * sizeof(struct pte));
1.69 ragge 368: printf("pv_table %p, ptemapstart %lx ptemapend %lx\n",
369: pv_table, ptemapstart, ptemapend);
1.48 ragge 370: printf("avail_start %lx, avail_end %lx\n",avail_start,avail_end);
1.69 ragge 371: printf("virtual_avail %lx,virtual_end %lx\n",
372: virtual_avail, virtual_end);
1.48 ragge 373: printf("startpmapdebug %p\n",&startpmapdebug);
1.3 ragge 374: #endif
375:
1.18 ragge 376:
1.28 ragge 377: /* Init kernel pmap */
1.110 ragge 378: pmap->pm_p1br = (struct pte *)KERNBASE;
379: pmap->pm_p0br = (struct pte *)KERNBASE;
1.111 ragge 380: pmap->pm_p1lr = NPTEPERREG;
1.110 ragge 381: pmap->pm_p0lr = 0;
1.56 ragge 382: pmap->pm_stats.wired_count = pmap->pm_stats.resident_count = 0;
383: /* btop(virtual_avail - KERNBASE); */
1.42 thorpej 384:
1.110 ragge 385: pmap->pm_count = 1;
1.101 ragge 386: simple_lock_init(&pmap->pm_lock);
1.42 thorpej 387:
388: /* Activate the kernel pmap. */
1.140 ! matt 389: pcb->P1BR = pmap->pm_p1br;
! 390: pcb->P0BR = pmap->pm_p0br;
! 391: pcb->P1LR = pmap->pm_p1lr;
! 392: pcb->P0LR = pmap->pm_p0lr|AST_PCB;
! 393: mtpr((uintptr_t)pcb->P1BR, PR_P1BR);
! 394: mtpr((uintptr_t)pcb->P0BR, PR_P0BR);
! 395: mtpr(pcb->P1LR, PR_P1LR);
! 396: mtpr(pcb->P0LR, PR_P0LR);
1.80 ragge 397:
398: /* cpu_info struct */
399: pcb->SSP = scratch + VAX_NBPG;
400: mtpr(pcb->SSP, PR_SSP);
1.82 ragge 401: bzero((caddr_t)pcb->SSP,
402: sizeof(struct cpu_info) + sizeof(struct device));
1.80 ragge 403: curcpu()->ci_exit = scratch;
1.82 ragge 404: curcpu()->ci_dev = (void *)(pcb->SSP + sizeof(struct cpu_info));
1.81 ragge 405: #if defined(MULTIPROCESSOR)
406: curcpu()->ci_flags = CI_MASTERCPU|CI_RUNNING;
1.134 matt 407: SIMPLEQ_FIRST(&cpus) = curcpu();
1.81 ragge 408: #endif
1.101 ragge 409: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
410: simple_lock_init(&pvtable_lock);
411: #endif
1.28 ragge 412:
1.23 ragge 413: /*
414: * Now everything should be complete, start virtual memory.
415: */
1.56 ragge 416: uvm_page_physload(avail_start >> PGSHIFT, avail_end >> PGSHIFT,
417: avail_start >> PGSHIFT, avail_end >> PGSHIFT,
1.51 thorpej 418: VM_FREELIST_DEFAULT);
1.28 ragge 419: mtpr(sysptsize, PR_SLR);
1.79 ragge 420: rpb.sbr = mfpr(PR_SBR);
421: rpb.slr = mfpr(PR_SLR);
1.104 ragge 422: rpb.wait = 0; /* DDB signal */
1.28 ragge 423: mtpr(1, PR_MAPEN);
1.1 ragge 424: }
425:
1.67 ragge 426: /*
1.127 thorpej 427: * Define the initial bounds of the kernel virtual address space.
428: */
429: void
430: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
431: {
432:
433: *vstartp = virtual_avail;
434: *vendp = virtual_end;
435: }
436:
437: /*
1.67 ragge 438: * Let the VM system do early memory allocation from the direct-mapped
439: * physical memory instead.
440: */
441: vaddr_t
1.127 thorpej 442: pmap_steal_memory(size, vstartp, vendp)
1.67 ragge 443: vsize_t size;
1.127 thorpej 444: vaddr_t *vstartp, *vendp;
1.67 ragge 445: {
446: vaddr_t v;
447: int npgs;
448:
1.127 thorpej 449: PMDEBUG(("pmap_steal_memory: size 0x%lx start %p end %p\n",
450: size, vstartp, vendp));
1.110 ragge 451:
1.67 ragge 452: size = round_page(size);
453: npgs = btoc(size);
454:
1.77 thorpej 455: #ifdef DIAGNOSTIC
456: if (uvm.page_init_done == TRUE)
457: panic("pmap_steal_memory: called _after_ bootstrap");
458: #endif
459:
1.67 ragge 460: /*
461: * A VAX has only one segment of memory.
462: */
463:
464: v = (vm_physmem[0].avail_start << PGSHIFT) | KERNBASE;
465: vm_physmem[0].avail_start += npgs;
466: vm_physmem[0].start += npgs;
467: bzero((caddr_t)v, size);
468: return v;
469: }
1.18 ragge 470:
471: /*
472: * pmap_init() is called as part of vm init after memory management
473: * is enabled. It is meant to do machine-specific allocations.
1.69 ragge 474: * Here the resource map for the user page tables is initialized.
1.1 ragge 475: */
1.2 ragge 476: void
1.46 ragge 477: pmap_init()
1.2 ragge 478: {
1.110 ragge 479: /*
480: * Create the extent map used to manage the page table space.
481: */
482: ptemap = extent_create("ptemap", ptemapstart, ptemapend,
1.120 thorpej 483: M_VMPMAP, ptmapstorage, PTMAPSZ, EX_NOCOALESCE);
1.110 ragge 484: if (ptemap == NULL)
1.69 ragge 485: panic("pmap_init");
1.1 ragge 486: }
487:
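/*
 * Allocate pte space from the ptemap extent, sleeping if necessary.
 * Returns the allocated address, or 0 if the extent is exhausted
 * (extent_alloc() returned EAGAIN).
 */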
1.110 ragge 488: static u_long
489: pmap_extwrap(vsize_t nsize)
490: {
491: int res;
492: u_long rv;
493:
494: for (;;) {
495: res = extent_alloc(ptemap, nsize, PAGE_SIZE, 0,
496: EX_WAITOK|EX_MALLOCOK, &rv);
497: if (res == EAGAIN)
498: return 0;
499: if (res == 0)
500: return rv;
501: }
502: }
503:
504: /*
1.111 ragge 505: * Do a page removal from the pv table. A page is identified by its
506: * virtual address combined with its struct pmap in the pv table.
1.110 ragge 507: */
508: static void
509: rmpage(pmap_t pm, int *br)
510: {
511: struct pv_entry *pv, *pl, *pf;
1.111 ragge 512: vaddr_t vaddr;
513: int found = 0;
514:
515: if (pm == pmap_kernel())
516: vaddr = (br - (int *)Sysmap) * VAX_NBPG + 0x80000000;
517: else if ((br >= (int *)pm->pm_p0br) &&
518: (br < ((int *)pm->pm_p0br + pm->pm_p0lr)))
519: vaddr = (br - (int *)pm->pm_p0br) * VAX_NBPG;
520: else
521: vaddr = (br - (int *)pm->pm_p1br) * VAX_NBPG + 0x40000000;
1.129 ragge 522:
523: if (IOSPACE((br[0] & PG_FRAME) << VAX_PGSHIFT))
524: return; /* Forget mappings of IO space */
1.110 ragge 525:
526: pv = pv_table + ((br[0] & PG_FRAME) >> LTOHPS);
527: if (((br[0] & PG_PROT) == PG_RW) &&
528: ((pv->pv_attr & PG_M) != PG_M))
529: pv->pv_attr |= br[0]|br[1]|br[2]|br[3]|br[4]|br[5]|br[6]|br[7];
530: simple_lock(&pm->pm_lock);
531: pm->pm_stats.resident_count--;
532: if (br[0] & PG_W)
533: pm->pm_stats.wired_count--;
534: simple_unlock(&pm->pm_lock);
1.111 ragge 535: if (pv->pv_pmap == pm && pv->pv_vaddr == vaddr) {
1.110 ragge 536: pv->pv_vaddr = NOVADDR;
537: pv->pv_pmap = 0;
1.111 ragge 538: found++;
1.110 ragge 539: } else
540: for (pl = pv; pl->pv_next; pl = pl->pv_next) {
1.111 ragge 541: if (pl->pv_next->pv_pmap != pm ||
542: pl->pv_next->pv_vaddr != vaddr)
1.110 ragge 543: continue;
544: pf = pl->pv_next;
545: pl->pv_next = pl->pv_next->pv_next;
546: free_pventry(pf);
1.111 ragge 547: found++;
1.110 ragge 548: break;
549: }
1.111 ragge 550: if (found == 0)
551: panic("rmpage: pm %p br %p", pm, br);
1.110 ragge 552: }
553: /*
554: * Update the PCBs using this pmap after a change.
555: */
556: static void
557: update_pcbs(struct pmap *pm)
558: {
559: struct pm_share *ps;
560:
561: ps = pm->pm_share;
562: while (ps != NULL) {
563: ps->ps_pcb->P0BR = pm->pm_p0br;
564: ps->ps_pcb->P0LR = pm->pm_p0lr|AST_PCB;
565: ps->ps_pcb->P1BR = pm->pm_p1br;
566: ps->ps_pcb->P1LR = pm->pm_p1lr;
567: ps = ps->ps_next;
568: }
569:
1.119 thorpej 570: /* If curlwp uses this pmap update the regs too */
1.110 ragge 571: if (pm == curproc->p_vmspace->vm_map.pmap) {
1.140 ! matt 572: mtpr((uintptr_t)pm->pm_p0br, PR_P0BR);
1.110 ragge 573: mtpr(pm->pm_p0lr|AST_PCB, PR_P0LR);
1.140 ! matt 574: mtpr((uintptr_t)pm->pm_p1br, PR_P1BR);
1.110 ragge 575: mtpr(pm->pm_p1lr, PR_P1LR);
576: }
577: #if defined(MULTIPROCESSOR) && defined(notyet)
578: /* If someone else is using this pmap, be sure to reread */
579: cpu_send_ipi(IPI_DEST_ALL, IPI_NEWPTE);
580: #endif
581: }
582:
583: /*
1.111 ragge 584: * Allocate a page through direct-mapped segment.
585: */
586: static vaddr_t
587: getpage(int w)
588: {
589: struct vm_page *pg;
590:
591: for (;;) {
592: pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
593: if (pg != NULL)
594: break;
595: if (w == NOWAIT)
596: return 0;
597: uvm_wait("getpage");
598: }
599: return (VM_PAGE_TO_PHYS(pg)|KERNBASE);
600: }
601:
1.124 ragge 602: #if 0
1.111 ragge 603: /*
604: * Free the page allocated above.
605: */
606: static void
607: freepage(vaddr_t v)
608: {
609: paddr_t paddr = (kvtopte(v)->pg_pfn << VAX_PGSHIFT);
610: uvm_pagefree(PHYS_TO_VM_PAGE(paddr));
611: }
1.124 ragge 612: #endif
1.111 ragge 613:
614: /*
1.110 ragge 615: * Remove a full process space. Update all processes' PCBs.
616: */
617: static void
618: rmspace(struct pmap *pm)
619: {
1.124 ragge 620: int lr, i, j, *br, *ptpp;
1.110 ragge 621:
1.111 ragge 622: if (pm->pm_p0lr == 0 && pm->pm_p1lr == NPTEPERREG)
1.110 ragge 623: return; /* Already free */
624:
1.124 ragge 625: lr = pm->pm_p0lr/NPTEPG;
1.110 ragge 626: for (i = 0; i < lr; i++) {
1.124 ragge 627: ptpp = (int *)kvtopte(&pm->pm_p0br[i*NPTEPG]);
628: if (*ptpp == 0)
1.110 ragge 629: continue;
1.124 ragge 630: br = (int *)&pm->pm_p0br[i*NPTEPG];
631: for (j = 0; j < NPTEPG; j+=LTOHPN) {
1.110 ragge 632: if (br[j] == 0)
633: continue;
634: rmpage(pm, &br[j]);
635: }
1.124 ragge 636: free_ptp((((struct pte *)ptpp)->pg_pfn << VAX_PGSHIFT));
637: *ptpp = 0;
1.110 ragge 638: }
1.124 ragge 639: lr = pm->pm_p1lr/NPTEPG;
640: for (i = lr; i < NPTEPERREG/NPTEPG; i++) {
641: ptpp = (int *)kvtopte(&pm->pm_p1br[i*NPTEPG]);
642: if (*ptpp == 0)
1.110 ragge 643: continue;
1.124 ragge 644: br = (int *)&pm->pm_p1br[i*NPTEPG];
645: for (j = 0; j < NPTEPG; j+=LTOHPN) {
1.110 ragge 646: if (br[j] == 0)
647: continue;
648: rmpage(pm, &br[j]);
649: }
1.124 ragge 650: free_ptp((((struct pte *)ptpp)->pg_pfn << VAX_PGSHIFT));
651: *ptpp = 0;
1.110 ragge 652: }
653:
1.112 ragge 654: if (pm->pm_p0lr != 0)
655: extent_free(ptemap, (u_long)pm->pm_p0br,
656: pm->pm_p0lr * PPTESZ, EX_WAITOK);
657: if (pm->pm_p1lr != NPTEPERREG)
658: extent_free(ptemap, (u_long)pm->pm_p1ap,
659: (NPTEPERREG - pm->pm_p1lr) * PPTESZ, EX_WAITOK);
1.110 ragge 660: pm->pm_p0br = pm->pm_p1br = (struct pte *)KERNBASE;
1.111 ragge 661: pm->pm_p0lr = 0;
662: pm->pm_p1lr = NPTEPERREG;
1.110 ragge 663: pm->pm_p1ap = NULL;
664: update_pcbs(pm);
665: }
666:
667: /*
668: * Find a process to remove the process space for.
669: * This is based on uvm_swapout_threads().
670: * Avoid removing ourselves.
671: */
672:
1.118 matt 673: #undef swappable
1.119 thorpej 674: #define swappable(l, pm) \
1.140 ! matt 675: (((l)->l_flag & (L_SYSTEM | L_INMEM | L_WEXIT)) == L_INMEM \
! 676: && (l)->l_holdcnt == 0 \
! 677: && (l)->l_proc->p_vmspace->vm_map.pmap != pm)
1.110 ragge 678:
679: static int
680: pmap_rmproc(struct pmap *pm)
681: {
682: struct pmap *ppm;
1.119 thorpej 683: struct lwp *l;
684: struct lwp *outl, *outl2;
1.110 ragge 685: int outpri, outpri2;
686: int didswap = 0;
687: extern int maxslp;
688:
1.119 thorpej 689: outl = outl2 = NULL;
1.110 ragge 690: outpri = outpri2 = 0;
1.140 ! matt 691: rw_enter(&proclist_lock, RW_READER);
1.119 thorpej 692: LIST_FOREACH(l, &alllwp, l_list) {
693: if (!swappable(l, pm))
1.110 ragge 694: continue;
1.119 thorpej 695: ppm = l->l_proc->p_vmspace->vm_map.pmap;
1.111 ragge 696: if (ppm->pm_p0lr == 0 && ppm->pm_p1lr == NPTEPERREG)
1.110 ragge 697: continue; /* Already swapped */
1.119 thorpej 698: switch (l->l_stat) {
699: case LSRUN:
700: case LSONPROC:
701: if (l->l_swtime > outpri2) {
702: outl2 = l;
703: outpri2 = l->l_swtime;
1.110 ragge 704: }
705: continue;
1.119 thorpej 706: case LSSLEEP:
707: case LSSTOP:
708: if (l->l_slptime >= maxslp) {
709: rmspace(l->l_proc->p_vmspace->vm_map.pmap);
1.110 ragge 710: didswap++;
1.119 thorpej 711: } else if (l->l_slptime > outpri) {
712: outl = l;
713: outpri = l->l_slptime;
1.110 ragge 714: }
715: continue;
716: }
717: }
1.140 ! matt 718: rw_exit(&proclist_lock);
1.110 ragge 719: if (didswap == 0) {
1.119 thorpej 720: if ((l = outl) == NULL)
721: l = outl2;
722: if (l) {
723: rmspace(l->l_proc->p_vmspace->vm_map.pmap);
1.110 ragge 724: didswap++;
725: }
726: }
727: return didswap;
728: }
729:
730: /*
731: * Allocate space for user page tables, from ptemap.
1.111 ragge 732: * This routine should never fail; if no space is available it reclaims
 733: * some with the same algorithm used when processes are swapped out.
1.110 ragge 734: * Argument is needed space, in bytes.
735: * Returns a pointer to the newly allocated space.
736: */
737: static vaddr_t
738: pmap_getusrptes(pmap_t pm, vsize_t nsize)
739: {
740: u_long rv;
741:
742: #ifdef DEBUG
743: if (nsize & PAGE_MASK)
744: panic("pmap_getusrptes: bad size %lx", nsize);
745: #endif
746: while (((rv = pmap_extwrap(nsize)) == 0) && (pmap_rmproc(pm) != 0))
747: ;
748: if (rv)
749: return rv;
750: panic("usrptmap space leakage");
751: }
752:
1.70 ragge 753: /*
1.111 ragge 754: * Remove a pte page when all references are gone.
1.70 ragge 755: */
1.111 ragge 756: static void
757: rmptep(struct pte *pte)
1.70 ragge 758: {
1.124 ragge 759: int *ptpp = (int *)kvtopte(pte);
1.110 ragge 760: #ifdef DEBUG
1.124 ragge 761: { int i, *ptr = (int *)vax_trunc_page(pte);
762: for (i = 0; i < NPTEPG; i++)
1.110 ragge 763: if (ptr[i] != 0)
1.111 ragge 764: panic("rmptep: ptr[%d] != 0", i);
765: }
1.110 ragge 766: #endif
1.124 ragge 767: free_ptp((((struct pte *)ptpp)->pg_pfn << VAX_PGSHIFT));
768: *ptpp = 0;
1.70 ragge 769: }
1.18 ragge 770:
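/*
 * Grow the P0 page table so that it holds at least reqlen ptes:
 * allocate new pte space, move the old ptes over by copying their
 * mappings in the system page table, then update all PCBs and free
 * the old space.
 */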
1.110 ragge 771: static void
772: grow_p0(struct pmap *pm, int reqlen)
773: {
774: vaddr_t nptespc;
775: char *from, *to;
776: int srclen, dstlen;
777: int inuse, len, p0lr;
778: u_long p0br;
779:
780: PMDEBUG(("grow_p0: pmap %p reqlen %d\n", pm, reqlen));
781:
782: /* Get new pte space */
1.111 ragge 783: p0lr = pm->pm_p0lr;
1.110 ragge 784: inuse = p0lr != 0;
785: len = round_page((reqlen+1) * PPTESZ);
786: RECURSEEND;
787: nptespc = pmap_getusrptes(pm, len);
788: RECURSESTART;
789:
790: /*
791: * Copy the old ptes to the new space.
792: * Done by moving on system page table.
793: */
794: srclen = vax_btop(p0lr * PPTESZ) * PPTESZ;
795: dstlen = vax_btoc(len)*PPTESZ;
796: from = (char *)kvtopte(pm->pm_p0br);
797: to = (char *)kvtopte(nptespc);
798:
799: PMDEBUG(("grow_p0: from %p to %p src %d dst %d\n",
800: from, to, srclen, dstlen));
801:
802: if (inuse)
803: bcopy(from, to, srclen);
804: bzero(to+srclen, dstlen-srclen);
805: p0br = (u_long)pm->pm_p0br;
806: pm->pm_p0br = (struct pte *)nptespc;
1.111 ragge 807: pm->pm_p0lr = (len/PPTESZ);
1.110 ragge 808: update_pcbs(pm);
809:
1.133 wiz 810: /* Remove the old after update_pcbs() (for multi-CPU propagation) */
1.110 ragge 811: if (inuse)
812: extent_free(ptemap, p0br, p0lr*PPTESZ, EX_WAITOK);
813: }
814:
815:
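/*
 * Grow the P1 page table. P1 grows towards lower addresses, so the
 * old ptes are copied to the end of the newly allocated space and
 * pm_p1br is adjusted to point (virtually) at the region base.
 */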
816: static void
817: grow_p1(struct pmap *pm, int len)
818: {
819: vaddr_t nptespc, optespc;
820: int nlen, olen;
821:
822: PMDEBUG(("grow_p1: pm %p len %x\n", pm, len));
823:
824: /* Get new pte space */
1.124 ragge 825: nlen = (NPTEPERREG*PPTESZ) - trunc_page(len * PPTESZ);
1.110 ragge 826: RECURSEEND;
827: nptespc = pmap_getusrptes(pm, nlen);
828: RECURSESTART;
1.124 ragge 829: olen = (NPTEPERREG*PPTESZ) - (pm->pm_p1lr * PPTESZ);
1.110 ragge 830: optespc = (vaddr_t)pm->pm_p1ap;
831:
832: /*
833: * Copy the old ptes to the new space.
834: * Done by moving on system page table.
835: */
836: bzero(kvtopte(nptespc), vax_btop(nlen-olen) * PPTESZ);
837: if (optespc)
838: bcopy(kvtopte(optespc), kvtopte(nptespc+nlen-olen),
1.115 ragge 839: vax_btop(olen) * PPTESZ);
1.110 ragge 840:
841: pm->pm_p1ap = (struct pte *)nptespc;
1.124 ragge 842: pm->pm_p1br = (struct pte *)(nptespc+nlen-(NPTEPERREG*PPTESZ));
1.111 ragge 843: pm->pm_p1lr = NPTEPERREG - nlen/PPTESZ;
1.110 ragge 844: update_pcbs(pm);
845:
846: if (optespc)
847: extent_free(ptemap, optespc, olen, EX_WAITOK);
848: }
849:
1.18 ragge 850: /*
1.42 thorpej 851: * Initialize a preallocated and zeroed pmap structure.
852: */
1.101 ragge 853: static void
854: pmap_pinit(pmap_t pmap)
1.42 thorpej 855: {
856:
857: /*
1.111 ragge 858: * Do not allocate any ptes here; we don't know the size yet and
 859: * we'll get a page fault anyway when some page is referenced,
860: * so do it then.
1.42 thorpej 861: */
1.111 ragge 862: pmap->pm_p0br = (struct pte *)KERNBASE;
863: pmap->pm_p1br = (struct pte *)KERNBASE;
864: pmap->pm_p0lr = 0;
865: pmap->pm_p1lr = NPTEPERREG;
866: pmap->pm_p1ap = NULL;
1.110 ragge 867:
868: PMDEBUG(("pmap_pinit(%p): p0br=%p p0lr=0x%lx p1br=%p p1lr=0x%lx\n",
869: pmap, pmap->pm_p0br, pmap->pm_p0lr, pmap->pm_p1br, pmap->pm_p1lr));
870:
871: pmap->pm_count = 1;
1.54 ragge 872: pmap->pm_stats.resident_count = pmap->pm_stats.wired_count = 0;
1.42 thorpej 873: }
1.1 ragge 874:
875: /*
1.101 ragge 876: * pmap_create() creates a pmap for a new task.
877: * If not already allocated, malloc space for one.
878: */
879: struct pmap *
880: pmap_create()
881: {
882: struct pmap *pmap;
883:
884: MALLOC(pmap, struct pmap *, sizeof(*pmap), M_VMPMAP, M_WAITOK);
885: bzero(pmap, sizeof(struct pmap));
886: pmap_pinit(pmap);
887: simple_lock_init(&pmap->pm_lock);
888: return (pmap);
889: }
890:
891: /*
1.1 ragge 892: * Release any resources held by the given physical map.
893: * Called when a pmap initialized by pmap_pinit is being released.
894: * Should only be called if the map contains no valid mappings.
895: */
1.101 ragge 896: static void
897: pmap_release(struct pmap *pmap)
1.1 ragge 898: {
1.70 ragge 899: #ifdef DEBUG
1.69 ragge 900: vaddr_t saddr, eaddr;
1.70 ragge 901: #endif
1.69 ragge 902:
1.110 ragge 903: PMDEBUG(("pmap_release: pmap %p\n",pmap));
1.5 ragge 904:
1.69 ragge 905: if (pmap->pm_p0br == 0)
906: return;
907:
1.70 ragge 908: #ifdef DEBUG
1.124 ragge 909: #if 0
1.110 ragge 910: for (i = 0; i < NPTEPROCSPC; i++)
911: if (pmap->pm_pref[i])
1.70 ragge 912: panic("pmap_release: refcnt %d index %d",
1.110 ragge 913: pmap->pm_pref[i], i);
1.124 ragge 914: #endif
1.70 ragge 915:
1.69 ragge 916: saddr = (vaddr_t)pmap->pm_p0br;
1.111 ragge 917: eaddr = saddr + pmap->pm_p0lr * PPTESZ;
1.125 thorpej 918: for (; saddr < eaddr; saddr += PAGE_SIZE)
1.70 ragge 919: if (kvtopte(saddr)->pg_pfn)
1.110 ragge 920: panic("pmap_release: P0 page mapped");
921: saddr = (vaddr_t)pmap->pm_p1br + pmap->pm_p1lr * PPTESZ;
922: eaddr = KERNBASE;
1.125 thorpej 923: for (; saddr < eaddr; saddr += PAGE_SIZE)
1.110 ragge 924: if (kvtopte(saddr)->pg_pfn)
925: panic("pmap_release: P1 page mapped");
1.70 ragge 926: #endif
1.111 ragge 927: if (pmap->pm_p0lr != 0)
1.110 ragge 928: extent_free(ptemap, (u_long)pmap->pm_p0br,
1.111 ragge 929: pmap->pm_p0lr * PPTESZ, EX_WAITOK);
930: if (pmap->pm_p1lr != NPTEPERREG)
1.110 ragge 931: extent_free(ptemap, (u_long)pmap->pm_p1ap,
1.111 ragge 932: (NPTEPERREG - pmap->pm_p1lr) * PPTESZ, EX_WAITOK);
1.1 ragge 933: }
934:
1.17 ragge 935: /*
936: * pmap_destroy(pmap): Remove a reference from the pmap.
1.1 ragge 937: * If the pmap is NULL then just return, else decrease pm_count.
 938: * If this was the last reference we call pmap_release() to release this pmap.
 939: * NB: remember to take pm_lock.
940: */
941:
942: void
1.101 ragge 943: pmap_destroy(pmap_t pmap)
1.1 ragge 944: {
1.2 ragge 945: int count;
1.1 ragge 946:
1.110 ragge 947: PMDEBUG(("pmap_destroy: pmap %p\n",pmap));
1.2 ragge 948:
949: simple_lock(&pmap->pm_lock);
1.110 ragge 950: count = --pmap->pm_count;
1.2 ragge 951: simple_unlock(&pmap->pm_lock);
1.1 ragge 952:
1.36 ragge 953: if (count == 0) {
1.110 ragge 954: #ifdef DEBUG
1.113 ragge 955: if (pmap->pm_share)
1.110 ragge 956: panic("pmap_destroy used pmap");
957: #endif
1.2 ragge 958: pmap_release(pmap);
1.90 chs 959: FREE(pmap, M_VMPMAP);
1.2 ragge 960: }
1.1 ragge 961: }
962:
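/*
 * Return a pointer to the first PTE mapping the page described by a
 * pv entry, selecting the P0, P1 or system page table from the
 * segment bits of the saved virtual address; NULL if the entry is
 * unused.
 */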
1.110 ragge 963: static struct pte *
964: vaddrtopte(struct pv_entry *pv)
965: {
966: struct pmap *pm;
967: if (pv->pv_pmap == NULL || pv->pv_vaddr == NOVADDR)
968: return NULL;
969: if (pv->pv_vaddr & KERNBASE)
970: return &Sysmap[(pv->pv_vaddr & ~KERNBASE) >> VAX_PGSHIFT];
971: pm = pv->pv_pmap;
972: if (pv->pv_vaddr & 0x40000000)
973: return &pm->pm_p1br[vax_btop(pv->pv_vaddr & ~0x40000000)];
974: else
975: return &pm->pm_p0br[vax_btop(pv->pv_vaddr)];
976: }
977:
1.48 ragge 978: /*
979: * New (real nice!) function that allocates memory in kernel space
980: * without tracking it in the MD code.
981: */
982: void
1.101 ragge 983: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1.48 ragge 984: {
1.101 ragge 985: int *ptp, opte;
1.48 ragge 986:
987: ptp = (int *)kvtopte(va);
1.110 ragge 988: PMDEBUG(("pmap_kenter_pa: va: %lx, pa %lx, prot %x ptp %p\n",
989: va, pa, prot, ptp));
1.101 ragge 990: opte = ptp[0];
1.48 ragge 991: ptp[0] = PG_V | ((prot & VM_PROT_WRITE)? PG_KW : PG_KR) |
992: PG_PFNUM(pa) | PG_SREF;
993: ptp[1] = ptp[0] + 1;
1.53 ragge 994: ptp[2] = ptp[0] + 2;
995: ptp[3] = ptp[0] + 3;
996: ptp[4] = ptp[0] + 4;
997: ptp[5] = ptp[0] + 5;
998: ptp[6] = ptp[0] + 6;
999: ptp[7] = ptp[0] + 7;
1.101 ragge 1000: if (opte & PG_V) {
1001: #if defined(MULTIPROCESSOR)
1002: cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1003: #endif
1004: mtpr(0, PR_TBIA);
1005: }
1.48 ragge 1006: }
1007:
1008: void
1.101 ragge 1009: pmap_kremove(vaddr_t va, vsize_t len)
1.48 ragge 1010: {
1.52 ragge 1011: struct pte *pte;
1.102 chs 1012: #ifdef PMAPDEBUG
1.52 ragge 1013: int i;
1.110 ragge 1014: #endif
1.52 ragge 1015:
1.110 ragge 1016: PMDEBUG(("pmap_kremove: va: %lx, len %lx, ptp %p\n",
1017: va, len, kvtopte(va)));
1.48 ragge 1018:
1.101 ragge 1019: pte = kvtopte(va);
1020:
1.102 chs 1021: #ifdef PMAPDEBUG
1.48 ragge 1022: /*
1.101 ragge 1023: * Check if any pages are on the pv list.
1024: * This shouldn't happen anymore.
1.48 ragge 1025: */
1.56 ragge 1026: len >>= PGSHIFT;
1.52 ragge 1027: for (i = 0; i < len; i++) {
1028: if (pte->pg_pfn == 0)
1029: continue;
1030: if (pte->pg_sref == 0)
1.101 ragge 1031: panic("pmap_kremove");
1.56 ragge 1032: bzero(pte, LTOHPN * sizeof(struct pte));
1033: pte += LTOHPN;
1.52 ragge 1034: }
1.101 ragge 1035: #else
1036: len >>= VAX_PGSHIFT;
1037: bzero(pte, len * sizeof(struct pte));
1038: #endif
1039: #if defined(MULTIPROCESSOR)
1040: cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1041: #endif
1.48 ragge 1042: mtpr(0, PR_TBIA);
1043: }
1044:
1.62 ragge 1045: /*
1046: * pmap_enter() is the main routine that puts in mappings for pages, or
1.92 ragge 1047: * upgrades mappings to more "rights".
1.62 ragge 1048: */
1.73 thorpej 1049: int
1.124 ragge 1050: pmap_enter(pmap_t pmap, vaddr_t v, paddr_t p, vm_prot_t prot, int flags)
1.1 ragge 1051: {
1.110 ragge 1052: struct pv_entry *pv, *tmp;
1.124 ragge 1053: int s, newpte, oldpte;
1054: int *pteptr; /* current pte to write mapping info to */
1055: int *ptpptr; /* ptr to page table page */
1056:
1.2 ragge 1057:
1.110 ragge 1058: PMDEBUG(("pmap_enter: pmap %p v %lx p %lx prot %x wired %d access %x\n",
1.124 ragge 1059: pmap, v, p, prot, (flags & PMAP_WIRED) != 0, flags & VM_PROT_ALL));
1.5 ragge 1060:
1.69 ragge 1061: RECURSESTART;
1.124 ragge 1062:
1.101 ragge 1063: /* Find address of correct pte */
1.124 ragge 1064: switch (SEGTYPE(v)) {
1065: case SYSSEG:
1066: pteptr = ((int *)Sysmap) + vax_btop(v - KERNBASE);
1067: newpte = (prot & VM_PROT_WRITE ? PG_KW : PG_KR);
1068: break;
1069:
1070: case P0SEG:
1071: if (vax_btop(v) >= pmap->pm_p0lr)
1072: grow_p0(pmap, vax_btop(v));
1073: pteptr = (int *)pmap->pm_p0br + vax_btop(v);
1074: newpte = (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
1075: break;
1076:
1077: case P1SEG:
1078: if (vax_btop(v - 0x40000000) < pmap->pm_p1lr)
1079: grow_p1(pmap, vax_btop(v - 0x40000000));
1080: pteptr = (int *)pmap->pm_p1br + vax_btop(v - 0x40000000);
1081: newpte = (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
1082: break;
1083: default:
1084: panic("bad seg");
1085: }
1086: newpte |= vax_btop(p);
1.70 ragge 1087:
1.124 ragge 1088: if (SEGTYPE(v) != SYSSEG) {
1.70 ragge 1089: /*
1090: * Check if a pte page must be mapped in.
1091: */
1.124 ragge 1092: ptpptr = (int *)kvtopte(pteptr);
1.110 ragge 1093:
1.124 ragge 1094: if (*ptpptr == 0) {
1.69 ragge 1095: paddr_t phys;
1.124 ragge 1096:
1097: phys = get_ptp(flags & PMAP_CANFAIL ? NOWAIT : WAITOK);
1098: if (phys == 0) {
1.111 ragge 1099: RECURSEEND;
1100: return ENOMEM;
1.70 ragge 1101: }
1.124 ragge 1102: *ptpptr = PG_V | PG_KW | PG_PFNUM(phys);
1.69 ragge 1103: }
1.5 ragge 1104: }
1.124 ragge 1105:
1.92 ragge 1106: /*
1107: * Do not keep track of anything if mapping IO space.
1108: */
1109: if (IOSPACE(p)) {
1.124 ragge 1110: mapin8(pteptr, newpte);
1.92 ragge 1111: RECURSEEND;
1.94 chs 1112: return 0;
1.92 ragge 1113: }
1114:
1.87 ragge 1115: if (flags & PMAP_WIRED)
1116: newpte |= PG_W;
1.5 ragge 1117:
1.124 ragge 1118: oldpte = *pteptr & ~(PG_V|PG_M);
1.86 ragge 1119: pv = pv_table + (p >> PGSHIFT);
1120:
1.107 chs 1121: /* just a wiring change? */
1.86 ragge 1122: if (newpte == (oldpte | PG_W)) {
1.124 ragge 1123: *pteptr |= PG_W;
1.107 chs 1124: pmap->pm_stats.wired_count++;
1.69 ragge 1125: RECURSEEND;
1.94 chs 1126: return 0;
1.69 ragge 1127: }
1.91 chs 1128:
1129: /* mapping unchanged? just return. */
1130: if (newpte == oldpte) {
1131: RECURSEEND;
1.94 chs 1132: return 0;
1.91 chs 1133: }
1.89 ragge 1134:
1.62 ragge 1135: /* Changing mapping? */
1.107 chs 1136:
1137: if ((newpte & PG_FRAME) == (oldpte & PG_FRAME)) {
1.86 ragge 1138: /* prot change. resident_count will be increased later */
1139: pmap->pm_stats.resident_count--;
1.124 ragge 1140: if (oldpte & PG_W)
1.107 chs 1141: pmap->pm_stats.wired_count--;
1.86 ragge 1142: } else {
1.107 chs 1143:
1.67 ragge 1144: /*
1145: * Mapped before? Remove it then.
1146: */
1.107 chs 1147:
1.108 ragge 1148: if (oldpte & PG_FRAME) {
1.69 ragge 1149: RECURSEEND;
1.108 ragge 1150: if ((oldpte & PG_SREF) == 0)
1.124 ragge 1151: rmpage(pmap, pteptr);
1.111 ragge 1152: else
1153: panic("pmap_enter on PG_SREF page");
1.69 ragge 1154: RECURSESTART;
1.124 ragge 1155: }
1.40 ragge 1156:
1.96 thorpej 1157: s = splvm();
1.101 ragge 1158: PVTABLE_LOCK;
1.110 ragge 1159: if (pv->pv_pmap == NULL) {
1160: pv->pv_vaddr = v;
1.54 ragge 1161: pv->pv_pmap = pmap;
1.40 ragge 1162: } else {
1.70 ragge 1163: tmp = get_pventry();
1.110 ragge 1164: tmp->pv_vaddr = v;
1.54 ragge 1165: tmp->pv_pmap = pmap;
1.40 ragge 1166: tmp->pv_next = pv->pv_next;
1167: pv->pv_next = tmp;
1168: }
1.101 ragge 1169: PVTABLE_UNLOCK;
1.8 ragge 1170: splx(s);
1.5 ragge 1171: }
1.54 ragge 1172: pmap->pm_stats.resident_count++;
1.124 ragge 1173: if (flags & PMAP_WIRED)
1.107 chs 1174: pmap->pm_stats.wired_count++;
1.2 ragge 1175:
1.101 ragge 1176: PVTABLE_LOCK;
1.111 ragge 1177: if (flags & (VM_PROT_READ|VM_PROT_WRITE)) {
1.62 ragge 1178: pv->pv_attr |= PG_V;
1179: newpte |= PG_V;
1180: }
1.73 thorpej 1181: if (flags & VM_PROT_WRITE)
1.62 ragge 1182: pv->pv_attr |= PG_M;
1.101 ragge 1183: PVTABLE_UNLOCK;
1.40 ragge 1184:
1.95 ragge 1185: if (flags & PMAP_WIRED)
1186: newpte |= PG_V; /* Not allowed to be invalid */
1187:
1.124 ragge 1188: mapin8(pteptr, newpte);
1.69 ragge 1189: RECURSEEND;
1.124 ragge 1190:
1.70 ragge 1191: if (pventries < 10)
1192: more_pventries();
1.73 thorpej 1193:
1.85 ragge 1194: mtpr(0, PR_TBIA); /* Always; safety belt */
1.94 chs 1195: return 0;
1.1 ragge 1196: }
1197:
1.54 ragge 1198: vaddr_t
1.2 ragge 1199: pmap_map(virtuell, pstart, pend, prot)
1.54 ragge 1200: vaddr_t virtuell;
1.110 ragge 1201: paddr_t pstart, pend;
1.13 ragge 1202: int prot;
1.1 ragge 1203: {
1.54 ragge 1204: vaddr_t count;
1.1 ragge 1205: int *pentry;
1206:
1.110 ragge 1207: PMDEBUG(("pmap_map: virt %lx, pstart %lx, pend %lx, Sysmap %p\n",
1208: virtuell, pstart, pend, Sysmap));
1.3 ragge 1209:
1.7 ragge 1210: pstart=(uint)pstart &0x7fffffff;
1211: pend=(uint)pend &0x7fffffff;
1212: virtuell=(uint)virtuell &0x7fffffff;
1.137 matt 1213: pentry = (int *)((((uint)(virtuell)>>VAX_PGSHIFT)*4)+(uint)Sysmap);
1.56 ragge 1214: for(count=pstart;count<pend;count+=VAX_NBPG){
1215: *pentry++ = (count>>VAX_PGSHIFT)|PG_V|
1.40 ragge 1216: (prot & VM_PROT_WRITE ? PG_KW : PG_KR);
1.7 ragge 1217: }
1.3 ragge 1218: return(virtuell+(count-pstart)+0x80000000);
1.1 ragge 1219: }
1220:
1.70 ragge 1221: #if 0
1.68 thorpej 1222: boolean_t
1223: pmap_extract(pmap, va, pap)
1.13 ragge 1224: pmap_t pmap;
1.54 ragge 1225: vaddr_t va;
1.68 thorpej 1226: paddr_t *pap;
1.13 ragge 1227: {
1.69 ragge 1228: paddr_t pa = 0;
1229: int *pte, sva;
1.2 ragge 1230:
1.110 ragge 1231: PMDEBUG(("pmap_extract: pmap %p, va %lx\n",pmap, va));
1.40 ragge 1232:
1.69 ragge 1233: if (va & KERNBASE) {
1234: pa = kvtophys(va); /* Is 0 if not mapped */
1235: if (pap)
1236: *pap = pa;
1237: if (pa)
1238: return (TRUE);
1239: return (FALSE);
1240: }
1241:
1242: sva = PG_PFNUM(va);
1.46 ragge 1243: if (va < 0x40000000) {
1.111 ragge 1244: if (sva > pmap->pm_p0lr)
1.68 thorpej 1245: return FALSE;
1.56 ragge 1246: pte = (int *)pmap->pm_p0br;
1.40 ragge 1247: } else {
1.56 ragge 1248: if (sva < pmap->pm_p1lr)
1.68 thorpej 1249: return FALSE;
1.56 ragge 1250: pte = (int *)pmap->pm_p1br;
1.46 ragge 1251: }
1.69 ragge 1252: if (kvtopte(&pte[sva])->pg_pfn) {
1253: if (pap)
1.68 thorpej 1254: *pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
1255: return (TRUE);
1256: }
1257: return (FALSE);
1.1 ragge 1258: }
1.70 ragge 1259: #endif
1.2 ragge 1260: /*
1.40 ragge 1261: * Sets protection for a given region to prot. If prot == none then
1262: * unmap region. pmap_remove is implemented as pmap_protect with
1263: * protection none.
1.2 ragge 1264: */
1265: void
1.111 ragge 1266: pmap_protect_long(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
1.2 ragge 1267: {
1.40 ragge 1268: struct pte *pt, *pts, *ptd;
1.124 ragge 1269: int pr, lr;
1.2 ragge 1270:
1.110 ragge 1271: PMDEBUG(("pmap_protect: pmap %p, start %lx, end %lx, prot %x\n",
1272: pmap, start, end,prot));
1.2 ragge 1273:
1.69 ragge 1274: RECURSESTART;
1.124 ragge 1275:
1276: switch (SEGTYPE(start)) {
1277: case SYSSEG:
1.40 ragge 1278: pt = Sysmap;
1279: #ifdef DIAGNOSTIC
1.56 ragge 1280: if (((end & 0x3fffffff) >> VAX_PGSHIFT) > mfpr(PR_SLR))
1.48 ragge 1281: panic("pmap_protect: outside SLR: %lx", end);
1.2 ragge 1282: #endif
1.48 ragge 1283: start &= ~KERNBASE;
1284: end &= ~KERNBASE;
1.40 ragge 1285: pr = (prot & VM_PROT_WRITE ? PROT_KW : PROT_KR);
1.124 ragge 1286: break;
1287:
1288: case P1SEG:
1289: if (vax_btop(end - 0x40000000) <= pmap->pm_p1lr) {
1290: RECURSEEND;
1291: return;
1292: }
1293: if (vax_btop(start - 0x40000000) < pmap->pm_p1lr)
1294: start = pmap->pm_p1lr * VAX_NBPG;
1295: pt = pmap->pm_p1br;
1296: start &= 0x3fffffff;
1297: end = (end == KERNBASE ? end >> 1 : end & 0x3fffffff);
1298: pr = (prot & VM_PROT_WRITE ? PROT_RW : PROT_RO);
1299: break;
1300:
1301: case P0SEG:
1302: lr = pmap->pm_p0lr;
1.110 ragge 1303:
1.124 ragge 1304: /* Anything to care about at all? */
1305: if (vax_btop(start) > lr) {
1306: RECURSEEND;
1307: return;
1.48 ragge 1308: }
1.124 ragge 1309: if (vax_btop(end) > lr)
1310: end = lr * VAX_NBPG;
1311: pt = pmap->pm_p0br;
1.40 ragge 1312: pr = (prot & VM_PROT_WRITE ? PROT_RW : PROT_RO);
1.124 ragge 1313: break;
1.130 he 1314: default:
1315: panic("unsupported segtype: %d", SEGTYPE(start));
1.2 ragge 1316: }
1.124 ragge 1317:
1.56 ragge 1318: pts = &pt[start >> VAX_PGSHIFT];
1319: ptd = &pt[end >> VAX_PGSHIFT];
1.40 ragge 1320: #ifdef DEBUG
1321: if (((int)pts - (int)pt) & 7)
1322: panic("pmap_remove: pts not even");
1323: if (((int)ptd - (int)pt) & 7)
1324: panic("pmap_remove: ptd not even");
1325: #endif
1326:
1.70 ragge 1327: while (pts < ptd) {
1328: if (kvtopte(pts)->pg_pfn && *(int *)pts) {
1329: if (prot == VM_PROT_NONE) {
1.69 ragge 1330: RECURSEEND;
1.48 ragge 1331: if ((*(int *)pts & PG_SREF) == 0)
1.111 ragge 1332: rmpage(pmap, (u_int *)pts);
1333: #ifdef DEBUG
1334: else
1335: panic("pmap_remove PG_SREF page");
1336: #endif
1.69 ragge 1337: RECURSESTART;
1.56 ragge 1338: bzero(pts, sizeof(struct pte) * LTOHPN);
1.110 ragge 1339: if (pt != Sysmap) {
1.124 ragge 1340: if (ptpinuse(pts) == 0)
1.111 ragge 1341: rmptep(pts);
1.110 ragge 1342: }
1.70 ragge 1343: } else {
1.40 ragge 1344: pts[0].pg_prot = pr;
1345: pts[1].pg_prot = pr;
1.53 ragge 1346: pts[2].pg_prot = pr;
1347: pts[3].pg_prot = pr;
1348: pts[4].pg_prot = pr;
1349: pts[5].pg_prot = pr;
1350: pts[6].pg_prot = pr;
1351: pts[7].pg_prot = pr;
1.40 ragge 1352: }
1353: }
1.70 ragge 1354: pts += LTOHPN;
1.2 ragge 1355: }
1.69 ragge 1356: RECURSEEND;
1.101 ragge 1357: #ifdef MULTIPROCESSOR
1358: cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1359: #endif
1.85 ragge 1360: mtpr(0, PR_TBIA);
1.45 ragge 1361: }
1362:
1.64 ragge 1363: int pmap_simulref(int bits, int addr);
1.57 ragge 1364: /*
 1365: * Called from interrupt vector routines if we get a page invalid fault.
 * The VAX has no hardware reference bit, so referenced state is
 * simulated by taking this fault and turning PG_V back on.
 1366: * Note: the save mask must be or'ed with 0x3f for this function.
 1367: * Returns 0 if normal call, 1 if CVAX bug detected.
1368: */
1369: int
1.64 ragge 1370: pmap_simulref(int bits, int addr)
1.57 ragge 1371: {
1.64 ragge 1372: u_int *pte;
1.110 ragge 1373: struct pv_entry *pv;
1374: paddr_t pa;
1375:
1376: PMDEBUG(("pmap_simulref: bits %x addr %x\n", bits, addr));
1.57 ragge 1377:
1378: #ifdef DEBUG
1379: if (bits & 1)
1380: panic("pte trans len");
1381: #endif
1.64 ragge 1382: /* Set address on a logical page boundary */
1383: addr &= ~PGOFSET;
1.57 ragge 1384: /* First decode userspace addr */
1385: if (addr >= 0) {
1386: if ((addr << 1) < 0)
1.64 ragge 1387: pte = (u_int *)mfpr(PR_P1BR);
1.57 ragge 1388: else
1.64 ragge 1389: pte = (u_int *)mfpr(PR_P0BR);
1390: pte += PG_PFNUM(addr);
1.57 ragge 1391: if (bits & 2) { /* PTE reference */
1.124 ragge 1392: pte = (u_int *)kvtopte(vax_trunc_page(pte));
1.64 ragge 1393: if (pte[0] == 0) /* Check for CVAX bug */
1394: return 1;
1.124 ragge 1395: panic("pmap_simulref");
1.64 ragge 1396: pa = (u_int)pte & ~KERNBASE;
1397: } else
1398: pa = Sysmap[PG_PFNUM(pte)].pg_pfn << VAX_PGSHIFT;
1399: } else {
1400: pte = (u_int *)kvtopte(addr);
1401: pa = (u_int)pte & ~KERNBASE;
1402: }
1403: pte[0] |= PG_V;
1404: pte[1] |= PG_V;
1405: pte[2] |= PG_V;
1406: pte[3] |= PG_V;
1407: pte[4] |= PG_V;
1408: pte[5] |= PG_V;
1409: pte[6] |= PG_V;
1410: pte[7] |= PG_V;
1.92 ragge 1411: if (IOSPACE(pa) == 0) { /* No pv_table fiddling in iospace */
1.101 ragge 1412: PVTABLE_LOCK;
1.92 ragge 1413: pv = pv_table + (pa >> PGSHIFT);
1414: pv->pv_attr |= PG_V; /* Referenced */
1415: if (bits & 4) /* (will be) modified. XXX page tables */
1416: pv->pv_attr |= PG_M;
1.101 ragge 1417: PVTABLE_UNLOCK;
1.92 ragge 1418: }
1.64 ragge 1419: return 0;
1420: }
1.57 ragge 1421:
1.45 ragge 1422: /*
 1423: * Clears the valid bit in all ptes that reference this physical page.
1424: */
1.48 ragge 1425: boolean_t
1.111 ragge 1426: pmap_clear_reference_long(struct pv_entry *pv)
1.48 ragge 1427: {
1.110 ragge 1428: struct pte *pte;
1.93 ragge 1429: int ref = 0;
1.45 ragge 1430:
1.111 ragge 1431: PMDEBUG(("pmap_clear_reference: pv_entry %p\n", pv));
1.45 ragge 1432:
1.69 ragge 1433: RECURSESTART;
1.101 ragge 1434: PVTABLE_LOCK;
1.110 ragge 1435: if (pv->pv_pmap != NULL) {
1436: pte = vaddrtopte(pv);
1437: if (pte->pg_w == 0) {
1438: pte[0].pg_v = pte[1].pg_v = pte[2].pg_v =
1439: pte[3].pg_v = pte[4].pg_v = pte[5].pg_v =
1440: pte[6].pg_v = pte[7].pg_v = 0;
1441: }
1442: }
1443:
1444: while ((pv = pv->pv_next)) {
1445: pte = vaddrtopte(pv);
1446: if (pte[0].pg_w == 0) {
1447: pte[0].pg_v = pte[1].pg_v =
1448: pte[2].pg_v = pte[3].pg_v =
1449: pte[4].pg_v = pte[5].pg_v =
1450: pte[6].pg_v = pte[7].pg_v = 0;
1451: }
1452: }
1.101 ragge 1453: PVTABLE_UNLOCK;
1.69 ragge 1454: RECURSEEND;
1.101 ragge 1455: #ifdef MULTIPROCESSOR
1456: cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1.110 ragge 1457: #endif
1.85 ragge 1458: mtpr(0, PR_TBIA);
1.93 ragge 1459: return ref;
1.1 ragge 1460: }
1461:
1.40 ragge 1462: /*
1463: * Checks if page is modified; returns true or false depending on result.
1464: */
1.48 ragge 1465: boolean_t
1.111 ragge 1466: pmap_is_modified_long(struct pv_entry *pv)
1.48 ragge 1467: {
1.110 ragge 1468: struct pte *pte;
1469:
1.111 ragge 1470: PMDEBUG(("pmap_is_modified: pv_entry %p ", pv));
1.54 ragge 1471:
1.101 ragge 1472: PVTABLE_LOCK;
1.110 ragge 1473: if (pv->pv_pmap != NULL) {
1474: pte = vaddrtopte(pv);
1475: if ((pte[0].pg_m | pte[1].pg_m | pte[2].pg_m | pte[3].pg_m |
1476: pte[4].pg_m | pte[5].pg_m | pte[6].pg_m | pte[7].pg_m)) {
1477: PMDEBUG(("Yes: (1)\n"));
1.101 ragge 1478: PVTABLE_UNLOCK;
1.40 ragge 1479: return 1;
1.54 ragge 1480: }
1.110 ragge 1481: }
1.2 ragge 1482:
1.40 ragge 1483: while ((pv = pv->pv_next)) {
1.110 ragge 1484: pte = vaddrtopte(pv);
1485: if ((pte[0].pg_m | pte[1].pg_m | pte[2].pg_m | pte[3].pg_m
1486: | pte[4].pg_m | pte[5].pg_m | pte[6].pg_m | pte[7].pg_m)) {
1487: PMDEBUG(("Yes: (2)\n"));
1.101 ragge 1488: PVTABLE_UNLOCK;
1.40 ragge 1489: return 1;
1.54 ragge 1490: }
1.40 ragge 1491: }
1.101 ragge 1492: PVTABLE_UNLOCK;
1.110 ragge 1493: PMDEBUG(("No\n"));
1.55 ragge 1494: return 0;
1.1 ragge 1495: }
1496:
1497: /*
1.40 ragge 1498: * Clears modify bit in all ptes referenced to this physical page.
1.1 ragge 1499: */
1.48 ragge 1500: boolean_t
1.111 ragge 1501: pmap_clear_modify_long(struct pv_entry *pv)
1.48 ragge 1502: {
1.110 ragge 1503: struct pte *pte;
1.103 chs 1504: boolean_t rv = FALSE;
1.5 ragge 1505:
1.111 ragge 1506: PMDEBUG(("pmap_clear_modify: pv_entry %p\n", pv));
1.1 ragge 1507:
1.103 chs 1508: PVTABLE_LOCK;
1.110 ragge 1509: if (pv->pv_pmap != NULL) {
1510: pte = vaddrtopte(pv);
1511: if (pte[0].pg_m | pte[1].pg_m | pte[2].pg_m | pte[3].pg_m |
1512: pte[4].pg_m | pte[5].pg_m | pte[6].pg_m | pte[7].pg_m) {
1.103 chs 1513: rv = TRUE;
1514: }
1.110 ragge 1515: pte[0].pg_m = pte[1].pg_m = pte[2].pg_m = pte[3].pg_m =
1516: pte[4].pg_m = pte[5].pg_m = pte[6].pg_m = pte[7].pg_m = 0;
1.103 chs 1517: }
1.1 ragge 1518:
1.103 chs 1519: while ((pv = pv->pv_next)) {
1.110 ragge 1520: pte = vaddrtopte(pv);
1521: if (pte[0].pg_m | pte[1].pg_m | pte[2].pg_m | pte[3].pg_m |
1522: pte[4].pg_m | pte[5].pg_m | pte[6].pg_m | pte[7].pg_m) {
1.103 chs 1523: rv = TRUE;
1524: }
1.110 ragge 1525: pte[0].pg_m = pte[1].pg_m = pte[2].pg_m = pte[3].pg_m =
1526: pte[4].pg_m = pte[5].pg_m = pte[6].pg_m = pte[7].pg_m = 0;
1.103 chs 1527: }
1.101 ragge 1528: PVTABLE_UNLOCK;
1.103 chs 1529: return rv;
1.1 ragge 1530: }
1531:
1532: /*
1.40 ragge 1533: * Lower the permission for all mappings to a given page.
 1534: * Lower permission can only mean setting protection to either read-only
 1535: * or none, where none means unmapping the page.
1.1 ragge 1536: */
1.48 ragge 1537: void
1.111 ragge 1538: pmap_page_protect_long(struct pv_entry *pv, vm_prot_t prot)
1.48 ragge 1539: {
1.53 ragge 1540: struct pte *pt;
1.111 ragge 1541: struct pv_entry *opv, *pl;
1.54 ragge 1542: int s, *g;
1.110 ragge 1543:
1.111 ragge 1544: PMDEBUG(("pmap_page_protect: pv %p, prot %x\n", pv, prot));
1.110 ragge 1545:
1.2 ragge 1546:
1.40 ragge 1547: if (prot == VM_PROT_ALL) /* 'cannot happen' */
1548: return;
1.2 ragge 1549:
1.69 ragge 1550: RECURSESTART;
1.101 ragge 1551: PVTABLE_LOCK;
1.40 ragge 1552: if (prot == VM_PROT_NONE) {
1.96 thorpej 1553: s = splvm();
1.110 ragge 1554: g = (int *)vaddrtopte(pv);
1.54 ragge 1555: if (g) {
1.107 chs 1556: simple_lock(&pv->pv_pmap->pm_lock);
1557: pv->pv_pmap->pm_stats.resident_count--;
1558: if (g[0] & PG_W) {
1559: pv->pv_pmap->pm_stats.wired_count--;
1560: }
1561: simple_unlock(&pv->pv_pmap->pm_lock);
1.70 ragge 1562: if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
1.54 ragge 1563: pv->pv_attr |=
1564: g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
1.56 ragge 1565: bzero(g, sizeof(struct pte) * LTOHPN);
1.110 ragge 1566: if (pv->pv_pmap != pmap_kernel()) {
1.124 ragge 1567: if (ptpinuse(g) == 0)
1.111 ragge 1568: rmptep((void *)g);
1.110 ragge 1569: }
1570: pv->pv_vaddr = NOVADDR;
1571: pv->pv_pmap = NULL;
1.54 ragge 1572: }
1573: pl = pv->pv_next;
1574: pv->pv_pmap = 0;
1575: pv->pv_next = 0;
1576: while (pl) {
1.110 ragge 1577: g = (int *)vaddrtopte(pl);
1.107 chs 1578: simple_lock(&pl->pv_pmap->pm_lock);
1579: pl->pv_pmap->pm_stats.resident_count--;
1580: if (g[0] & PG_W) {
1581: pl->pv_pmap->pm_stats.wired_count--;
1582: }
1583: simple_unlock(&pl->pv_pmap->pm_lock);
1.70 ragge 1584: if ((pv->pv_attr & (PG_V|PG_M)) != (PG_V|PG_M))
1.54 ragge 1585: pv->pv_attr |=
1586: g[0]|g[1]|g[2]|g[3]|g[4]|g[5]|g[6]|g[7];
1.56 ragge 1587: bzero(g, sizeof(struct pte) * LTOHPN);
1.110 ragge 1588: if (pl->pv_pmap != pmap_kernel()) {
1.124 ragge 1589: if (ptpinuse(g) == 0)
1.111 ragge 1590: rmptep((void *)g);
1.110 ragge 1591: }
1.54 ragge 1592: opv = pl;
1593: pl = pl->pv_next;
1.70 ragge 1594: free_pventry(opv);
1.54 ragge 1595: }
1.40 ragge 1596: splx(s);
1597: } else { /* read-only */
1598: do {
1.110 ragge 1599: pt = vaddrtopte(pv);
1.40 ragge 1600: if (pt == 0)
1601: continue;
1602: pt[0].pg_prot = pt[1].pg_prot =
1.53 ragge 1603: pt[2].pg_prot = pt[3].pg_prot =
1604: pt[4].pg_prot = pt[5].pg_prot =
1605: pt[6].pg_prot = pt[7].pg_prot =
1.110 ragge 1606: ((vaddr_t)pt < ptemapstart ? PROT_KR : PROT_RO);
1.40 ragge 1607: } while ((pv = pv->pv_next));
1.2 ragge 1608: }
1.101 ragge 1609: PVTABLE_UNLOCK;
1.69 ragge 1610: RECURSEEND;
1.101 ragge 1611: #ifdef MULTIPROCESSOR
1612: cpu_send_ipi(IPI_DEST_ALL, IPI_TBIA);
1613: #endif
1.22 ragge 1614: mtpr(0, PR_TBIA);
1.42 thorpej 1615: }
1616:
1617: /*
1.44 ragge 1618: * Activate the address space for the specified process.
1619: * Note that if the process to activate is the current process, then
1620: * the processor internal registers must also be loaded; otherwise
1621: * the current process will have wrong pagetables.
1.42 thorpej 1622: */
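/*
 * The PCB is also linked onto the pmap's pm_share list, so that later
 * changes to the page table base/length registers can be propagated
 * to every PCB using this pmap (see update_pcbs()).
 */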
1623: void
1.119 thorpej 1624: pmap_activate(struct lwp *l)
1.42 thorpej 1625: {
1.110 ragge 1626: struct pm_share *ps;
1.48 ragge 1627: pmap_t pmap;
1628: struct pcb *pcb;
1629:
1.123 ragge 1630: PMDEBUG(("pmap_activate: l %p\n", l));
1.48 ragge 1631:
1.119 thorpej 1632: pmap = l->l_proc->p_vmspace->vm_map.pmap;
1633: pcb = &l->l_addr->u_pcb;
1.42 thorpej 1634:
1635: pcb->P0BR = pmap->pm_p0br;
1.110 ragge 1636: pcb->P0LR = pmap->pm_p0lr|AST_PCB;
1.42 thorpej 1637: pcb->P1BR = pmap->pm_p1br;
1638: pcb->P1LR = pmap->pm_p1lr;
1.44 ragge 1639:
1.110 ragge 1640: ps = (struct pm_share *)get_pventry();
1641: ps->ps_next = pmap->pm_share;
1642: pmap->pm_share = ps;
1643: ps->ps_pcb = pcb;
1644:
1.119 thorpej 1645: if (l == curlwp) {
1.140 ! matt 1646: mtpr((uintptr_t)pmap->pm_p0br, PR_P0BR);
1.110 ragge 1647: mtpr(pmap->pm_p0lr|AST_PCB, PR_P0LR);
1.140 ! matt 1648: mtpr((uintptr_t)pmap->pm_p1br, PR_P1BR);
1.44 ragge 1649: mtpr(pmap->pm_p1lr, PR_P1LR);
1.113 ragge 1650: mtpr(0, PR_TBIA);
1.44 ragge 1651: }
1.110 ragge 1652: }
1653:
1.111 ragge 1654: void
1.119 thorpej 1655: pmap_deactivate(struct lwp *l)
1.110 ragge 1656: {
1.119 thorpej 1657: struct proc *p = l->l_proc;
1.110 ragge 1658: struct pm_share *ps, *ops;
1659: pmap_t pmap;
1660: struct pcb *pcb;
1661:
1.123 ragge 1662: PMDEBUG(("pmap_deactivate: l %p\n", l));
1.110 ragge 1663:
1664: pmap = p->p_vmspace->vm_map.pmap;
1.119 thorpej 1665: pcb = &l->l_addr->u_pcb;
1.110 ragge 1666:
1667: ps = pmap->pm_share;
1668: if (ps->ps_pcb == pcb) {
1669: pmap->pm_share = ps->ps_next;
1670: free_pventry((struct pv_entry *)ps);
1671: return;
1672: }
1673: ops = ps;
1674: ps = ps->ps_next;
1675: while (ps != NULL) {
1676: if (ps->ps_pcb == pcb) {
1677: ops->ps_next = ps->ps_next;
1678: free_pventry((struct pv_entry *)ps);
1679: return;
1680: }
1681: ops = ps;
1682: ps = ps->ps_next;
1683: }
1684: #ifdef DEBUG
1685: panic("pmap_deactivate: not in list");
1686: #endif
1.86 ragge 1687: }
1688:
1689: /*
 1690: * Removes the wired bit from a bunch of PTEs.
1691: */
1692: void
1693: pmap_unwire(pmap_t pmap, vaddr_t v)
1694: {
1695: int *pte;
1696:
1.110 ragge 1697: PMDEBUG(("pmap_unwire: pmap %p v %lx\n", pmap, v));
1698:
1699: RECURSESTART;
1.86 ragge 1700: if (v & KERNBASE) {
1701: pte = (int *)kvtopte(v);
1702: } else {
1703: if (v < 0x40000000)
1704: pte = (int *)&pmap->pm_p0br[PG_PFNUM(v)];
1705: else
1706: pte = (int *)&pmap->pm_p1br[PG_PFNUM(v)];
1707: }
1.107 chs 1708: pte[0] &= ~PG_W;
1.110 ragge 1709: RECURSEEND;
1.107 chs 1710: pmap->pm_stats.wired_count--;
1.70 ragge 1711: }
1712:
1.101 ragge 1713: /*
1714: * pv_entry functions.
1715: */
1.70 ragge 1716: struct pv_entry *pv_list;
1717:
1.101 ragge 1718: /*
1719: * get_pventry().
1720: * The pv_table lock must be held before calling this.
1721: */
1.70 ragge 1722: struct pv_entry *
1723: get_pventry()
1724: {
1725: struct pv_entry *tmp;
1726:
1727: if (pventries == 0)
1728: panic("get_pventry");
1729:
1730: tmp = pv_list;
1731: pv_list = tmp->pv_next;
1732: pventries--;
1733: return tmp;
1734: }
1735:
1.101 ragge 1736: /*
1737: * free_pventry().
1738: * The pv_table lock must be held before calling this.
1739: */
1.70 ragge 1740: void
1741: free_pventry(pv)
1742: struct pv_entry *pv;
1743: {
1744: pv->pv_next = pv_list;
1745: pv_list = pv;
1746: pventries++;
1747: }
1748:
1.101 ragge 1749: /*
1750: * more_pventries().
1751: * The pv_table lock must _not_ be held before calling this.
1752: */
1.70 ragge 1753: void
1754: more_pventries()
1755: {
1756: struct pv_entry *pv;
1757: int s, i, count;
1758:
1.111 ragge 1759: pv = (struct pv_entry *)getpage(NOWAIT);
1760: if (pv == NULL)
1.70 ragge 1761: return;
1.125 thorpej 1762: count = PAGE_SIZE/sizeof(struct pv_entry);
1.70 ragge 1763:
1764: for (i = 0; i < count; i++)
1765: pv[i].pv_next = &pv[i + 1];
1766:
1.96 thorpej 1767: s = splvm();
1.101 ragge 1768: PVTABLE_LOCK;
1.70 ragge 1769: pv[count - 1].pv_next = pv_list;
1770: pv_list = pv;
1771: pventries += count;
1.101 ragge 1772: PVTABLE_UNLOCK;
1.70 ragge 1773: splx(s);
1.1 ragge 1774: }
1.110 ragge 1775:
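/*
 * Free list of VAX-size (512-byte) page table pages. Each 4 KB
 * logical page from getpage() is split into eight chunks; the spare
 * chunks are chained through their first word (128 ints == 512 bytes).
 */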
1.124 ragge 1776: static int *ptpp;
1777:
1778: /*
1779: * Get a (vax-size) page, to use for page tables.
1780: */
1781: vaddr_t
1782: get_ptp(int w)
1783: {
1784: int *a;
1785:
1786: if ((a = ptpp)) {
1787: ptpp = (int *)*ptpp;
1788: bzero(a, VAX_NBPG);
1789: return (vaddr_t)a;
1790: }
1791: a = (int *)getpage(w);
1792: if (a != NULL) {
1793: a[128] = (int)&a[256];
1794: a[256] = (int)&a[384];
1795: a[384] = (int)&a[512];
1796: a[512] = (int)&a[640];
1797: a[640] = (int)&a[768];
1798: a[768] = (int)&a[896];
1799: a[896] = (int)ptpp;
1800: ptpp = &a[128];
1801: }
1802: return (vaddr_t)a;
1803: }
1804:
1805: /*
1806: * Put a page table page on the free list.
 1807: * The address v is physical; it is accessed via the direct-mapped area.
1808: */
1809: void
1810: free_ptp(paddr_t v)
1811: {
1812: v |= KERNBASE;
1813: *(int *)v = (int)ptpp;
1814: ptpp = (int *)v;
1815: }
1816:
1.110 ragge 1817: /*
 1818: * Called when a process is about to be swapped out, to remove the page tables.
1819: */
1820: void
1.119 thorpej 1821: cpu_swapout(struct lwp *l)
1.110 ragge 1822: {
1.119 thorpej 1823: struct proc *p = l->l_proc;
1.110 ragge 1824: pmap_t pm;
1825:
1826: PMDEBUG(("Swapout pid %d\n", p->p_pid));
1827:
1828: pm = p->p_vmspace->vm_map.pmap;
1829: rmspace(pm);
1.119 thorpej 1830: pmap_deactivate(l);
1.110 ragge 1831: }
1832:
1833: /*
 1834: * The kernel stack red zone needs to be set when a process is swapped in.
1835: * Be sure that all pages are valid.
1836: */
1837: void
1.119 thorpej 1838: cpu_swapin(struct lwp *l)
1.110 ragge 1839: {
1840: struct pte *pte;
1841: int i;
1842:
1.119 thorpej 1843: PMDEBUG(("Swapin pid %d.%d\n", l->l_proc->p_pid, l->l_lid));
1.110 ragge 1844:
1.119 thorpej 1845: pte = kvtopte((vaddr_t)l->l_addr);
1.110 ragge 1846: for (i = 0; i < (USPACE/VAX_NBPG); i ++)
1847: pte[i].pg_v = 1;
1.119 thorpej 1848: kvtopte((vaddr_t)l->l_addr + REDZONEADDR)->pg_v = 0;
1849: pmap_activate(l);
1.110 ragge 1850: }
1851: