Annotation of src/sys/arch/i386/include/pmap.h, Revision 1.56
1.56 ! thorpej 1: /* $NetBSD: pmap.h,v 1.55 2001/04/22 23:19:27 thorpej Exp $ */
1.38 mycroft 2:
1.40 thorpej 3: /*
4: *
5: * Copyright (c) 1997 Charles D. Cranor and Washington University.
1.38 mycroft 6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
1.40 thorpej 17: * must display the following acknowledgment:
18: * This product includes software developed by Charles D. Cranor and
19: * Washington University.
20: * 4. The name of the author may not be used to endorse or promote products
21: * derived from this software without specific prior written permission.
1.1 cgd 22: *
1.40 thorpej 23: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1.1 cgd 33: */
34:
35: /*
1.40 thorpej 36: * pmap.h: see pmap.c for the history of this pmap module.
1.1 cgd 37: */
1.34 mrg 38:
1.40 thorpej 39: #ifndef _I386_PMAP_H_
40: #define _I386_PMAP_H_
41:
1.34 mrg 42: #if defined(_KERNEL) && !defined(_LKM)
1.39 thorpej 43: #include "opt_user_ldt.h"
1.48 thorpej 44: #include "opt_largepages.h"
1.34 mrg 45: #endif
1.1 cgd 46:
1.14 mycroft 47: #include <machine/cpufunc.h>
1.6 mycroft 48: #include <machine/pte.h>
1.39 thorpej 49: #include <machine/segments.h>
1.40 thorpej 50: #include <uvm/uvm_object.h>
1.1 cgd 51:
52: /*
1.40 thorpej 53: * see pte.h for a description of i386 MMU terminology and hardware
54: * interface.
55: *
56: * a pmap describes a processes' 4GB virtual address space. this
57: * virtual address space can be broken up into 1024 4MB regions which
1.41 chs 58: * are described by PDEs in the PDP. the PDEs are defined as follows:
1.40 thorpej 59: *
60: * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
1.43 thorpej 61: * (the following assumes that KERNBASE is 0xc0000000)
1.40 thorpej 62: *
63: * PDE#s VA range usage
1.43 thorpej 64: * 0->767 0x0 -> 0xbfc00000 user address space, note that the
65: * max user address is 0xbfbfe000
1.40 thorpej 66: * the final two pages in the last 4MB
67: * used to be reserved for the UAREA
68: * but now are no longer used
1.43 thorpej 69: * 768 0xbfc00000-> recursive mapping of PDP (used for
70: * 0xc0000000 linear mapping of PTPs)
71: * 768->1023 0xc0000000-> kernel address space (constant
1.40 thorpej 72: * 0xffc00000 across all pmap's/processes)
73: * 1023 0xffc00000-> "alternate" recursive PDP mapping
74: * <end> (for other pmaps)
75: *
76: *
77: * note: a recursive PDP mapping provides a way to map all the PTEs for
1.41 chs 78: * a 4GB address space into a linear chunk of virtual memory. in other
79: * words, the PTE for page 0 is the first int mapped into the 4MB recursive
80: * area. the PTE for page 1 is the second int. the very last int in the
1.40 thorpej 81: * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
82: * address).
83: *
1.43 thorpej 84: * all pmap's PD's must have the same values in slots 768->1023 so that
1.41 chs 85: * the kernel is always mapped in every process. these values are loaded
1.40 thorpej 86: * into the PD at pmap creation time.
87: *
1.41 chs 88: * at any one time only one pmap can be active on a processor. this is
89: * the pmap whose PDP is pointed to by processor register %cr3. this pmap
1.40 thorpej 90: * will have all its PTEs mapped into memory at the recursive mapping
1.43 thorpej   91:  * point (slot #767 as shown above).  when the pmap code wants to find the
1.40 thorpej 92: * PTE for a virtual address, all it has to do is the following:
93: *
1.43 thorpej 94: * address of PTE = (767 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
95: * = 0xbfc00000 + (VA / 4096) * 4
1.40 thorpej 96: *
97: * what happens if the pmap layer is asked to perform an operation
1.41 chs 98: * on a pmap that is not the one which is currently active? in that
99: * case we take the PA of the PDP of non-active pmap and put it in
100: * slot 1023 of the active pmap. this causes the non-active pmap's
1.40 thorpej 101: * PTEs to get mapped in the final 4MB of the 4GB address space
102: * (e.g. starting at 0xffc00000).
103: *
104: * the following figure shows the effects of the recursive PDP mapping:
105: *
106: * PDP (%cr3)
107: * +----+
108: * | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
109: * | |
110: * | |
1.43 thorpej 111: * | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
                  112:  * |  768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
1.40 thorpej 113: * | |
114: * |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
115: * +----+
116: *
1.43 thorpej 117: * note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
1.40 thorpej 118: * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
119: *
1.43 thorpej 120: * starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
1.40 thorpej 121: * PTP:
122: *
1.43 thorpej 123: * PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
1.40 thorpej 124: * +----+
1.43 thorpej 125: * | 0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
1.40 thorpej 126: * | |
127: * | |
1.43 thorpej  128:  * |  767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
129: * | 768| -> maps contents of first kernel PTP
1.40 thorpej 130: * | |
131: * |1023|
132: * +----+
133: *
1.41      chs 134:  * note that mapping of the PDP at PTP#767's VA (0xbfeff000) is
1.40  thorpej 135:  * defined as "PDP_BASE".... within that mapping there are two
1.41      chs 136:  * defines:
1.40  thorpej 137:  *   "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
1.41      chs 138:  *      which points back to itself.
1.40  thorpej 139:  *   "APDP_PDE" (0xbfeffffc) is the VA of the PDE in the PDP which
140: * establishes the recursive mapping of the alternate pmap.
141: * to set the alternate PDP, one just has to put the correct
142: * PA info in *APDP_PDE.
143: *
1.41 chs 144: * note that in the APTE_BASE space, the APDP appears at VA
1.40 thorpej 145: * "APDP_BASE" (0xfffff000).
1.1 cgd 146: */
1.33 mrg 147:
148: /*
1.40 thorpej 149: * the following defines identify the slots used as described above.
1.33 mrg 150: */
151:
1.43 thorpej 152: #define PDSLOT_PTE ((KERNBASE/NBPD)-1) /* 767: for recursive PDP map */
153: #define PDSLOT_KERN (KERNBASE/NBPD) /* 768: start of kernel space */
1.40 thorpej 154: #define PDSLOT_APTE ((unsigned)1023) /* 1023: alternative recursive slot */
1.1 cgd 155:
156: /*
1.41 chs 157: * the following defines give the virtual addresses of various MMU
1.40 thorpej 158: * data structures:
159: * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
160: * PTD_BASE and APTD_BASE: the base VA of the recursive mapping of the PTD
161: * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
1.1 cgd 162: */
1.29 fvdl 163:
1.40 thorpej 164: #define PTE_BASE ((pt_entry_t *) (PDSLOT_PTE * NBPD) )
165: #define APTE_BASE ((pt_entry_t *) (PDSLOT_APTE * NBPD) )
1.41 chs 166: #define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
167: #define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
1.40 thorpej 168: #define PDP_PDE (PDP_BASE + PDSLOT_PTE)
169: #define APDP_PDE (PDP_BASE + PDSLOT_APTE)
1.1 cgd 170:
171: /*
1.40 thorpej 172: * XXXCDC: tmp xlate from old names:
173: * PTDPTDI -> PDSLOT_PTE
174: * KPTDI -> PDSLOT_KERN
175: * APTDPTDI -> PDSLOT_APTE
1.1 cgd 176: */
1.40 thorpej 177:
178: /*
179: * the follow define determines how many PTPs should be set up for the
1.41 chs 180: * kernel by locore.s at boot time. this should be large enough to
181: * get the VM system running. once the VM system is running, the
1.40 thorpej 182: * pmap module can add more PTPs to the kernel area on demand.
183: */
184:
185: #ifndef NKPTP
186: #define NKPTP 4 /* 16MB to start */
1.1 cgd 187: #endif
1.40 thorpej 188: #define NKPTP_MIN 4 /* smallest value we allow */
189: #define NKPTP_MAX (1024 - (KERNBASE/NBPD) - 1)
190: /* largest value (-1 for APTP space) */
1.1 cgd 191:
192: /*
1.40 thorpej 193: * pdei/ptei: generate index into PDP/PTP from a VA
1.1 cgd 194: */
1.40 thorpej 195: #define pdei(VA) (((VA) & PD_MASK) >> PDSHIFT)
196: #define ptei(VA) (((VA) & PT_MASK) >> PGSHIFT)
1.1 cgd 197:
198: /*
1.40 thorpej 199: * PTP macros:
200: * a PTP's index is the PD index of the PDE that points to it
201: * a PTP's offset is the byte-offset in the PTE space that this PTP is at
202: * a PTP's VA is the first VA mapped by that PTP
203: *
204: * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
205: * NBPD == number of bytes a PTP can map (4MB)
1.1 cgd 206: */
1.39 thorpej 207:
1.40 thorpej 208: #define ptp_i2o(I) ((I) * NBPG) /* index => offset */
209: #define ptp_o2i(O) ((O) / NBPG) /* offset => index */
210: #define ptp_i2v(I) ((I) * NBPD) /* index => VA */
211: #define ptp_v2i(V) ((V) / NBPD) /* VA => index (same as pdei) */
1.39 thorpej 212:
1.40 thorpej 213: /*
214: * PG_AVAIL usage: we make use of the ignored bits of the PTE
215: */
216:
217: #define PG_W PG_AVAIL1 /* "wired" mapping */
218: #define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
219: /* PG_AVAIL3 not used */
220:
221: #ifdef _KERNEL
222: /*
223: * pmap data structures: see pmap.c for details of locking.
224: */
225:
226: struct pmap;
227: typedef struct pmap *pmap_t;
228:
229: /*
230: * we maintain a list of all non-kernel pmaps
231: */
232:
233: LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
234:
235: /*
236: * the pmap structure
237: *
238: * note that the pm_obj contains the simple_lock, the reference count,
239: * page list, and number of PTPs within the pmap.
240: */
241:
242: struct pmap {
1.41 chs 243: struct uvm_object pm_obj; /* object (lck by object lock) */
1.40 thorpej 244: #define pm_lock pm_obj.vmobjlock
1.41 chs 245: LIST_ENTRY(pmap) pm_list; /* list (lck by pm_list lock) */
246: pd_entry_t *pm_pdir; /* VA of PD (lck by object lock) */
247: u_int32_t pm_pdirpa; /* PA of PD (read-only after create) */
248: struct vm_page *pm_ptphint; /* pointer to a PTP in our pmap */
249: struct pmap_statistics pm_stats; /* pmap stats (lck by object lock) */
250:
251: int pm_flags; /* see below */
252:
253: union descriptor *pm_ldt; /* user-set LDT */
254: int pm_ldt_len; /* number of LDT entries */
255: int pm_ldt_sel; /* LDT selector */
1.40 thorpej 256: };
1.1 cgd 257:
1.39 thorpej 258: /* pm_flags */
259: #define PMF_USER_LDT 0x01 /* pmap has user-set LDT */
260:
1.1 cgd 261: /*
1.40 thorpej 262: * for each managed physical page we maintain a list of <PMAP,VA>'s
1.41 chs 263: * which it is mapped at. the list is headed by a pv_head structure.
1.40 thorpej 264: * there is one pv_head per managed phys page (allocated at boot time).
265: * the pv_head structure points to a list of pv_entry structures (each
266: * describes one mapping).
1.1 cgd 267: */
1.40 thorpej 268:
269: struct pv_entry;
270:
271: struct pv_head {
1.41 chs 272: simple_lock_data_t pvh_lock; /* locks every pv on this list */
273: struct pv_entry *pvh_list; /* head of list (locked by pvh_lock) */
1.40 thorpej 274: };
275:
1.41 chs 276: struct pv_entry { /* locked by its list's pvh_lock */
277: struct pv_entry *pv_next; /* next entry */
278: struct pmap *pv_pmap; /* the pmap */
279: vaddr_t pv_va; /* the virtual address */
280: struct vm_page *pv_ptp; /* the vm_page of the PTP */
1.11 mycroft 281: };
282:
1.40 thorpej 283: /*
284: * pv_entrys are dynamically allocated in chunks from a single page.
285: * we keep track of how many pv_entrys are in use for each page and
1.41 chs 286: * we can free pv_entry pages if needed. there is one lock for the
1.40 thorpej 287: * entire allocation system.
288: */
1.11 mycroft 289:
290: struct pv_page_info {
1.41 chs 291: TAILQ_ENTRY(pv_page) pvpi_list;
292: struct pv_entry *pvpi_pvfree;
293: int pvpi_nfree;
1.11 mycroft 294: };
1.1 cgd 295:
1.11 mycroft 296: /*
1.40 thorpej 297: * number of pv_entry's in a pv_page
298: * (note: won't work on systems where NPBG isn't a constant)
299: */
300:
1.41 chs 301: #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
302: sizeof(struct pv_entry))
1.40 thorpej 303:
304: /*
305: * a pv_page: where pv_entrys are allocated from
1.11 mycroft 306: */
1.1 cgd 307:
1.11 mycroft 308: struct pv_page {
1.41 chs 309: struct pv_page_info pvinfo;
310: struct pv_entry pvents[PVE_PER_PVPAGE];
1.40 thorpej 311: };
312:
313: /*
314: * pmap_remove_record: a record of VAs that have been unmapped, used to
1.41 chs 315: * flush TLB. if we have more than PMAP_RR_MAX then we stop recording.
1.40 thorpej 316: */
317:
318: #define PMAP_RR_MAX 16 /* max of 16 pages (64K) */
319:
320: struct pmap_remove_record {
1.41 chs 321: int prr_npages;
322: vaddr_t prr_vas[PMAP_RR_MAX];
1.40 thorpej 323: };
324:
325: /*
326: * global kernel variables
327: */
328:
329: /* PTDpaddr: is the physical address of the kernel's PDP */
330: extern u_long PTDpaddr;
331:
332: extern struct pmap kernel_pmap_store; /* kernel pmap */
333: extern int nkpde; /* current # of PDEs for kernel */
334: extern int pmap_pg_g; /* do we support PG_G? */
335:
336: /*
337: * macros
338: */
1.1 cgd 339:
1.18 mycroft 340: #define pmap_kernel() (&kernel_pmap_store)
1.1 cgd 341: #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
1.50 is 342: #define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
1.55 thorpej 343: #define pmap_update() /* nothing (yet) */
1.11 mycroft 344:
1.40 thorpej 345: #define pmap_clear_modify(pg) pmap_change_attrs(pg, 0, PG_M)
346: #define pmap_clear_reference(pg) pmap_change_attrs(pg, 0, PG_U)
1.45 mycroft 347: #define pmap_copy(DP,SP,D,L,S)
1.40 thorpej 348: #define pmap_is_modified(pg) pmap_test_attrs(pg, PG_M)
349: #define pmap_is_referenced(pg) pmap_test_attrs(pg, PG_U)
1.45 mycroft 350: #define pmap_move(DP,SP,D,L,S)
1.40 thorpej 351: #define pmap_phys_address(ppn) i386_ptob(ppn)
352: #define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
353:
354:
355: /*
356: * prototypes
357: */
358:
359: void pmap_activate __P((struct proc *));
360: void pmap_bootstrap __P((vaddr_t));
361: boolean_t pmap_change_attrs __P((struct vm_page *, int, int));
362: void pmap_deactivate __P((struct proc *));
363: static void pmap_page_protect __P((struct vm_page *, vm_prot_t));
364: void pmap_page_remove __P((struct vm_page *));
1.41 chs 365: static void pmap_protect __P((struct pmap *, vaddr_t,
1.40 thorpej 366: vaddr_t, vm_prot_t));
367: void pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
368: boolean_t pmap_test_attrs __P((struct vm_page *, int));
369: static void pmap_update_pg __P((vaddr_t));
370: static void pmap_update_2pg __P((vaddr_t,vaddr_t));
1.41 chs 371: void pmap_write_protect __P((struct pmap *, vaddr_t,
1.40 thorpej 372: vaddr_t, vm_prot_t));
373:
374: vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */
375:
376: #define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
1.44 thorpej 377:
378: /*
379: * Do idle page zero'ing uncached to avoid polluting the cache.
380: */
1.56 ! thorpej 381: boolean_t pmap_pageidlezero __P((paddr_t));
! 382: #define PMAP_PAGEIDLEZERO(pa) pmap_pageidlezero((pa))
1.40 thorpej 383:
384: /*
385: * inline functions
386: */
387:
388: /*
389: * pmap_update_pg: flush one page from the TLB (or flush the whole thing
390: * if hardware doesn't support one-page flushing)
391: */
392:
1.41 chs 393: __inline static void
394: pmap_update_pg(va)
395: vaddr_t va;
1.11 mycroft 396: {
1.40 thorpej 397: #if defined(I386_CPU)
1.41 chs 398: if (cpu_class == CPUCLASS_386)
1.52 thorpej 399: tlbflush();
1.41 chs 400: else
1.40 thorpej 401: #endif
1.41 chs 402: invlpg((u_int) va);
1.11 mycroft 403: }
404:
1.40 thorpej 405: /*
406: * pmap_update_2pg: flush two pages from the TLB
407: */
408:
1.41 chs 409: __inline static void
410: pmap_update_2pg(va, vb)
411: vaddr_t va, vb;
1.11 mycroft 412: {
1.40 thorpej 413: #if defined(I386_CPU)
1.41 chs 414: if (cpu_class == CPUCLASS_386)
1.52 thorpej 415: tlbflush();
1.41 chs 416: else
1.40 thorpej 417: #endif
1.41 chs 418: {
419: invlpg((u_int) va);
420: invlpg((u_int) vb);
421: }
1.11 mycroft 422: }
423:
1.40 thorpej 424: /*
425: * pmap_page_protect: change the protection of all recorded mappings
426: * of a managed page
427: *
428: * => this function is a frontend for pmap_page_remove/pmap_change_attrs
429: * => we only have to worry about making the page more protected.
430: * unprotecting a page is done on-demand at fault time.
431: */
432:
1.41 chs 433: __inline static void
434: pmap_page_protect(pg, prot)
435: struct vm_page *pg;
436: vm_prot_t prot;
1.11 mycroft 437: {
1.41 chs 438: if ((prot & VM_PROT_WRITE) == 0) {
439: if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
440: (void) pmap_change_attrs(pg, PG_RO, PG_RW);
441: } else {
442: pmap_page_remove(pg);
443: }
444: }
1.11 mycroft 445: }
446:
1.40 thorpej 447: /*
448: * pmap_protect: change the protection of pages in a pmap
449: *
450: * => this function is a frontend for pmap_remove/pmap_write_protect
451: * => we only have to worry about making the page more protected.
452: * unprotecting a page is done on-demand at fault time.
453: */
454:
1.41 chs 455: __inline static void
456: pmap_protect(pmap, sva, eva, prot)
457: struct pmap *pmap;
458: vaddr_t sva, eva;
459: vm_prot_t prot;
1.11 mycroft 460: {
1.41 chs 461: if ((prot & VM_PROT_WRITE) == 0) {
462: if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
463: pmap_write_protect(pmap, sva, eva, prot);
464: } else {
465: pmap_remove(pmap, sva, eva);
466: }
467: }
1.47 thorpej 468: }
469:
470: /*
471: * various address inlines
472: *
473: * vtopte: return a pointer to the PTE mapping a VA, works only for
474: * user and PT addresses
475: *
476: * kvtopte: return a pointer to the PTE mapping a kernel VA
477: */
478:
479: #include <lib/libkern/libkern.h>
480:
481: static __inline pt_entry_t *
482: vtopte(vaddr_t va)
483: {
484:
485: KASSERT(va < (PDSLOT_KERN << PDSHIFT));
486:
487: return (PTE_BASE + i386_btop(va));
488: }
489:
490: static __inline pt_entry_t *
491: kvtopte(vaddr_t va)
492: {
493:
494: KASSERT(va >= (PDSLOT_KERN << PDSHIFT));
1.48 thorpej 495:
496: #ifdef LARGEPAGES
497: {
498: pd_entry_t *pde;
499:
1.51 chs 500: pde = PDP_BASE + pdei(va);
1.48 thorpej 501: if (*pde & PG_PS)
502: return ((pt_entry_t *)pde);
503: }
504: #endif
1.47 thorpej 505:
506: return (PTE_BASE + i386_btop(va));
1.41 chs 507: }
1.35 cgd 508:
1.46 thorpej 509: paddr_t vtophys __P((vaddr_t));
1.41 chs 510: vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));
1.39 thorpej 511:
512: #if defined(USER_LDT)
513: void pmap_ldt_cleanup __P((struct proc *));
514: #define PMAP_FORK
515: #endif /* USER_LDT */
1.1 cgd 516:
1.40 thorpej 517: #endif /* _KERNEL */
518: #endif /* _I386_PMAP_H_ */
CVSweb <webmaster@jp.NetBSD.org>