Annotation of src/sys/arch/i386/include/pmap.h, Revision 1.104
1.104 ! jym 1: /* $NetBSD: pmap.h,v 1.103 2008/10/26 06:57:30 mrg Exp $ */
1.38 mycroft 2:
1.40 thorpej 3: /*
4: *
5: * Copyright (c) 1997 Charles D. Cranor and Washington University.
1.38 mycroft 6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
1.40 thorpej 17: * must display the following acknowledgment:
18: * This product includes software developed by Charles D. Cranor and
19: * Washington University.
20: * 4. The name of the author may not be used to endorse or promote products
21: * derived from this software without specific prior written permission.
1.1 cgd 22: *
1.40 thorpej 23: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1.1 cgd 33: */
34:
35: /*
1.94 yamt 36: * Copyright (c) 2001 Wasabi Systems, Inc.
37: * All rights reserved.
38: *
39: * Written by Frank van der Linden for Wasabi Systems, Inc.
40: *
41: * Redistribution and use in source and binary forms, with or without
42: * modification, are permitted provided that the following conditions
43: * are met:
44: * 1. Redistributions of source code must retain the above copyright
45: * notice, this list of conditions and the following disclaimer.
46: * 2. Redistributions in binary form must reproduce the above copyright
47: * notice, this list of conditions and the following disclaimer in the
48: * documentation and/or other materials provided with the distribution.
49: * 3. All advertising materials mentioning features or use of this software
50: * must display the following acknowledgement:
51: * This product includes software developed for the NetBSD Project by
52: * Wasabi Systems, Inc.
53: * 4. The name of Wasabi Systems, Inc. may not be used to endorse
54: * or promote products derived from this software without specific prior
55: * written permission.
56: *
57: * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
58: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
59: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
60: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
61: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67: * POSSIBILITY OF SUCH DAMAGE.
1.1 cgd 68: */
1.34 mrg 69:
1.40 thorpej 70: #ifndef _I386_PMAP_H_
71: #define _I386_PMAP_H_
72:
1.58 mrg 73: #if defined(_KERNEL_OPT)
1.39 thorpej 74: #include "opt_user_ldt.h"
1.98 bouyer 75: #include "opt_xen.h"
1.34 mrg 76: #endif
1.1 cgd 77:
1.96 ad 78: #include <sys/atomic.h>
79:
1.103 mrg 80: #include <i386/pte.h>
1.39 thorpej 81: #include <machine/segments.h>
1.92 ad 82: #if defined(_KERNEL)
1.91 ad 83: #include <machine/cpufunc.h>
84: #endif
1.90 ad 85:
1.40 thorpej 86: #include <uvm/uvm_object.h>
1.98 bouyer 87: #ifdef XEN
88: #include <xen/xenfunc.h>
89: #include <xen/xenpmap.h>
90: #endif /* XEN */
1.1 cgd 91:
92: /*
1.40 thorpej 93: * see pte.h for a description of i386 MMU terminology and hardware
94: * interface.
95: *
1.102 bouyer 96: * a pmap describes a process's 4GB virtual address space. when PAE
97: * is not in use, this virtual address space can be broken up into 1024 4MB
98: * regions which are described by PDEs in the PDP. the PDEs are defined as
99: * follows:
1.40 thorpej 100: *
101: * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
1.43 thorpej 102: * (the following assumes that KERNBASE is 0xc0000000)
1.40 thorpej 103: *
104: * PDE#s VA range usage
1.68 drochner 105: * 0->766 0x0 -> 0xbfc00000 user address space
1.61 yamt 106: * 767 0xbfc00000-> recursive mapping of PDP (used for
1.43 thorpej 107: * 0xc0000000 linear mapping of PTPs)
108: * 768->1023 0xc0000000-> kernel address space (constant
1.40 thorpej 109: * 0xffc00000 across all pmaps/processes)
110: * 1023 0xffc00000-> "alternate" recursive PDP mapping
111: * <end> (for other pmaps)
112: *
113: *
114: * note: a recursive PDP mapping provides a way to map all the PTEs for
1.41 chs 115: * a 4GB address space into a linear chunk of virtual memory. in other
116: * words, the PTE for page 0 is the first int mapped into the 4MB recursive
117: * area. the PTE for page 1 is the second int. the very last int in the
1.81 junyoung 118: * 4MB range is the PTE that maps VA 0xfffff000 (the last page of the 4GB
1.40 thorpej 119: * address space).
120: *
1.43 thorpej 121: * all pmaps' PDs must have the same values in slots 768->1023 so that
1.41 chs 122: * the kernel is always mapped in every process. these values are loaded
1.40 thorpej 123: * into the PD at pmap creation time.
124: *
1.41 chs 125: * at any one time only one pmap can be active on a processor. this is
126: * the pmap whose PDP is pointed to by processor register %cr3. this pmap
1.40 thorpej 127: * will have all its PTEs mapped into memory at the recursive mapping
1.43 thorpej 128: * point (slot #767 as shown above). when the pmap code wants to find the
1.40 thorpej 129: * PTE for a virtual address, all it has to do is the following:
130: *
1.71 thorpej 131: * address of PTE = (767 * 4MB) + (VA / PAGE_SIZE) * sizeof(pt_entry_t)
1.43 thorpej 132: * = 0xbfc00000 + (VA / 4096) * 4
1.40 thorpej 133: *
134: * what happens if the pmap layer is asked to perform an operation
1.41 chs 135: * on a pmap that is not the one which is currently active? in that
136: * case we take the PA of the PDP of non-active pmap and put it in
137: * slot 1023 of the active pmap. this causes the non-active pmap's
1.40 thorpej 138: * PTEs to get mapped in the final 4MB of the 4GB address space
139: * (e.g. starting at 0xffc00000).
140: *
141: * the following figure shows the effects of the recursive PDP mapping:
142: *
143: * PDP (%cr3)
144: * +----+
145: * | 0| -> PTP#0 that maps VA 0x0 -> 0x400000
146: * | |
147: * | |
1.43 thorpej 148: * | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
1.83 junyoung 149: * | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
1.40 thorpej 150: * | |
151: * |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
152: * +----+
153: *
1.43 thorpej 154: * note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
1.40 thorpej 155: * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
156: *
1.43 thorpej 157: * starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
1.40 thorpej 158: * PTP:
159: *
1.43 thorpej 160: * PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
1.40 thorpej 161: * +----+
1.43 thorpej 162: * | 0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
1.40 thorpej 163: * | |
164: * | |
1.81 junyoung 165: * | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
1.43 thorpej 166: * | 768| -> maps contents of first kernel PTP
1.40 thorpej 167: * | |
168: * |1023|
169: * +----+
170: *
1.81 junyoung 171: * note that mapping of the PDP at PTP#767's VA (0xbfeff000) is
1.40 thorpej 172: * defined as "PDP_BASE".... within that mapping there are two
1.41 chs 173: * defines:
1.59 chs 174: * "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
1.41 chs 175: * which points back to itself.
1.59 chs 176: * "APDP_PDE" (0xbfeffffc) is the VA of the PDE in the PDP which
1.40 thorpej 177: * establishes the recursive mapping of the alternate pmap.
178: * to set the alternate PDP, one just has to put the correct
179: * PA info in *APDP_PDE.
180: *
1.41 chs 181: * note that in the APTE_BASE space, the APDP appears at VA
1.40 thorpej 182: * "APDP_BASE" (0xfffff000).
1.102 bouyer 183: *
184: * When PAE is in use, the L3 page directory breaks up the address space into
 185: * four 1GB regions, each of them broken into 512 2MB regions by the L2 PD
 186: * (the size of the pages at the L1 level is still 4K).
 187: * The kernel virtual space is mapped by the last entry in the L3 page,
 188: * the first 3 entries mapping the user VA space.
 189: * Because the L3 has only 4 entries of 1GB each, we can't use recursive
 190: * mappings at this level for PDP_PDE and APDP_PDE (this would eat 2 of the
 191: * 4GB virtual space). There are also restrictions imposed by Xen on the
 192: * last entry of the L3 PD, which make it hard to use one L3 page per pmap
 193: * and switch %cr3 to switch pmaps. So we use one static L3 page which is
 194: * always loaded in %cr3, and we use it as 2 virtual PD pointers: one for
1.104 ! jym 195: * kernel space (L3[3], always loaded), and one for user space (in fact the
1.102 bouyer 196: * first 3 entries of the L3 PD), and we claim the VM has only a 2-level
 197: * PTP (with the L2 index extended by 2 bits).
 198: * PTE_BASE and APTE_BASE will need 4 entries in the L2 page table.
 199: * In addition, we can't recursively map L3[3] (Xen wants the ref count on
 200: * this page to be exactly one), so we use a shadow PD page for the last
 201: * L2 PD. The shadow page could be static too, but to make pm_pdir[]
 202: * contiguous we'll allocate/copy one page per pmap.
1.1 cgd 203: */
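/*
 * Editor's illustrative sketch (not part of the original interface):
 * with the non-PAE layout described above, finding the PTE for a VA is
 * pure arithmetic through the recursive mapping.  The helper name is
 * hypothetical; the real code uses PTE_BASE, defined further below.
 */
#if 0	/* example only */
static __inline pt_entry_t *
example_vtopte(vaddr_t va)
{
	/* 0xbfc00000 + (va / 4096) * 4; e.g. VA 0xc0100000 -> 0xbff00400 */
	return (pt_entry_t *)0xbfc00000 + (va >> 12);
}
#endif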
1.65 fvdl 204: /* XXX MP should we allocate one APDP_PDE per processor?? */
1.33 mrg 205:
206: /*
1.94 yamt 207: * Mask to get rid of the sign-extended part of addresses. i386 virtual
 * addresses are 32 bits wide and never sign-extended, so the mask is 0
 * and these macros are no-ops; they exist for symmetry with amd64.
208: */
209: #define VA_SIGN_MASK 0
210: #define VA_SIGN_NEG(va) ((va) | VA_SIGN_MASK)
211: /*
212: * XXXfvdl this one's not right.
213: */
214: #define VA_SIGN_POS(va) ((va) & ~VA_SIGN_MASK)
215:
216: /*
1.40 thorpej 217: * the following defines identify the slots used as described above.
1.33 mrg 218: */
1.102 bouyer 219: #ifdef PAE
220: #define L2_SLOT_PTE (KERNBASE/NBPD_L2-4) /* 1532: for recursive PDP map */
221: #define L2_SLOT_KERN (KERNBASE/NBPD_L2) /* 1536: start of kernel space */
222: #define L2_SLOT_KERNBASE L2_SLOT_KERN
223: #define L2_SLOT_APTE 1960 /* 1964-2047 reserved by Xen */
224: #else /* PAE */
225: #define L2_SLOT_PTE (KERNBASE/NBPD_L2-1) /* 767: for recursive PDP map */
226: #define L2_SLOT_KERN (KERNBASE/NBPD_L2) /* 768: start of kernel space */
1.94 yamt 227: #define L2_SLOT_KERNBASE L2_SLOT_KERN
1.98 bouyer 228: #ifndef XEN
1.102 bouyer 229: #define L2_SLOT_APTE 1023 /* 1023: alternate recursive slot */
1.98 bouyer 230: #else
231: #define L2_SLOT_APTE 1007 /* 1008-1023 reserved by Xen */
232: #endif
1.102 bouyer 233: #endif /* PAE */
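/*
 * Editor's worked example (assuming KERNBASE == 0xc0000000): without
 * PAE, NBPD_L2 is 4MB, so KERNBASE/NBPD_L2 = 0xc0000000/0x400000 = 768,
 * putting the recursive slot at 767.  With PAE, NBPD_L2 is 2MB, giving
 * 1536, and the recursive map needs 4 slots, hence L2_SLOT_PTE = 1532.
 */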
1.98 bouyer 234:
1.94 yamt 235:
236: #define PDIR_SLOT_KERN L2_SLOT_KERN
237: #define PDIR_SLOT_PTE L2_SLOT_PTE
238: #define PDIR_SLOT_APTE L2_SLOT_APTE
1.1 cgd 239:
240: /*
1.41 chs 241: * the following defines give the virtual addresses of various MMU
1.40 thorpej 242: * data structures:
243: * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
1.81 junyoung 244: * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
1.40 thorpej 245: * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
1.1 cgd 246: */
1.29 fvdl 247:
1.102 bouyer 248: #define PTE_BASE ((pt_entry_t *) (PDIR_SLOT_PTE * NBPD_L2))
249: #define APTE_BASE ((pt_entry_t *) (VA_SIGN_NEG((PDIR_SLOT_APTE * NBPD_L2))))
1.40 thorpej 250:
1.94 yamt 251: #define L1_BASE PTE_BASE
252: #define AL1_BASE APTE_BASE
1.40 thorpej 253:
1.94 yamt 254: #define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L2_SLOT_PTE * NBPD_L1))
255: #define AL2_BASE ((pd_entry_t *)((char *)AL1_BASE + L2_SLOT_PTE * NBPD_L1))
1.40 thorpej 256:
1.94 yamt 257: #define PDP_PDE (L2_BASE + PDIR_SLOT_PTE)
1.102 bouyer 258: #ifdef PAE
259: /*
260: * when PAE is in use we can't write APDP_PDE through the recursive mapping,
 261: * because it points to the shadow PD. Use the kernel PD instead, which is
 262: * static.
263: */
264: #define APDP_PDE (&pmap_kl2pd[l2tol2(PDIR_SLOT_APTE)])
265: #define APDP_PDE_SHADOW (L2_BASE + PDIR_SLOT_APTE)
266: #else /* PAE */
1.94 yamt 267: #define APDP_PDE (L2_BASE + PDIR_SLOT_APTE)
1.102 bouyer 268: #endif /* PAE */
1.40 thorpej 269:
1.94 yamt 270: #define PDP_BASE L2_BASE
271: #define APDP_BASE AL2_BASE
1.1 cgd 272:
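/*
 * Editor's worked derivation of the non-PAE constants quoted in the
 * big comment above:
 *	L2_BASE  = PTE_BASE + L2_SLOT_PTE * NBPD_L1
 *	         = 0xbfc00000 + 767 * 0x1000         = 0xbfeff000
 *	PDP_PDE  = L2_BASE + 767  (pd_entry_t slots) = 0xbfeffbfc
 *	APDP_PDE = L2_BASE + 1023                    = 0xbfeffffc
 */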
1.94 yamt 273: /* largest value (-1 for APTP space) */
274: #define NKL2_MAX_ENTRIES (NTOPLEVEL_PDES - (KERNBASE/NBPD_L2) - 1)
275: #define NKL1_MAX_ENTRIES (unsigned long)(NKL2_MAX_ENTRIES * NPDPG)
1.39 thorpej 276:
1.94 yamt 277: #define NKL2_KIMG_ENTRIES 0 /* XXX unused */
1.40 thorpej 278:
1.94 yamt 279: #define NKL2_START_ENTRIES 0 /* XXX computed on runtime */
280: #define NKL1_START_ENTRIES 0 /* XXX unused */
1.11 mycroft 281:
1.102 bouyer 282: #ifdef PAE
283: #define NTOPLEVEL_PDES (PAGE_SIZE * 4 / (sizeof (pd_entry_t)))
284: #else
1.94 yamt 285: #define NTOPLEVEL_PDES (PAGE_SIZE / (sizeof (pd_entry_t)))
1.102 bouyer 286: #endif
1.11 mycroft 287:
1.94 yamt 288: #define NPDPG (PAGE_SIZE / sizeof (pd_entry_t))
1.1 cgd 289:
1.94 yamt 290: #define PTP_MASK_INITIALIZER { L1_FRAME, L2_FRAME }
291: #define PTP_SHIFT_INITIALIZER { L1_SHIFT, L2_SHIFT }
292: #define NKPTP_INITIALIZER { NKL1_START_ENTRIES, NKL2_START_ENTRIES }
293: #define NKPTPMAX_INITIALIZER { NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES }
294: #define NBPD_INITIALIZER { NBPD_L1, NBPD_L2 }
295: #define PDES_INITIALIZER { L2_BASE }
296: #define APDES_INITIALIZER { AL2_BASE }
1.40 thorpej 297:
1.94 yamt 298: #define PTP_LEVELS 2
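/*
 * Editor's note: the *_INITIALIZER macros above fill the per-level
 * tables used by the machine-independent x86 pmap (see <x86/pmap.h>,
 * included below); i386 has a 2-level tree, hence PTP_LEVELS == 2.
 */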
1.40 thorpej 299:
300: /*
1.94 yamt 301: * PG_AVAIL usage: we make use of the ignored bits of the PTE
1.11 mycroft 302: */
1.1 cgd 303:
1.94 yamt 304: #define PG_W PG_AVAIL1 /* "wired" mapping */
305: #define PG_PVLIST PG_AVAIL2 /* mapping has entry on pvlist */
306: #define PG_X PG_AVAIL3 /* executable mapping */
1.40 thorpej 307:
308: /*
1.94 yamt 309: * Number of PTEs per cache line, assuming a 32-byte cache line:
 310: * 4-byte PTEs give 8 per line, 8-byte PAE PTEs give 4.
 * Used to avoid false sharing of cache lines.
1.40 thorpej 311: */
1.102 bouyer 312: #ifdef PAE
313: #define NPTECL 4
314: #else
1.94 yamt 315: #define NPTECL 8
1.102 bouyer 316: #endif
1.70 fvdl 317:
1.98 bouyer 318: #include <x86/pmap.h>
319:
320: #ifndef XEN
1.95 bouyer 321: #define pmap_pa2pte(a) (a)
322: #define pmap_pte2pa(a) ((a) & PG_FRAME)
323: #define pmap_pte_set(p, n) do { *(p) = (n); } while (0)
1.100 yamt 324: #define pmap_pte_cas(p, o, n) atomic_cas_32((p), (o), (n))
1.96 ad 325: #define pmap_pte_testset(p, n) \
 326: atomic_swap_ulong((volatile unsigned long *)(p), (n))
 327: #define pmap_pte_setbits(p, b) \
 328: atomic_or_ulong((volatile unsigned long *)(p), (b))
 329: #define pmap_pte_clearbits(p, b) \
 330: atomic_and_ulong((volatile unsigned long *)(p), ~(b))
1.95 bouyer 331: #define pmap_pte_flush() /* nothing */
1.98 bouyer 332: #else
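/*
 * Xen variants: PTEs hold machine frame numbers rather than physical
 * ones, and page table updates must go through the hypervisor, so the
 * macros above become inline functions that translate with
 * xpmap_ptom/xpmap_mtop and queue updates via xpq_queue_pte_update().
 */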
333: static __inline pt_entry_t
334: pmap_pa2pte(paddr_t pa)
335: {
336: return (pt_entry_t)xpmap_ptom_masked(pa);
337: }
338:
339: static __inline paddr_t
340: pmap_pte2pa(pt_entry_t pte)
341: {
342: return xpmap_mtop_masked(pte & PG_FRAME);
343: }
344: static __inline void
345: pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
346: {
347: int s = splvm();
1.102 bouyer 348: xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
1.98 bouyer 349: splx(s);
350: }
351:
352: static __inline pt_entry_t
1.101 bouyer 353: pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
1.100 yamt 354: {
355: int s = splvm();
356: pt_entry_t opte = *ptep;
357:
358: if (opte == o) {
1.102 bouyer 359: xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
1.100 yamt 360: xpq_flush_queue();
361: }
362: splx(s);
363: return opte;
364: }
365:
366: static __inline pt_entry_t
1.98 bouyer 367: pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
368: {
369: int s = splvm();
370: pt_entry_t opte = *pte;
1.102 bouyer 371: xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
1.98 bouyer 372: npte);
373: xpq_flush_queue();
374: splx(s);
375: return opte;
376: }
377:
378: static __inline void
379: pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
380: {
381: int s = splvm();
1.102 bouyer 382: xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
1.98 bouyer 383: xpq_flush_queue();
384: splx(s);
385: }
386:
387: static __inline void
388: pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
389: {
390: int s = splvm();
1.102 bouyer 391: xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
1.98 bouyer 392: (*pte) & ~bits);
393: xpq_flush_queue();
394: splx(s);
395: }
396:
397: static __inline void
398: pmap_pte_flush(void)
399: {
400: int s = splvm();
401: xpq_flush_queue();
402: splx(s);
403: }
1.102 bouyer 404:
1.98 bouyer 405: #endif
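/*
 * Editor's usage sketch (hypothetical helper, valid for either
 * implementation above): the usual lost-update-safe way to modify a
 * PTE is a compare-and-swap retry loop followed by a flush.
 */
#if 0	/* example only */
static __inline void
example_pte_set_bits(volatile pt_entry_t *ptep, pt_entry_t set)
{
	pt_entry_t opte, npte;

	do {
		opte = *ptep;
		npte = opte | set;
	} while (pmap_pte_cas(ptep, opte, npte) != opte);
	pmap_pte_flush();	/* no-op natively; flushes the Xen queue */
}
#endif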
1.73 thorpej 406:
1.102 bouyer 407: #ifdef PAE
408: /* addresses of static pages used for PAE pmap: */
409: /* the L3 page */
410: extern pd_entry_t *pmap_l3pd;
 411: extern paddr_t pmap_l3paddr;
 412: /* the kernel's L2 page */
 413: extern pd_entry_t *pmap_kl2pd;
 414: extern paddr_t pmap_kl2paddr;
415: #endif
416:
417:
1.94 yamt 418: struct trapframe;
1.1 cgd 419:
1.94 yamt 420: int pmap_exec_fixup(struct vm_map *, struct trapframe *, struct pcb *);
421: void pmap_ldt_cleanup(struct lwp *);
1.90 ad 422:
1.40 thorpej 423: #endif /* _I386_PMAP_H_ */