/*	$NetBSD: pmap.h,v 1.2.6.5 2008/01/21 09:40:08 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef	_X86_PMAP_H_
#define	_X86_PMAP_H_

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)
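
/*
 * Example (illustrative sketch, not part of the original header):
 * pl1_pi() gives a VA's slot within a single level-1 PTP page, so
 * given the kernel VA "ptp_va" of that PTP (a hypothetical name),
 * the PTE for va within it would be:
 *
 *	pt_entry_t *pte = (pt_entry_t *)ptp_va + pl1_pi(va);
 */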

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define	pl_i_roundup(va, lvl)	pl_i((va)+ ~ptp_masks[(lvl)-1], (lvl))
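
/*
 * Illustrative sketch (assumes the usual recursive-mapping layout,
 * where L1_FRAME == L2_FRAME | L1_MASK): pl*_i() indexes the flat
 * virtual array of all entries at a level, e.g. the PDE mapping a
 * VA can be read as
 *
 *	pd_entry_t pde = L2_BASE[pl2_i(va)];
 *
 * and the two index families are related by
 *
 *	pl1_i(va) == (pl2_i(va) << (L2_SHIFT - L1_SHIFT)) | pl1_pi(va)
 */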

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
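
/*
 * Worked example (illustrative): ptp_va2o(va, 1) == pl2_i(va) * PAGE_SIZE,
 * the byte offset of va's level-1 PTP within the PTE space, so that PTP
 * starts at
 *
 *	(pt_entry_t *)((char *)PTE_BASE + ptp_va2o(va, 1))
 */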

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */
struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
#ifdef PAE
	paddr_t pm_pdirpa[PDP_SIZE];
#else
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
#endif
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using pmap */
	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
};

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/* macro to access pm_pdirpa */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa + (index) * sizeof(pd_entry_t))
#endif

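/*
 * Usage sketch (illustrative, non-PAE i386 case): the physical address
 * of the PDE mapping va would be
 *
 *	paddr_t pdepa = pmap_pdirpa(pmap, pl2_i(va));
 *
 * Under PAE, l2tol3()/l2tol2() first split the index across the four
 * pages of the PDP.
 */
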
/*
 * global kernel variables
 */

/* PDPpaddr: the physical address of the kernel's PDP */
extern u_long PDPpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		x86_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
void	pmap_tlb_shootwait(void);

#define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */

/*
 * Do idle page zeroing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*ARGSUSED*/
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __unused
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}
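
/*
 * Usage sketch (illustrative): revoke write access to a managed page
 * while keeping it readable; per the code above this clears PG_RW on
 * all recorded mappings of the page:
 *
 *	pmap_page_protect(pg, VM_PROT_READ);
 */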

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
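
/*
 * Usage sketch (illustrative; assumes the mapping is valid and is not
 * a large page): translating a kernel VA by hand, using the same
 * offset-mask idiom as xpmap_ptetomach() in the Xen section below:
 *
 *	pt_entry_t pte = *kvtopte(va);
 *	if (pmap_valid_entry(pte))
 *		pa = (pte & PG_FRAME) |
 *		    (va & (~PG_FRAME & ~VA_SIGN_MASK));
 */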

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
void	sse2_zero_page(void *);
void	sse2_copy_page(void *, void *);


#ifdef XEN

#define XPTE_MASK	L1_FRAME
/* XPTE_SHIFT = L1_SHIFT - log2(sizeof(pt_entry_t)) */
#if defined(__x86_64__) || defined(PAE)
#define XPTE_SHIFT	9
#else
#define XPTE_SHIFT	10
#endif
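
/*
 * Worked example (illustrative): with 8-byte PTEs (amd64 and PAE),
 * log2(sizeof(pt_entry_t)) == 3, so XPTE_SHIFT == 12 - 3 == 9;
 * with 4-byte PTEs (i386 non-PAE) it is 12 - 2 == 10.
 */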

/* PTE access inline functions */

/*
 * Get the machine address of the PTE pointed to.
 * We use the hardware MMU to look up the value, so this works only
 * for levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) +
	    (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/*
 * xpmap_update()
 * Update an active pt entry with Xen.
 * Equivalent to *pte = npte.
 */

static __inline void
xpmap_update(pt_entry_t *pte, pt_entry_t npte)
{
	int s = splvm();

	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
	xpq_flush_queue();
	splx(s);
}


/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
paddr_t	vtomach(vaddr_t);

#endif	/* XEN */

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */