Annotation of src/sys/arch/powerpc/oea/pmap.c, Revision 1.75.2.2
1.75.2.2! bouyer 1: /* $NetBSD$ */
1.1 matt 2: /*-
3: * Copyright (c) 2001 The NetBSD Foundation, Inc.
4: * All rights reserved.
5: *
6: * This code is derived from software contributed to The NetBSD Foundation
7: * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
8: *
1.38 sanjayl 9: * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
10: * of Kyma Systems LLC.
11: *
1.1 matt 12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: *
21: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31: * POSSIBILITY OF SUCH DAMAGE.
32: */
33:
34: /*
35: * Copyright (C) 1995, 1996 Wolfgang Solfrank.
36: * Copyright (C) 1995, 1996 TooLs GmbH.
37: * All rights reserved.
38: *
39: * Redistribution and use in source and binary forms, with or without
40: * modification, are permitted provided that the following conditions
41: * are met:
42: * 1. Redistributions of source code must retain the above copyright
43: * notice, this list of conditions and the following disclaimer.
44: * 2. Redistributions in binary form must reproduce the above copyright
45: * notice, this list of conditions and the following disclaimer in the
46: * documentation and/or other materials provided with the distribution.
47: * 3. All advertising materials mentioning features or use of this software
48: * must display the following acknowledgement:
49: * This product includes software developed by TooLs GmbH.
50: * 4. The name of TooLs GmbH may not be used to endorse or promote products
51: * derived from this software without specific prior written permission.
52: *
53: * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
54: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
55: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56: * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57: * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
58: * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
59: * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
60: * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
61: * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
62: * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63: */
1.11 lukem 64:
65: #include <sys/cdefs.h>
1.75.2.2! bouyer 66: __KERNEL_RCSID(0, "$NetBSD$");
1.53 garbled 67:
68: #define PMAP_NOOPNAMES
1.1 matt 69:
1.18 matt 70: #include "opt_ppcarch.h"
1.1 matt 71: #include "opt_altivec.h"
1.57 matt 72: #include "opt_multiprocessor.h"
1.1 matt 73: #include "opt_pmap.h"
1.57 matt 74:
1.1 matt 75: #include <sys/param.h>
76: #include <sys/malloc.h>
77: #include <sys/proc.h>
78: #include <sys/pool.h>
79: #include <sys/queue.h>
80: #include <sys/device.h> /* for evcnt */
81: #include <sys/systm.h>
1.50 ad 82: #include <sys/atomic.h>
1.1 matt 83:
84: #include <uvm/uvm.h>
85:
86: #include <machine/pcb.h>
87: #include <machine/powerpc.h>
88: #include <powerpc/spr.h>
89: #include <powerpc/bat.h>
1.38 sanjayl 90: #include <powerpc/stdarg.h>
1.71 matt 91: #include <powerpc/oea/spr.h>
92: #include <powerpc/oea/sr_601.h>
1.1 matt 93:
94: #ifdef ALTIVEC
95: int pmap_use_altivec;
96: #endif
97:
1.2 matt 98: volatile struct pteg *pmap_pteg_table;
1.1 matt 99: unsigned int pmap_pteg_cnt;
100: unsigned int pmap_pteg_mask;
1.21 aymeric 101: #ifdef PMAP_MEMLIMIT
1.53 garbled 102: static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
1.21 aymeric 103: #else
1.53 garbled 104: static paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */
1.21 aymeric 105: #endif
1.1 matt 106:
107: struct pmap kernel_pmap_;
108: unsigned int pmap_pages_stolen;
109: u_long pmap_pte_valid;
110: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
111: u_long pmap_pvo_enter_depth;
112: u_long pmap_pvo_remove_depth;
113: #endif
114:
115: #ifndef MSGBUFADDR
116: extern paddr_t msgbuf_paddr;
117: #endif
118:
119: static struct mem_region *mem, *avail;
120: static u_int mem_cnt, avail_cnt;
121:
1.53 garbled 122: #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
123: # define PMAP_OEA 1
124: #endif
125:
126: #if defined(PMAP_OEA)
127: #define _PRIxpte "lx"
128: #else
129: #define _PRIxpte PRIx64
130: #endif
131: #define _PRIxpa "lx"
132: #define _PRIxva "lx"
1.54 mlelstv 133: #define _PRIsr "lx"
1.53 garbled 134:
1.75.2.1 bouyer 135: #ifdef PMAP_NEEDS_FIXUP
1.53 garbled 136: #if defined(PMAP_OEA)
137: #define PMAPNAME(name) pmap32_##name
138: #elif defined(PMAP_OEA64)
139: #define PMAPNAME(name) pmap64_##name
140: #elif defined(PMAP_OEA64_BRIDGE)
141: #define PMAPNAME(name) pmap64bridge_##name
142: #else
143: #error unknown variant for pmap
144: #endif
1.75.2.1 bouyer 145: #endif /* PMAP_NEEDS_FIXUP */
1.53 garbled 146:
1.75.2.1 bouyer 147: #ifdef PMAPNAME
1.53 garbled 148: #define STATIC static
149: #define pmap_pte_spill PMAPNAME(pte_spill)
150: #define pmap_real_memory PMAPNAME(real_memory)
151: #define pmap_init PMAPNAME(init)
152: #define pmap_virtual_space PMAPNAME(virtual_space)
153: #define pmap_create PMAPNAME(create)
154: #define pmap_reference PMAPNAME(reference)
155: #define pmap_destroy PMAPNAME(destroy)
156: #define pmap_copy PMAPNAME(copy)
157: #define pmap_update PMAPNAME(update)
158: #define pmap_enter PMAPNAME(enter)
159: #define pmap_remove PMAPNAME(remove)
160: #define pmap_kenter_pa PMAPNAME(kenter_pa)
161: #define pmap_kremove PMAPNAME(kremove)
162: #define pmap_extract PMAPNAME(extract)
163: #define pmap_protect PMAPNAME(protect)
164: #define pmap_unwire PMAPNAME(unwire)
165: #define pmap_page_protect PMAPNAME(page_protect)
166: #define pmap_query_bit PMAPNAME(query_bit)
167: #define pmap_clear_bit PMAPNAME(clear_bit)
168:
169: #define pmap_activate PMAPNAME(activate)
170: #define pmap_deactivate PMAPNAME(deactivate)
171:
172: #define pmap_pinit PMAPNAME(pinit)
173: #define pmap_procwr PMAPNAME(procwr)
174:
175: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
176: #define pmap_pte_print PMAPNAME(pte_print)
177: #define pmap_pteg_check PMAPNAME(pteg_check)
 178: #define pmap_print_mmuregs	PMAPNAME(print_mmuregs)
179: #define pmap_print_pte PMAPNAME(print_pte)
180: #define pmap_pteg_dist PMAPNAME(pteg_dist)
181: #endif
182: #if defined(DEBUG) || defined(PMAPCHECK)
183: #define pmap_pvo_verify PMAPNAME(pvo_verify)
1.56 phx 184: #define pmapcheck PMAPNAME(check)
185: #endif
186: #if defined(DEBUG) || defined(PMAPDEBUG)
187: #define pmapdebug PMAPNAME(debug)
1.53 garbled 188: #endif
189: #define pmap_steal_memory PMAPNAME(steal_memory)
190: #define pmap_bootstrap PMAPNAME(bootstrap)
191: #else
192: #define STATIC /* nothing */
193: #endif /* PMAPNAME */
194:
195: STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
196: STATIC void pmap_real_memory(paddr_t *, psize_t *);
197: STATIC void pmap_init(void);
198: STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
199: STATIC pmap_t pmap_create(void);
200: STATIC void pmap_reference(pmap_t);
201: STATIC void pmap_destroy(pmap_t);
202: STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
203: STATIC void pmap_update(pmap_t);
1.65 cegger 204: STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
1.53 garbled 205: STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
1.68 cegger 206: STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
1.53 garbled 207: STATIC void pmap_kremove(vaddr_t, vsize_t);
208: STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
209:
210: STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
211: STATIC void pmap_unwire(pmap_t, vaddr_t);
212: STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
213: STATIC bool pmap_query_bit(struct vm_page *, int);
214: STATIC bool pmap_clear_bit(struct vm_page *, int);
215:
216: STATIC void pmap_activate(struct lwp *);
217: STATIC void pmap_deactivate(struct lwp *);
218:
219: STATIC void pmap_pinit(pmap_t pm);
220: STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);
221:
222: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
223: STATIC void pmap_pte_print(volatile struct pte *);
224: STATIC void pmap_pteg_check(void);
225: STATIC void pmap_print_mmuregs(void);
226: STATIC void pmap_print_pte(pmap_t, vaddr_t);
227: STATIC void pmap_pteg_dist(void);
228: #endif
229: #if defined(DEBUG) || defined(PMAPCHECK)
230: STATIC void pmap_pvo_verify(void);
231: #endif
232: STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
233: STATIC void pmap_bootstrap(paddr_t, paddr_t);
234:
235: #ifdef PMAPNAME
236: const struct pmap_ops PMAPNAME(ops) = {
237: .pmapop_pte_spill = pmap_pte_spill,
238: .pmapop_real_memory = pmap_real_memory,
239: .pmapop_init = pmap_init,
240: .pmapop_virtual_space = pmap_virtual_space,
241: .pmapop_create = pmap_create,
242: .pmapop_reference = pmap_reference,
243: .pmapop_destroy = pmap_destroy,
244: .pmapop_copy = pmap_copy,
245: .pmapop_update = pmap_update,
246: .pmapop_enter = pmap_enter,
247: .pmapop_remove = pmap_remove,
248: .pmapop_kenter_pa = pmap_kenter_pa,
249: .pmapop_kremove = pmap_kremove,
250: .pmapop_extract = pmap_extract,
251: .pmapop_protect = pmap_protect,
252: .pmapop_unwire = pmap_unwire,
253: .pmapop_page_protect = pmap_page_protect,
254: .pmapop_query_bit = pmap_query_bit,
255: .pmapop_clear_bit = pmap_clear_bit,
256: .pmapop_activate = pmap_activate,
257: .pmapop_deactivate = pmap_deactivate,
258: .pmapop_pinit = pmap_pinit,
259: .pmapop_procwr = pmap_procwr,
260: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
261: .pmapop_pte_print = pmap_pte_print,
262: .pmapop_pteg_check = pmap_pteg_check,
263: .pmapop_print_mmuregs = pmap_print_mmuregs,
264: .pmapop_print_pte = pmap_print_pte,
265: .pmapop_pteg_dist = pmap_pteg_dist,
266: #else
267: .pmapop_pte_print = NULL,
268: .pmapop_pteg_check = NULL,
269: .pmapop_print_mmuregs = NULL,
270: .pmapop_print_pte = NULL,
271: .pmapop_pteg_dist = NULL,
272: #endif
273: #if defined(DEBUG) || defined(PMAPCHECK)
274: .pmapop_pvo_verify = pmap_pvo_verify,
275: #else
276: .pmapop_pvo_verify = NULL,
1.1 matt 277: #endif
1.53 garbled 278: .pmapop_steal_memory = pmap_steal_memory,
279: .pmapop_bootstrap = pmap_bootstrap,
280: };
281: #endif /* !PMAPNAME */
1.1 matt 282:
283: /*
1.38 sanjayl 284: * The following structure is aligned to 32 bytes
1.1 matt 285: */
286: struct pvo_entry {
287: LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */
288: TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */
289: struct pte pvo_pte; /* Prebuilt PTE */
290: pmap_t pvo_pmap; /* ptr to owning pmap */
291: vaddr_t pvo_vaddr; /* VA of entry */
292: #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */
293: #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */
294: #define PVO_WIRED 0x0010 /* PVO entry is wired */
295: #define PVO_MANAGED 0x0020 /* PVO e. for managed page */
296: #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */
1.39 matt 297: #define PVO_WIRED_P(pvo) ((pvo)->pvo_vaddr & PVO_WIRED)
298: #define PVO_MANAGED_P(pvo) ((pvo)->pvo_vaddr & PVO_MANAGED)
299: #define PVO_EXECUTABLE_P(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
1.12 matt 300: #define PVO_ENTER_INSERT	0	/* PVO has been inserted */
301: #define PVO_SPILL_UNSET 1 /* PVO has been evicted */
302: #define PVO_SPILL_SET 2 /* PVO has been spilled */
303: #define PVO_SPILL_INSERT 3 /* PVO has been inserted */
304: #define PVO_PMAP_PAGE_PROTECT 4 /* PVO has changed */
305: #define PVO_PMAP_PROTECT 5 /* PVO has changed */
306: #define PVO_REMOVE 6 /* PVO has been removed */
307: #define PVO_WHERE_MASK 15
308: #define PVO_WHERE_SHFT 8
1.38 sanjayl 309: } __attribute__ ((aligned (32)));
1.1 matt 310: #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
311: #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
312: #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
313: #define PVO_PTEGIDX_CLR(pvo) \
314: ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
315: #define PVO_PTEGIDX_SET(pvo,i) \
316: ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
1.12 matt 317: #define PVO_WHERE(pvo,w) \
318: ((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
319: (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
1.1 matt 320:
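/*
 * Since pvo_vaddr is page aligned, its low bits are free to carry the
 * PTEG slot index and the flag bits above.  A minimal sketch of the
 * round trip (illustrative only, not compiled; "va" and "i" stand for
 * an arbitrary page-aligned address and a slot number 0-7):
 */
#if 0
	pvo->pvo_vaddr = (va & ~ADDR_POFF) | PVO_WIRED;
	PVO_PTEGIDX_SET(pvo, i);		/* store slot, mark it valid */
	KASSERT(PVO_VADDR(pvo) == (va & ~ADDR_POFF));	/* flags masked off */
	KASSERT(PVO_PTEGIDX_GET(pvo) == i);
	KASSERT(PVO_WIRED_P(pvo));
#endif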
321: TAILQ_HEAD(pvo_tqhead, pvo_entry);
322: struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */
1.53 garbled 323: static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */
324: static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */
1.1 matt 325:
326: struct pool pmap_pool; /* pool for pmap structures */
327: struct pool pmap_upvo_pool; /* pool for pvo entries for unmanaged pages */
328: struct pool pmap_mpvo_pool; /* pool for pvo entries for managed pages */
329:
330: /*
331: * We keep a cache of unmanaged pages to be used for pvo entries for
332: * unmanaged pages.
333: */
334: struct pvo_page {
335: SIMPLEQ_ENTRY(pvo_page) pvop_link;
336: };
337: SIMPLEQ_HEAD(pvop_head, pvo_page);
1.53 garbled 338: static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
339: static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
1.1 matt 340: u_long pmap_upvop_free;
341: u_long pmap_upvop_maxfree;
342: u_long pmap_mpvop_free;
343: u_long pmap_mpvop_maxfree;
344:
1.53 garbled 345: static void *pmap_pool_ualloc(struct pool *, int);
346: static void *pmap_pool_malloc(struct pool *, int);
1.1 matt 347:
1.53 garbled 348: static void pmap_pool_ufree(struct pool *, void *);
349: static void pmap_pool_mfree(struct pool *, void *);
1.1 matt 350:
351: static struct pool_allocator pmap_pool_mallocator = {
1.43 garbled 352: .pa_alloc = pmap_pool_malloc,
353: .pa_free = pmap_pool_mfree,
354: .pa_pagesz = 0,
1.1 matt 355: };
356:
357: static struct pool_allocator pmap_pool_uallocator = {
1.43 garbled 358: .pa_alloc = pmap_pool_ualloc,
359: .pa_free = pmap_pool_ufree,
360: .pa_pagesz = 0,
1.1 matt 361: };
362:
363: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
1.2 matt 364: void pmap_pte_print(volatile struct pte *);
1.1 matt 365: void pmap_pteg_check(void);
366: void pmap_pteg_dist(void);
367: void pmap_print_pte(pmap_t, vaddr_t);
368: void pmap_print_mmuregs(void);
369: #endif
370:
371: #if defined(DEBUG) || defined(PMAPCHECK)
372: #ifdef PMAPCHECK
373: int pmapcheck = 1;
374: #else
375: int pmapcheck = 0;
376: #endif
377: void pmap_pvo_verify(void);
1.53 garbled 378: static void pmap_pvo_check(const struct pvo_entry *);
1.1 matt 379: #define PMAP_PVO_CHECK(pvo) \
380: do { \
381: if (pmapcheck) \
382: pmap_pvo_check(pvo); \
383: } while (0)
384: #else
385: #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0)
386: #endif
1.53 garbled 387: static int pmap_pte_insert(int, struct pte *);
388: static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
1.2 matt 389: vaddr_t, paddr_t, register_t, int);
1.53 garbled 390: static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
391: static void pmap_pvo_free(struct pvo_entry *);
392: static void pmap_pvo_free_list(struct pvo_head *);
393: static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
394: static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
395: static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
396: static void pvo_set_exec(struct pvo_entry *);
397: static void pvo_clear_exec(struct pvo_entry *);
1.1 matt 398:
1.53 garbled 399: static void tlbia(void);
1.1 matt 400:
1.53 garbled 401: static void pmap_release(pmap_t);
402: static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);
1.1 matt 403:
1.25 chs 404: static uint32_t pmap_pvo_reclaim_nextidx;
405: #ifdef DEBUG
406: static int pmap_pvo_reclaim_debugctr;
407: #endif
408:
1.1 matt 409: #define VSID_NBPW (sizeof(uint32_t) * 8)
410: static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
411:
412: static int pmap_initialized;
413:
414: #if defined(DEBUG) || defined(PMAPDEBUG)
415: #define PMAPDEBUG_BOOT 0x0001
416: #define PMAPDEBUG_PTE 0x0002
417: #define PMAPDEBUG_EXEC 0x0008
418: #define PMAPDEBUG_PVOENTER 0x0010
419: #define PMAPDEBUG_PVOREMOVE 0x0020
420: #define PMAPDEBUG_ACTIVATE 0x0100
421: #define PMAPDEBUG_CREATE 0x0200
422: #define PMAPDEBUG_ENTER 0x1000
423: #define PMAPDEBUG_KENTER 0x2000
424: #define PMAPDEBUG_KREMOVE 0x4000
425: #define PMAPDEBUG_REMOVE 0x8000
1.38 sanjayl 426:
1.1 matt 427: unsigned int pmapdebug = 0;
1.38 sanjayl 428:
1.1 matt 429: # define DPRINTF(x) printf x
430: # define DPRINTFN(n, x) if (pmapdebug & PMAPDEBUG_ ## n) printf x
431: #else
432: # define DPRINTF(x)
433: # define DPRINTFN(n, x)
434: #endif
435:
436:
437: #ifdef PMAPCOUNTERS
438: /*
439: * From pmap_subr.c
440: */
1.53 garbled 441: extern struct evcnt pmap_evcnt_mappings;
442: extern struct evcnt pmap_evcnt_unmappings;
443:
444: extern struct evcnt pmap_evcnt_kernel_mappings;
445: extern struct evcnt pmap_evcnt_kernel_unmappings;
446:
447: extern struct evcnt pmap_evcnt_mappings_replaced;
448:
449: extern struct evcnt pmap_evcnt_exec_mappings;
450: extern struct evcnt pmap_evcnt_exec_cached;
451:
452: extern struct evcnt pmap_evcnt_exec_synced;
453: extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
454: extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;
455:
456: extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
457: extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
458: extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
459: extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
460: extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;
461:
462: extern struct evcnt pmap_evcnt_updates;
463: extern struct evcnt pmap_evcnt_collects;
464: extern struct evcnt pmap_evcnt_copies;
465:
466: extern struct evcnt pmap_evcnt_ptes_spilled;
467: extern struct evcnt pmap_evcnt_ptes_unspilled;
468: extern struct evcnt pmap_evcnt_ptes_evicted;
469:
470: extern struct evcnt pmap_evcnt_ptes_primary[8];
471: extern struct evcnt pmap_evcnt_ptes_secondary[8];
472: extern struct evcnt pmap_evcnt_ptes_removed;
473: extern struct evcnt pmap_evcnt_ptes_changed;
474: extern struct evcnt pmap_evcnt_pvos_reclaimed;
475: extern struct evcnt pmap_evcnt_pvos_failed;
476:
1.1 matt 477: extern struct evcnt pmap_evcnt_zeroed_pages;
478: extern struct evcnt pmap_evcnt_copied_pages;
479: extern struct evcnt pmap_evcnt_idlezeroed_pages;
1.26 matt 480:
1.53 garbled 481: #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++)
482: #define PMAPCOUNT2(ev) ((ev).ev_count++)
1.1 matt 483: #else
484: #define PMAPCOUNT(ev) ((void) 0)
485: #define PMAPCOUNT2(ev) ((void) 0)
486: #endif
487:
1.35 perry 488: #define TLBIE(va) __asm volatile("tlbie %0" :: "r"(va))
1.38 sanjayl 489:
490: /* XXXSL: this needs to be moved to assembler */
491: #define TLBIEL(va) __asm __volatile("tlbie %0" :: "r"(va))
492:
1.35 perry 493: #define TLBSYNC() __asm volatile("tlbsync")
494: #define SYNC() __asm volatile("sync")
495: #define EIEIO() __asm volatile("eieio")
1.57 matt 496: #define DCBST(va) __asm __volatile("dcbst 0,%0" :: "r"(va))
1.1 matt 497: #define MFMSR() mfmsr()
498: #define MTMSR(psl) mtmsr(psl)
499: #define MFPVR() mfpvr()
500: #define MFSRIN(va) mfsrin(va)
501: #define MFTB() mfrtcltbl()
502:
1.53 garbled 503: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.35 perry 504: static inline register_t
1.1 matt 505: mfsrin(vaddr_t va)
506: {
1.2 matt 507: register_t sr;
1.35 perry 508: __asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
1.1 matt 509: return sr;
510: }
1.53 garbled 511: #endif /* PMAP_OEA */
1.38 sanjayl 512:
1.53 garbled 513: #if defined (PMAP_OEA64_BRIDGE)
1.38 sanjayl 514: extern void mfmsr64 (register64_t *result);
1.53 garbled 515: #endif /* PMAP_OEA64_BRIDGE */
1.38 sanjayl 516:
1.50 ad 517: #define PMAP_LOCK() KERNEL_LOCK(1, NULL)
518: #define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL)
1.1 matt 519:
1.35 perry 520: static inline register_t
1.1 matt 521: pmap_interrupts_off(void)
522: {
1.2 matt 523: register_t msr = MFMSR();
1.1 matt 524: if (msr & PSL_EE)
525: MTMSR(msr & ~PSL_EE);
526: return msr;
527: }
528:
529: static void
1.2 matt 530: pmap_interrupts_restore(register_t msr)
1.1 matt 531: {
532: if (msr & PSL_EE)
533: MTMSR(msr);
534: }
535:
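/*
 * The pattern used throughout this file to serialize page table and
 * PVO list updates against exceptions (a sketch of a typical caller,
 * not a new interface):
 */
#if 0
	register_t msr = pmap_interrupts_off();
	/* ... modify the PTEG table and PVO lists ... */
	pmap_interrupts_restore(msr);
#endif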
1.35 perry 536: static inline u_int32_t
1.1 matt 537: mfrtcltbl(void)
538: {
1.55 garbled 539: #ifdef PPC_OEA601
1.1 matt 540: if ((MFPVR() >> 16) == MPC601)
541: return (mfrtcl() >> 7);
542: else
1.55 garbled 543: #endif
1.1 matt 544: return (mftbl());
545: }
546:
547: /*
548: * These small routines may have to be replaced,
 549:  * if/when we support processors other than the 604.
550: */
551:
552: void
553: tlbia(void)
554: {
1.47 macallan 555: char *i;
1.1 matt 556:
557: SYNC();
1.53 garbled 558: #if defined(PMAP_OEA)
1.1 matt 559: /*
560: * Why not use "tlbia"? Because not all processors implement it.
561: *
1.20 wiz 562: * This needs to be a per-CPU callback to do the appropriate thing
1.1 matt 563: * for the CPU. XXX
564: */
1.47 macallan 565: for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
1.1 matt 566: TLBIE(i);
567: EIEIO();
568: SYNC();
569: }
1.53 garbled 570: #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
1.38 sanjayl 571: /* This is specifically for the 970, 970UM v1.6 pp. 140. */
1.51 garbled 572: for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
1.38 sanjayl 573: TLBIEL(i);
574: EIEIO();
575: SYNC();
576: }
577: #endif
1.1 matt 578: TLBSYNC();
579: SYNC();
580: }
581:
1.35 perry 582: static inline register_t
1.2 matt 583: va_to_vsid(const struct pmap *pm, vaddr_t addr)
1.1 matt 584: {
1.53 garbled 585: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.38 sanjayl 586: return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
1.53 garbled 587: #else /* PMAP_OEA64 */
1.18 matt 588: #if 0
589: const struct ste *ste;
590: register_t hash;
591: int i;
592:
593: hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;
594:
595: /*
596: * Try the primary group first
597: */
598: ste = pm->pm_stes[hash].stes;
599: for (i = 0; i < 8; i++, ste++) {
600: if (ste->ste_hi & STE_V) &&
601: (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
602: return ste;
603: }
604:
605: /*
606: * Then the secondary group.
607: */
608: ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
609: for (i = 0; i < 8; i++, ste++) {
 610: 		if ((ste->ste_hi & STE_V) &&
611: (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
612: return addr;
613: }
614:
615: return NULL;
616: #else
617: /*
618: * Rather than searching the STE groups for the VSID, we know
619: * how we generate that from the ESID and so do that.
620: */
621: return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
622: #endif
1.53 garbled 623: #endif /* PMAP_OEA */
1.1 matt 624: }
625:
1.35 perry 626: static inline register_t
1.2 matt 627: va_to_pteg(const struct pmap *pm, vaddr_t addr)
1.1 matt 628: {
1.2 matt 629: register_t hash;
630:
631: hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
1.1 matt 632: return hash & pmap_pteg_mask;
633: }
634:
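/*
 * Worked example with illustrative numbers only: for a 1024-group
 * table (pmap_pteg_mask == 0x3ff), a segment VSID of 0x123 and
 * va 0x00401000, the page index (va & ADDR_PIDX) >> ADDR_PIDX_SHFT
 * is 0x401, so the primary group is (0x123 ^ 0x401) & 0x3ff == 0x122
 * and the secondary group (reached by xoring the group index with
 * pmap_pteg_mask, see pmap_pte_insert below) is 0x122 ^ 0x3ff == 0x2dd.
 */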
635: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
636: /*
637: * Given a PTE in the page table, calculate the VADDR that hashes to it.
 638:  * The only bit of magic is that the top 4 bits of the address don't
639: * technically exist in the PTE. But we know we reserved 4 bits of the
640: * VSID for it so that's how we get it.
641: */
642: static vaddr_t
1.2 matt 643: pmap_pte_to_va(volatile const struct pte *pt)
1.1 matt 644: {
645: vaddr_t va;
646: uintptr_t ptaddr = (uintptr_t) pt;
647:
648: if (pt->pte_hi & PTE_HID)
1.2 matt 649: ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
1.1 matt 650:
1.18 matt 651: /* PPC Bits 10-19 PPC64 Bits 42-51 */
1.53 garbled 652: #if defined(PMAP_OEA)
1.4 matt 653: va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
1.53 garbled 654: #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
1.38 sanjayl 655: va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
656: #endif
1.1 matt 657: va <<= ADDR_PIDX_SHFT;
658:
1.18 matt 659: /* PPC Bits 4-9 PPC64 Bits 36-41 */
1.1 matt 660: va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
661:
1.53 garbled 662: #if defined(PMAP_OEA64)
1.18 matt 663: 	/* PPC64 Bits 0-35 */
664: /* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
1.53 garbled 665: #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
1.1 matt 666: /* PPC Bits 0-3 */
667: va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
1.18 matt 668: #endif
1.1 matt 669:
670: return va;
671: }
672: #endif
673:
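/*
 * Continuing the worked example from va_to_pteg: a valid PTE found in
 * group 0x122 whose VSID is 0x123 yields (0x123 ^ 0x122) & 0x3ff ==
 * 0x001 for the hashed low bits of the page index, the remaining page
 * index bits (0x401 >> 10 == 0x1) come back from PTE_API, and the SR
 * bits are recovered from the VSID, reassembling va 0x00401000 within
 * its segment.  (Illustrative numbers only.)
 */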
1.35 perry 674: static inline struct pvo_head *
1.1 matt 675: pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
676: {
677: struct vm_page *pg;
1.72 uebayasi 678: struct vm_page_md *md;
1.1 matt 679:
680: pg = PHYS_TO_VM_PAGE(pa);
681: if (pg_p != NULL)
682: *pg_p = pg;
683: if (pg == NULL)
684: return &pmap_pvo_unmanaged;
1.72 uebayasi 685: md = VM_PAGE_TO_MD(pg);
686: return &md->mdpg_pvoh;
1.1 matt 687: }
688:
1.35 perry 689: static inline struct pvo_head *
1.1 matt 690: vm_page_to_pvoh(struct vm_page *pg)
691: {
1.72 uebayasi 692: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
693:
694: return &md->mdpg_pvoh;
1.1 matt 695: }
696:
697:
1.35 perry 698: static inline void
1.1 matt 699: pmap_attr_clear(struct vm_page *pg, int ptebit)
700: {
1.72 uebayasi 701: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
702:
703: md->mdpg_attrs &= ~ptebit;
1.1 matt 704: }
705:
1.35 perry 706: static inline int
1.1 matt 707: pmap_attr_fetch(struct vm_page *pg)
708: {
1.72 uebayasi 709: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
710:
711: return md->mdpg_attrs;
1.1 matt 712: }
713:
1.35 perry 714: static inline void
1.1 matt 715: pmap_attr_save(struct vm_page *pg, int ptebit)
716: {
1.72 uebayasi 717: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
718:
719: md->mdpg_attrs |= ptebit;
1.1 matt 720: }
721:
1.35 perry 722: static inline int
1.2 matt 723: pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
1.1 matt 724: {
725: if (pt->pte_hi == pvo_pt->pte_hi
726: #if 0
727: && ((pt->pte_lo ^ pvo_pt->pte_lo) &
728: ~(PTE_REF|PTE_CHG)) == 0
729: #endif
730: )
731: return 1;
732: return 0;
733: }
734:
1.35 perry 735: static inline void
1.2 matt 736: pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
1.1 matt 737: {
738: /*
739: * Construct the PTE. Default to IMB initially. Valid bit
740: * only gets set when the real pte is set in memory.
741: *
742: * Note: Don't set the valid bit for correct operation of tlb update.
743: */
1.53 garbled 744: #if defined(PMAP_OEA)
1.2 matt 745: pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
746: | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
1.1 matt 747: pt->pte_lo = pte_lo;
1.53 garbled 748: #elif defined (PMAP_OEA64_BRIDGE)
1.38 sanjayl 749: pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
750: | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
751: pt->pte_lo = (u_int64_t) pte_lo;
1.53 garbled 752: #elif defined (PMAP_OEA64)
753: #error PMAP_OEA64 not supported
754: #endif /* PMAP_OEA */
1.1 matt 755: }
756:
1.35 perry 757: static inline void
1.2 matt 758: pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
1.1 matt 759: {
760: pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
761: }
762:
1.35 perry 763: static inline void
1.2 matt 764: pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
1.1 matt 765: {
766: /*
767: * As shown in Section 7.6.3.2.3
768: */
769: pt->pte_lo &= ~ptebit;
770: TLBIE(va);
771: SYNC();
772: EIEIO();
773: TLBSYNC();
774: SYNC();
1.57 matt 775: #ifdef MULTIPROCESSOR
776: DCBST(pt);
777: #endif
1.1 matt 778: }
779:
1.35 perry 780: static inline void
1.2 matt 781: pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
1.1 matt 782: {
783: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
784: if (pvo_pt->pte_hi & PTE_VALID)
785: panic("pte_set: setting an already valid pte %p", pvo_pt);
786: #endif
787: pvo_pt->pte_hi |= PTE_VALID;
1.38 sanjayl 788:
1.1 matt 789: /*
790: * Update the PTE as defined in section 7.6.3.1
791: * Note that the REF/CHG bits are from pvo_pt and thus should
792: * have been saved so this routine can restore them (if desired).
793: */
794: pt->pte_lo = pvo_pt->pte_lo;
795: EIEIO();
796: pt->pte_hi = pvo_pt->pte_hi;
1.38 sanjayl 797: TLBSYNC();
1.1 matt 798: SYNC();
1.57 matt 799: #ifdef MULTIPROCESSOR
800: DCBST(pt);
801: #endif
1.1 matt 802: pmap_pte_valid++;
803: }
804:
1.35 perry 805: static inline void
1.2 matt 806: pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
1.1 matt 807: {
808: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
809: if ((pvo_pt->pte_hi & PTE_VALID) == 0)
810: panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
811: if ((pt->pte_hi & PTE_VALID) == 0)
812: panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
813: #endif
814:
815: pvo_pt->pte_hi &= ~PTE_VALID;
816: /*
817: * Force the ref & chg bits back into the PTEs.
818: */
819: SYNC();
820: /*
821: * Invalidate the pte ... (Section 7.6.3.3)
822: */
823: pt->pte_hi &= ~PTE_VALID;
824: SYNC();
825: TLBIE(va);
826: SYNC();
827: EIEIO();
828: TLBSYNC();
829: SYNC();
830: /*
831: * Save the ref & chg bits ...
832: */
833: pmap_pte_synch(pt, pvo_pt);
834: pmap_pte_valid--;
835: }
836:
1.35 perry 837: static inline void
1.2 matt 838: pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
1.1 matt 839: {
840: /*
841: * Invalidate the PTE
842: */
843: pmap_pte_unset(pt, pvo_pt, va);
844: pmap_pte_set(pt, pvo_pt);
845: }
846:
847: /*
848: * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
849: * (either primary or secondary location).
850: *
851: * Note: both the destination and source PTEs must not have PTE_VALID set.
852: */
853:
1.53 garbled 854: static int
1.2 matt 855: pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
1.1 matt 856: {
1.2 matt 857: volatile struct pte *pt;
1.1 matt 858: int i;
859:
860: #if defined(DEBUG)
1.54 mlelstv 861: DPRINTFN(PTE, ("pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
1.53 garbled 862: ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
1.1 matt 863: #endif
864: /*
865: * First try primary hash.
866: */
867: for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
868: if ((pt->pte_hi & PTE_VALID) == 0) {
869: pvo_pt->pte_hi &= ~PTE_HID;
870: pmap_pte_set(pt, pvo_pt);
871: return i;
872: }
873: }
874:
875: /*
876: * Now try secondary hash.
877: */
878: ptegidx ^= pmap_pteg_mask;
879: for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
880: if ((pt->pte_hi & PTE_VALID) == 0) {
881: pvo_pt->pte_hi |= PTE_HID;
882: pmap_pte_set(pt, pvo_pt);
883: return i;
884: }
885: }
886: return -1;
887: }
888:
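/*
 * A caller sketch (not compiled; see pmap_pvo_enter below for the
 * real code): on success the returned slot is recorded in the PVO,
 * and pte_hi already has PTE_HID set iff the entry landed in the
 * secondary group.
 */
#if 0
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0)
		PVO_PTEGIDX_SET(pvo, i);	/* now resident */
	else
		pm->pm_evictions++;		/* no room, stays evicted */
#endif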
889: /*
890: * Spill handler.
891: *
892: * Tries to spill a page table entry from the overflow area.
 893:  * This runs in either real mode (if dealing with an exception spill)
894: * or virtual mode when dealing with manually spilling one of the
895: * kernel's pte entries. In either case, interrupts are already
896: * disabled.
897: */
1.14 chs 898:
1.1 matt 899: int
1.44 thorpej 900: pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
1.1 matt 901: {
902: struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
903: struct pvo_entry *pvo;
1.15 dyoung 904: /* XXX: gcc -- vpvoh is always set at either *1* or *2* */
905: struct pvo_tqhead *pvoh, *vpvoh = NULL;
1.1 matt 906: int ptegidx, i, j;
1.2 matt 907: volatile struct pteg *pteg;
908: volatile struct pte *pt;
1.1 matt 909:
1.50 ad 910: PMAP_LOCK();
911:
1.2 matt 912: ptegidx = va_to_pteg(pm, addr);
1.1 matt 913:
914: /*
915: * Have to substitute some entry. Use the primary hash for this.
1.12 matt 916: 	 * Use the low bits of the timebase as a random generator.  Make sure we are
917: * not picking a kernel pte for replacement.
1.1 matt 918: */
919: pteg = &pmap_pteg_table[ptegidx];
920: i = MFTB() & 7;
1.12 matt 921: for (j = 0; j < 8; j++) {
922: pt = &pteg->pt[i];
1.53 garbled 923: if ((pt->pte_hi & PTE_VALID) == 0)
924: break;
925: if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
926: < PHYSMAP_VSIDBITS)
1.12 matt 927: break;
928: i = (i + 1) & 7;
929: }
930: KASSERT(j < 8);
1.1 matt 931:
932: source_pvo = NULL;
933: victim_pvo = NULL;
934: pvoh = &pmap_pvo_table[ptegidx];
935: TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
936:
937: /*
938: * We need to find pvo entry for this address...
939: */
940: PMAP_PVO_CHECK(pvo); /* sanity check */
941:
942: /*
943: * If we haven't found the source and we come to a PVO with
 944: 		 * a valid PTE, then we know we can't find it because
 945: 		 * evicted PVOs always come first in the list.
946: */
947: if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
948: break;
1.2 matt 949: if (source_pvo == NULL && pm == pvo->pvo_pmap &&
950: addr == PVO_VADDR(pvo)) {
1.1 matt 951:
952: /*
953: * Now we have found the entry to be spilled into the
954: * pteg. Attempt to insert it into the page table.
955: */
956: j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
957: if (j >= 0) {
958: PVO_PTEGIDX_SET(pvo, j);
959: PMAP_PVO_CHECK(pvo); /* sanity check */
1.12 matt 960: PVO_WHERE(pvo, SPILL_INSERT);
1.1 matt 961: pvo->pvo_pmap->pm_evictions--;
962: PMAPCOUNT(ptes_spilled);
963: PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
964: ? pmap_evcnt_ptes_secondary
965: : pmap_evcnt_ptes_primary)[j]);
966:
967: /*
968: * Since we keep the evicted entries at the
 969: 			 * front of the PVO list, we need to move this
970: * (now resident) PVO after the evicted
971: * entries.
972: */
973: next_pvo = TAILQ_NEXT(pvo, pvo_olink);
974:
975: /*
1.5 matt 976: * If we don't have to move (either we were the
977: * last entry or the next entry was valid),
1.1 matt 978: * don't change our position. Otherwise
979: * move ourselves to the tail of the queue.
980: */
981: if (next_pvo != NULL &&
982: !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
983: TAILQ_REMOVE(pvoh, pvo, pvo_olink);
984: TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
985: }
1.50 ad 986: PMAP_UNLOCK();
1.1 matt 987: return 1;
988: }
989: source_pvo = pvo;
1.39 matt 990: if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
1.14 chs 991: 				PMAP_UNLOCK();
				return 0;
992: }
1.1 matt 993: if (victim_pvo != NULL)
994: break;
995: }
996:
997: /*
998: * We also need the pvo entry of the victim we are replacing
999: * so save the R & C bits of the PTE.
1000: */
1001: if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
1002: pmap_pte_compare(pt, &pvo->pvo_pte)) {
1.15 dyoung 1003: vpvoh = pvoh; /* *1* */
1.1 matt 1004: victim_pvo = pvo;
1005: if (source_pvo != NULL)
1006: break;
1007: }
1008: }
1009:
1010: if (source_pvo == NULL) {
1011: PMAPCOUNT(ptes_unspilled);
1.50 ad 1012: PMAP_UNLOCK();
1.1 matt 1013: return 0;
1014: }
1015:
1016: if (victim_pvo == NULL) {
1017: if ((pt->pte_hi & PTE_HID) == 0)
1018: panic("pmap_pte_spill: victim p-pte (%p) has "
1019: "no pvo entry!", pt);
1020:
1021: /*
1022: * If this is a secondary PTE, we need to search
1023: * its primary pvo bucket for the matching PVO.
1024: */
1.15 dyoung 1025: vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */
1.1 matt 1026: TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
1027: PMAP_PVO_CHECK(pvo); /* sanity check */
1028:
1029: /*
1030: * We also need the pvo entry of the victim we are
1031: * replacing so save the R & C bits of the PTE.
1032: */
1033: if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
1034: victim_pvo = pvo;
1035: break;
1036: }
1037: }
1038: if (victim_pvo == NULL)
1039: panic("pmap_pte_spill: victim s-pte (%p) has "
1040: "no pvo entry!", pt);
1041: }
1042:
1043: /*
1.12 matt 1044: 	 * The victim should not be a kernel PVO/PTE entry.
1045: */
1046: KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
1047: KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
1048: KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);
1049:
1050: /*
1.1 matt 1051: 	 * We are invalidating the TLB entry for the EA of the PTE
 1052: 	 * we are replacing even though it's valid; if we don't,
1053: * we lose any ref/chg bit changes contained in the TLB
1054: * entry.
1055: */
1056: source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
1057:
1058: /*
1059: * To enforce the PVO list ordering constraint that all
1060: * evicted entries should come before all valid entries,
1061: * move the source PVO to the tail of its list and the
1062: * victim PVO to the head of its list (which might not be
1063: * the same list, if the victim was using the secondary hash).
1064: */
1065: TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
1066: TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
1067: TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
1068: TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
1069: pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
1070: pmap_pte_set(pt, &source_pvo->pvo_pte);
1071: victim_pvo->pvo_pmap->pm_evictions++;
1072: source_pvo->pvo_pmap->pm_evictions--;
1.12 matt 1073: PVO_WHERE(victim_pvo, SPILL_UNSET);
1074: PVO_WHERE(source_pvo, SPILL_SET);
1.1 matt 1075:
1076: PVO_PTEGIDX_CLR(victim_pvo);
1077: PVO_PTEGIDX_SET(source_pvo, i);
1078: PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
1079: PMAPCOUNT(ptes_spilled);
1080: PMAPCOUNT(ptes_evicted);
1081: PMAPCOUNT(ptes_removed);
1082:
1083: PMAP_PVO_CHECK(victim_pvo);
1084: PMAP_PVO_CHECK(source_pvo);
1.50 ad 1085:
1086: PMAP_UNLOCK();
1.1 matt 1087: return 1;
1088: }
1089:
1090: /*
1091: * Restrict given range to physical memory
1092: */
1093: void
1094: pmap_real_memory(paddr_t *start, psize_t *size)
1095: {
1096: struct mem_region *mp;
1097:
1098: for (mp = mem; mp->size; mp++) {
1099: if (*start + *size > mp->start
1100: && *start < mp->start + mp->size) {
1101: if (*start < mp->start) {
1102: *size -= mp->start - *start;
1103: *start = mp->start;
1104: }
1105: if (*start + *size > mp->start + mp->size)
1106: *size = mp->start + mp->size - *start;
1107: return;
1108: }
1109: }
1110: *size = 0;
1111: }
1112:
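/*
 * Example with illustrative numbers: if mem[] held a single region
 * covering 0x00000000-0x007fffff, a caller passing *start = 0x7ff000
 * and *size = 0x4000 would get *size clamped to 0x1000, while a range
 * intersecting no region would come back with *size == 0.
 */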
1113: /*
1114: * Initialize anything else for pmap handling.
1115: * Called during vm_init().
1116: */
1117: void
1118: pmap_init(void)
1119: {
1120: pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
1121: sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
1.48 ad 1122: &pmap_pool_mallocator, IPL_NONE);
1.1 matt 1123:
1124: pool_setlowat(&pmap_mpvo_pool, 1008);
1125:
1126: pmap_initialized = 1;
1127:
1128: }
1129:
1130: /*
1.10 thorpej 1131: * How much virtual space does the kernel get?
1132: */
1133: void
1134: pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1135: {
1136: /*
1137: * For now, reserve one segment (minus some overhead) for kernel
1138: * virtual memory
1139: */
1140: *start = VM_MIN_KERNEL_ADDRESS;
1141: *end = VM_MAX_KERNEL_ADDRESS;
1142: }
1143:
1144: /*
1.1 matt 1145: * Allocate, initialize, and return a new physical map.
1146: */
1147: pmap_t
1148: pmap_create(void)
1149: {
1150: pmap_t pm;
1.38 sanjayl 1151:
1.1 matt 1152: pm = pool_get(&pmap_pool, PR_WAITOK);
1.46 christos 1153: memset((void *)pm, 0, sizeof *pm);
1.1 matt 1154: pmap_pinit(pm);
1155:
1156: DPRINTFN(CREATE,("pmap_create: pm %p:\n"
1.54 mlelstv 1157: "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
1158: " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
1159: "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
1160: " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
1161: pm,
1162: pm->pm_sr[0], pm->pm_sr[1],
1163: pm->pm_sr[2], pm->pm_sr[3],
1164: pm->pm_sr[4], pm->pm_sr[5],
1165: pm->pm_sr[6], pm->pm_sr[7],
1166: pm->pm_sr[8], pm->pm_sr[9],
1167: pm->pm_sr[10], pm->pm_sr[11],
1168: pm->pm_sr[12], pm->pm_sr[13],
1169: pm->pm_sr[14], pm->pm_sr[15]));
1.1 matt 1170: return pm;
1171: }
1172:
1173: /*
1174: * Initialize a preallocated and zeroed pmap structure.
1175: */
1176: void
1177: pmap_pinit(pmap_t pm)
1178: {
1.2 matt 1179: register_t entropy = MFTB();
1180: register_t mask;
1181: int i;
1.1 matt 1182:
1183: /*
1184: * Allocate some segment registers for this pmap.
1185: */
1186: pm->pm_refs = 1;
1.50 ad 1187: PMAP_LOCK();
1.2 matt 1188: for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1189: static register_t pmap_vsidcontext;
1190: register_t hash;
1191: unsigned int n;
1.1 matt 1192:
1193: /* Create a new value by multiplying by a prime adding in
1194: * entropy from the timebase register. This is to make the
1195: * VSID more random so that the PT Hash function collides
1196: * less often. (note that the prime causes gcc to do shifts
1197: * instead of a multiply)
1198: */
1199: pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1200: hash = pmap_vsidcontext & (NPMAPS - 1);
1.23 aymeric 1201: if (hash == 0) { /* 0 is special, avoid it */
1202: entropy += 0xbadf00d;
1.1 matt 1203: continue;
1.23 aymeric 1204: }
1.1 matt 1205: n = hash >> 5;
1.2 matt 1206: mask = 1L << (hash & (VSID_NBPW-1));
1207: hash = pmap_vsidcontext;
1.1 matt 1208: if (pmap_vsid_bitmap[n] & mask) { /* collision? */
1209: /* anything free in this bucket? */
1.2 matt 1210: if (~pmap_vsid_bitmap[n] == 0) {
1.23 aymeric 1211: entropy = hash ^ (hash >> 16);
1.1 matt 1212: continue;
1213: }
1214: i = ffs(~pmap_vsid_bitmap[n]) - 1;
1.2 matt 1215: mask = 1L << i;
1216: hash &= ~(VSID_NBPW-1);
1.1 matt 1217: hash |= i;
1218: }
1.18 matt 1219: hash &= PTE_VSID >> PTE_VSID_SHFT;
1.1 matt 1220: pmap_vsid_bitmap[n] |= mask;
1.18 matt 1221: pm->pm_vsid = hash;
1.53 garbled 1222: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.1 matt 1223: for (i = 0; i < 16; i++)
1.14 chs 1224: pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
1225: SR_NOEXEC;
1.18 matt 1226: #endif
1.50 ad 1227: PMAP_UNLOCK();
1.1 matt 1228: return;
1229: }
1.50 ad 1230: PMAP_UNLOCK();
1.1 matt 1231: panic("pmap_pinit: out of segments");
1232: }
1233:
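/*
 * The bitmap arithmetic above, worked through with illustrative
 * numbers: with VSID_NBPW == 32, candidate hash 0x47 lands in word
 * n = 0x47 >> 5 == 2 under mask 1 << (0x47 & 31) == 1 << 7; on a
 * collision, ffs(~pmap_vsid_bitmap[n]) picks the lowest free bit in
 * that word and the low bits of the hash are replaced with it.
 */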
1234: /*
1235: * Add a reference to the given pmap.
1236: */
1237: void
1238: pmap_reference(pmap_t pm)
1239: {
1.50 ad 1240: atomic_inc_uint(&pm->pm_refs);
1.1 matt 1241: }
1242:
1243: /*
1244: * Retire the given pmap from service.
1245: * Should only be called if the map contains no valid mappings.
1246: */
1247: void
1248: pmap_destroy(pmap_t pm)
1249: {
1.50 ad 1250: if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
1.1 matt 1251: pmap_release(pm);
1252: pool_put(&pmap_pool, pm);
1253: }
1254: }
1255:
1256: /*
1257: * Release any resources held by the given physical map.
1258: * Called when a pmap initialized by pmap_pinit is being released.
1259: */
1260: void
1261: pmap_release(pmap_t pm)
1262: {
1263: int idx, mask;
1.39 matt 1264:
1265: KASSERT(pm->pm_stats.resident_count == 0);
1266: KASSERT(pm->pm_stats.wired_count == 0);
1.1 matt 1267:
1.50 ad 1268: PMAP_LOCK();
1.1 matt 1269: if (pm->pm_sr[0] == 0)
1270: panic("pmap_release");
1.22 aymeric 1271: idx = pm->pm_vsid & (NPMAPS-1);
1.1 matt 1272: mask = 1 << (idx % VSID_NBPW);
1273: idx /= VSID_NBPW;
1.22 aymeric 1274:
1275: KASSERT(pmap_vsid_bitmap[idx] & mask);
1.1 matt 1276: pmap_vsid_bitmap[idx] &= ~mask;
1.50 ad 1277: PMAP_UNLOCK();
1.1 matt 1278: }
1279:
1280: /*
1281: * Copy the range specified by src_addr/len
1282: * from the source map to the range dst_addr/len
1283: * in the destination map.
1284: *
1285: * This routine is only advisory and need not do anything.
1286: */
1287: void
1288: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
1289: vsize_t len, vaddr_t src_addr)
1290: {
1291: PMAPCOUNT(copies);
1292: }
1293:
1294: /*
1295: * Require that all active physical maps contain no
1296: * incorrect entries NOW.
1297: */
1298: void
1299: pmap_update(struct pmap *pmap)
1300: {
1301: PMAPCOUNT(updates);
1302: TLBSYNC();
1303: }
1304:
1.35 perry 1305: static inline int
1.1 matt 1306: pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1307: {
1308: int pteidx;
1309: /*
1310: * We can find the actual pte entry without searching by
 1311: 	 * grabbing the PTEG slot index from 3 unused low bits of pvo_vaddr
1312: * and by noticing the HID bit.
1313: */
1314: pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1315: if (pvo->pvo_pte.pte_hi & PTE_HID)
1316: pteidx ^= pmap_pteg_mask * 8;
1317: return pteidx;
1318: }
1319:
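/*
 * Example (same illustrative numbers as va_to_pteg above): a PVO in
 * group 0x122 at slot 3 gives pteidx == 0x122 * 8 + 3 == 0x913; if
 * PTE_HID is set, the xor with pmap_pteg_mask * 8 (0x1ff8) moves it
 * to 0x16eb, i.e. slot 3 of the secondary group 0x2dd.
 */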
1.2 matt 1320: volatile struct pte *
1.1 matt 1321: pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1322: {
1.2 matt 1323: volatile struct pte *pt;
1.1 matt 1324:
1325: #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1326: if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
1327: return NULL;
1328: #endif
1329:
1330: /*
1331: * If we haven't been supplied the ptegidx, calculate it.
1332: */
1333: if (pteidx == -1) {
1334: int ptegidx;
1.2 matt 1335: ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1.1 matt 1336: pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1337: }
1338:
1339: pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1340:
1341: #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1342: return pt;
1343: #else
1344: if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1345: panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1346: "pvo but no valid pte index", pvo);
1347: }
1348: if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1349: panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
1350: "pvo but no valid pte", pvo);
1351: }
1352:
1353: if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1354: if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1355: #if defined(DEBUG) || defined(PMAPCHECK)
1356: pmap_pte_print(pt);
1357: #endif
1358: panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1359: "pmap_pteg_table %p but invalid in pvo",
1360: pvo, pt);
1361: }
1362: if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
1363: #if defined(DEBUG) || defined(PMAPCHECK)
1364: pmap_pte_print(pt);
1365: #endif
1366: panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
1367: "not match pte %p in pmap_pteg_table",
1368: pvo, pt);
1369: }
1370: return pt;
1371: }
1372:
1373: if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1374: #if defined(DEBUG) || defined(PMAPCHECK)
1375: pmap_pte_print(pt);
1376: #endif
1.12 matt 1377: 	panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
1.1 matt 1378: "pmap_pteg_table but valid in pvo", pvo, pt);
1379: }
1380: return NULL;
1381: #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1382: }
1383:
1384: struct pvo_entry *
1385: pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
1386: {
1387: struct pvo_entry *pvo;
1388: int ptegidx;
1389:
1390: va &= ~ADDR_POFF;
1.2 matt 1391: ptegidx = va_to_pteg(pm, va);
1.1 matt 1392:
1393: TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1394: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1395: if ((uintptr_t) pvo >= SEGMENT_LENGTH)
1396: panic("pmap_pvo_find_va: invalid pvo %p on "
1397: "list %#x (%p)", pvo, ptegidx,
1398: &pmap_pvo_table[ptegidx]);
1399: #endif
1400: if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1401: if (pteidx_p)
1402: *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1403: return pvo;
1404: }
1405: }
1.38 sanjayl 1406: if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
1.54 mlelstv 1407: panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
1.53 garbled 1408: __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
1.1 matt 1409: return NULL;
1410: }
1411:
1412: #if defined(DEBUG) || defined(PMAPCHECK)
1413: void
1414: pmap_pvo_check(const struct pvo_entry *pvo)
1415: {
1416: struct pvo_head *pvo_head;
1417: struct pvo_entry *pvo0;
1.2 matt 1418: volatile struct pte *pt;
1.1 matt 1419: int failed = 0;
1420:
1.50 ad 1421: PMAP_LOCK();
1422:
1.1 matt 1423: if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
1424: panic("pmap_pvo_check: pvo %p: invalid address", pvo);
1425:
1426: if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
1427: printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1428: pvo, pvo->pvo_pmap);
1429: failed = 1;
1430: }
1431:
1432: if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
1433: (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
1434: printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1435: pvo, TAILQ_NEXT(pvo, pvo_olink));
1436: failed = 1;
1437: }
1438:
1439: if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
1440: (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
 1441: 		printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
1442: pvo, LIST_NEXT(pvo, pvo_vlink));
1443: failed = 1;
1444: }
1445:
1.39 matt 1446: if (PVO_MANAGED_P(pvo)) {
1.1 matt 1447: pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1448: } else {
1449: if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
1450: printf("pmap_pvo_check: pvo %p: non kernel address "
1451: "on kernel unmanaged list\n", pvo);
1452: failed = 1;
1453: }
1454: pvo_head = &pmap_pvo_kunmanaged;
1455: }
1456: LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
1457: if (pvo0 == pvo)
1458: break;
1459: }
1460: if (pvo0 == NULL) {
1461: printf("pmap_pvo_check: pvo %p: not present "
1462: "on its vlist head %p\n", pvo, pvo_head);
1463: failed = 1;
1464: }
1465: if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
1466: printf("pmap_pvo_check: pvo %p: not present "
1467: "on its olist head\n", pvo);
1468: failed = 1;
1469: }
1470: pt = pmap_pvo_to_pte(pvo, -1);
1471: if (pt == NULL) {
1472: if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1473: printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1474: "no PTE\n", pvo);
1475: failed = 1;
1476: }
1477: } else {
1478: if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
1479: (uintptr_t) pt >=
1480: (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
1481: printf("pmap_pvo_check: pvo %p: pte %p not in "
1482: "pteg table\n", pvo, pt);
1483: failed = 1;
1484: }
1485: if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
1486: printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1487: "no PTE\n", pvo);
1488: failed = 1;
1489: }
1490: if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
1491: printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1.54 mlelstv 1492: "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
1493: pvo->pvo_pte.pte_hi,
1494: pt->pte_hi);
1.1 matt 1495: failed = 1;
1496: }
1497: if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
1498: (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
1499: printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1.54 mlelstv 1500: "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
1501: (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
1502: (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
1.1 matt 1503: failed = 1;
1504: }
1505: if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1.53 garbled 1506: printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva ""
1507: " doesn't not match PVO's VA %#" _PRIxva "\n",
1.1 matt 1508: pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
1509: failed = 1;
1510: }
1511: if (failed)
1512: pmap_pte_print(pt);
1513: }
1514: if (failed)
1515: panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
1516: pvo->pvo_pmap);
1.50 ad 1517:
1518: PMAP_UNLOCK();
1.1 matt 1519: }
1520: #endif /* DEBUG || PMAPCHECK */
1521:
1522: /*
1.25 chs 1523: * Search the PVO table looking for a non-wired entry.
1524: * If we find one, remove it and return it.
1525: */
1526:
1527: struct pvo_entry *
1528: pmap_pvo_reclaim(struct pmap *pm)
1529: {
1530: struct pvo_tqhead *pvoh;
1531: struct pvo_entry *pvo;
1532: uint32_t idx, endidx;
1533:
1534: endidx = pmap_pvo_reclaim_nextidx;
1535: for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
1536: idx = (idx + 1) & pmap_pteg_mask) {
1537: pvoh = &pmap_pvo_table[idx];
1538: TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
1.39 matt 1539: if (!PVO_WIRED_P(pvo)) {
1.33 chs 1540: pmap_pvo_remove(pvo, -1, NULL);
1.25 chs 1541: pmap_pvo_reclaim_nextidx = idx;
1.26 matt 1542: PMAPCOUNT(pvos_reclaimed);
1.25 chs 1543: return pvo;
1544: }
1545: }
1546: }
1547: return NULL;
1548: }
1549:
1550: /*
1.1 matt 1551:  * Enter a PVO for this mapping; returns 0 on success or ENOMEM.
1552: */
1553: int
1554: pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1.2 matt 1555: vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1.1 matt 1556: {
1557: struct pvo_entry *pvo;
1558: struct pvo_tqhead *pvoh;
1.2 matt 1559: register_t msr;
1.1 matt 1560: int ptegidx;
1561: int i;
1562: int poolflags = PR_NOWAIT;
1563:
1.28 chs 1564: /*
1565: * Compute the PTE Group index.
1566: */
1567: va &= ~ADDR_POFF;
1568: ptegidx = va_to_pteg(pm, va);
1569:
1570: msr = pmap_interrupts_off();
1571:
1.1 matt 1572: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1573: if (pmap_pvo_remove_depth > 0)
1574: panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1575: if (++pmap_pvo_enter_depth > 1)
1576: panic("pmap_pvo_enter: called recursively!");
1577: #endif
1578:
1579: /*
1580: * Remove any existing mapping for this page. Reuse the
 1581: 	 * pvo entry if there is a mapping.
1582: */
1583: TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1584: if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1585: #ifdef DEBUG
1586: if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
1587: ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
1588: ~(PTE_REF|PTE_CHG)) == 0 &&
1589: va < VM_MIN_KERNEL_ADDRESS) {
1.56 phx 1590: printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
1.54 mlelstv 1591: pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
1.56 phx 1592: printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
1.54 mlelstv 1593: pvo->pvo_pte.pte_hi,
1594: pm->pm_sr[va >> ADDR_SR_SHFT]);
1.1 matt 1595: pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
1596: #ifdef DDBX
1597: Debugger();
1598: #endif
1599: }
1600: #endif
1601: PMAPCOUNT(mappings_replaced);
1.33 chs 1602: pmap_pvo_remove(pvo, -1, NULL);
1.1 matt 1603: break;
1604: }
1605: }
1606:
1607: /*
 1608: 	 * If we aren't overwriting a mapping, try to allocate.
1609: */
1.26 matt 1610: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1611: --pmap_pvo_enter_depth;
1612: #endif
1.1 matt 1613: pmap_interrupts_restore(msr);
1.33 chs 1614: if (pvo) {
1615: pmap_pvo_free(pvo);
1616: }
1.1 matt 1617: pvo = pool_get(pl, poolflags);
1.25 chs 1618:
1619: #ifdef DEBUG
1620: /*
1621: * Exercise pmap_pvo_reclaim() a little.
1622: */
1623: if (pvo && (flags & PMAP_CANFAIL) != 0 &&
1624: pmap_pvo_reclaim_debugctr++ > 0x1000 &&
1625: (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
1626: pool_put(pl, pvo);
1627: pvo = NULL;
1628: }
1629: #endif
1630:
1.1 matt 1631: msr = pmap_interrupts_off();
1.26 matt 1632: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1633: ++pmap_pvo_enter_depth;
1634: #endif
1.1 matt 1635: if (pvo == NULL) {
1636: pvo = pmap_pvo_reclaim(pm);
1637: if (pvo == NULL) {
1638: if ((flags & PMAP_CANFAIL) == 0)
1639: panic("pmap_pvo_enter: failed");
1640: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1641: pmap_pvo_enter_depth--;
1642: #endif
1.26 matt 1643: PMAPCOUNT(pvos_failed);
1.1 matt 1644: pmap_interrupts_restore(msr);
1645: return ENOMEM;
1646: }
1647: }
1.25 chs 1648:
1.1 matt 1649: pvo->pvo_vaddr = va;
1650: pvo->pvo_pmap = pm;
1651: pvo->pvo_vaddr &= ~ADDR_POFF;
1652: if (flags & VM_PROT_EXECUTE) {
1653: PMAPCOUNT(exec_mappings);
1.14 chs 1654: pvo_set_exec(pvo);
1.1 matt 1655: }
1656: if (flags & PMAP_WIRED)
1657: pvo->pvo_vaddr |= PVO_WIRED;
1658: if (pvo_head != &pmap_pvo_kunmanaged) {
1659: pvo->pvo_vaddr |= PVO_MANAGED;
1660: PMAPCOUNT(mappings);
1661: } else {
1662: PMAPCOUNT(kernel_mappings);
1663: }
1.2 matt 1664: pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1.1 matt 1665:
1666: LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1.39 matt 1667: if (PVO_WIRED_P(pvo))
1.1 matt 1668: pvo->pvo_pmap->pm_stats.wired_count++;
1669: pvo->pvo_pmap->pm_stats.resident_count++;
1670: #if defined(DEBUG)
1.38 sanjayl 1671: /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
1.1 matt 1672: DPRINTFN(PVOENTER,
1.53 garbled 1673: ("pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
1.1 matt 1674: pvo, pm, va, pa));
1675: #endif
1676:
1677: /*
1678: * We hope this succeeds but it isn't required.
1679: */
1680: pvoh = &pmap_pvo_table[ptegidx];
1681: i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1682: if (i >= 0) {
1683: PVO_PTEGIDX_SET(pvo, i);
1.12 matt 1684: PVO_WHERE(pvo, ENTER_INSERT);
1.1 matt 1685: PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
1686: ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
1687: TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1.38 sanjayl 1688:
1.1 matt 1689: } else {
1690: /*
1691: * Since we didn't have room for this entry (which makes it
1692: 		 * an evicted entry), place it at the head of the list.
1693: */
1694: TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
1695: PMAPCOUNT(ptes_evicted);
1696: pm->pm_evictions++;
1.12 matt 1697: /*
1698: * If this is a kernel page, make sure it's active.
1699: */
1700: if (pm == pmap_kernel()) {
1.45 thorpej 1701: i = pmap_pte_spill(pm, va, false);
1.12 matt 1702: KASSERT(i);
1703: }
1.1 matt 1704: }
1705: PMAP_PVO_CHECK(pvo); /* sanity check */
1706: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1707: pmap_pvo_enter_depth--;
1708: #endif
1709: pmap_interrupts_restore(msr);
1710: return 0;
1711: }
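
/*
 * Illustrative sketch (not compiled): the 32-bit OEA hashed page table
 * gives every mapping two candidate PTE groups.  The primary hash XORs
 * the low 19 bits of the VSID with the 16-bit page index of the EA;
 * the secondary group is the complement of the primary, which is why
 * the insert/evict logic above only needs pmap_pteg_mask.  va_to_pteg()
 * is the authoritative computation; this helper merely restates the
 * architected idea.
 */
#if 0
static void
example_pteg_hash(register_t vsid, vaddr_t va, u_int *prim, u_int *sec)
{
	u_int hash = (vsid & 0x7ffff) ^ ((va >> 12) & 0xffff);

	*prim = hash & pmap_pteg_mask;	/* primary PTEG index */
	*sec = *prim ^ pmap_pteg_mask;	/* secondary PTEG index */
}
#endif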
1712:
1.53 garbled 1713: static void
1.33 chs 1714: pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
1.1 matt 1715: {
1.2 matt 1716: volatile struct pte *pt;
1.1 matt 1717: int ptegidx;
1718:
1719: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1720: if (++pmap_pvo_remove_depth > 1)
1721: panic("pmap_pvo_remove: called recursively!");
1722: #endif
1723:
1724: /*
1725: * If we haven't been supplied the ptegidx, calculate it.
1726: */
1727: if (pteidx == -1) {
1.2 matt 1728: ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1.1 matt 1729: pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1730: } else {
1731: ptegidx = pteidx >> 3;
1732: if (pvo->pvo_pte.pte_hi & PTE_HID)
1733: ptegidx ^= pmap_pteg_mask;
1734: }
1735: PMAP_PVO_CHECK(pvo); /* sanity check */
1736:
1737: /*
1738: * If there is an active pte entry, we need to deactivate it
1739: * (and save the ref & chg bits).
1740: */
1741: pt = pmap_pvo_to_pte(pvo, pteidx);
1742: if (pt != NULL) {
1743: pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1.12 matt 1744: PVO_WHERE(pvo, REMOVE);
1.1 matt 1745: PVO_PTEGIDX_CLR(pvo);
1746: PMAPCOUNT(ptes_removed);
1747: } else {
1748: KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1749: pvo->pvo_pmap->pm_evictions--;
1750: }
1751:
1752: /*
1.14 chs 1753: * Account for executable mappings.
1754: */
1.39 matt 1755: if (PVO_EXECUTABLE_P(pvo))
1.14 chs 1756: pvo_clear_exec(pvo);
1757:
1758: /*
1759: * Update our statistics.
1.1 matt 1760: */
1761: pvo->pvo_pmap->pm_stats.resident_count--;
1.39 matt 1762: if (PVO_WIRED_P(pvo))
1.1 matt 1763: pvo->pvo_pmap->pm_stats.wired_count--;
1764:
1765: /*
1766: * Save the REF/CHG bits into their cache if the page is managed.
1767: */
1.39 matt 1768: if (PVO_MANAGED_P(pvo)) {
1.2 matt 1769: register_t ptelo = pvo->pvo_pte.pte_lo;
1.1 matt 1770: struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1771:
1772: if (pg != NULL) {
1.37 matt 1773: /*
1774: * If this page was changed and it is mapped exec,
1775: * invalidate it.
1776: */
1777: if ((ptelo & PTE_CHG) &&
1778: (pmap_attr_fetch(pg) & PTE_EXEC)) {
1779: struct pvo_head *pvoh = vm_page_to_pvoh(pg);
1780: if (LIST_EMPTY(pvoh)) {
1781: DPRINTFN(EXEC, ("[pmap_pvo_remove: "
1.53 garbled 1782: "%#" _PRIxpa ": clear-exec]\n",
1.37 matt 1783: VM_PAGE_TO_PHYS(pg)));
1784: pmap_attr_clear(pg, PTE_EXEC);
1785: PMAPCOUNT(exec_uncached_pvo_remove);
1786: } else {
1787: DPRINTFN(EXEC, ("[pmap_pvo_remove: "
1.53 garbled 1788: "%#" _PRIxpa ": syncicache]\n",
1.37 matt 1789: VM_PAGE_TO_PHYS(pg)));
1790: pmap_syncicache(VM_PAGE_TO_PHYS(pg),
1791: PAGE_SIZE);
1792: PMAPCOUNT(exec_synced_pvo_remove);
1793: }
1794: }
1795:
1.1 matt 1796: pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1797: }
1798: PMAPCOUNT(unmappings);
1799: } else {
1800: PMAPCOUNT(kernel_unmappings);
1801: }
1802:
1803: /*
1804: * Remove the PVO from its lists and return it to the pool.
1805: */
1806: LIST_REMOVE(pvo, pvo_vlink);
1807: TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1.33 chs 1808: if (pvol) {
1809: LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
1.25 chs 1810: }
1.1 matt 1811: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1812: pmap_pvo_remove_depth--;
1813: #endif
1814: }
1815:
1.33 chs 1816: void
1817: pmap_pvo_free(struct pvo_entry *pvo)
1818: {
1819:
1.39 matt 1820: pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
1.33 chs 1821: }
1822:
1823: void
1824: pmap_pvo_free_list(struct pvo_head *pvol)
1825: {
1826: struct pvo_entry *pvo, *npvo;
1827:
1828: for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
1829: npvo = LIST_NEXT(pvo, pvo_vlink);
1830: LIST_REMOVE(pvo, pvo_vlink);
1831: pmap_pvo_free(pvo);
1832: }
1833: }
1834:
1.1 matt 1835: /*
1.14 chs 1836: * Mark a mapping as executable.
1837: * If this is the first executable mapping in the segment,
1838: * clear the noexec flag.
1839: */
1.53 garbled 1840: static void
1.14 chs 1841: pvo_set_exec(struct pvo_entry *pvo)
1842: {
1843: struct pmap *pm = pvo->pvo_pmap;
1844:
1.39 matt 1845: if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
1.14 chs 1846: return;
1847: }
1848: pvo->pvo_vaddr |= PVO_EXECUTABLE;
1.53 garbled 1849: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.18 matt 1850: {
1851: int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1852: if (pm->pm_exec[sr]++ == 0) {
1853: pm->pm_sr[sr] &= ~SR_NOEXEC;
1854: }
1.14 chs 1855: }
1.18 matt 1856: #endif
1.14 chs 1857: }
1858:
1859: /*
1860: * Mark a mapping as non-executable.
1861: * If this was the last executable mapping in the segment,
1862: * set the noexec flag.
1863: */
1.53 garbled 1864: static void
1.14 chs 1865: pvo_clear_exec(struct pvo_entry *pvo)
1866: {
1867: struct pmap *pm = pvo->pvo_pmap;
1868:
1.39 matt 1869: if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
1.14 chs 1870: return;
1871: }
1872: pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1.53 garbled 1873: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.18 matt 1874: {
1875: int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1876: if (--pm->pm_exec[sr] == 0) {
1877: pm->pm_sr[sr] |= SR_NOEXEC;
1878: }
1.14 chs 1879: }
1.18 matt 1880: #endif
1.14 chs 1881: }
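
/*
 * Sketch (not compiled): the accounting in pvo_set_exec() and
 * pvo_clear_exec() above is a per-segment reference count.  A 32-bit
 * effective address selects one of 16 segment registers (256MB each)
 * by its top four bits, which is all ADDR_SR_SHFT extracts; SR_NOEXEC
 * is only toggled on the 0<->1 transitions of that count.
 */
#if 0
static int
example_segment_index(vaddr_t va)
{
	return va >> ADDR_SR_SHFT;	/* 0..15: which segment register */
}
#endif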
1882:
1883: /*
1.1 matt 1884: * Insert physical page at pa into the given pmap at virtual address va.
1885: */
1886: int
1.65 cegger 1887: pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 matt 1888: {
1889: struct mem_region *mp;
1890: struct pvo_head *pvo_head;
1891: struct vm_page *pg;
1892: struct pool *pl;
1.2 matt 1893: register_t pte_lo;
1.1 matt 1894: int error;
1895: u_int pvo_flags;
1896: u_int was_exec = 0;
1897:
1.50 ad 1898: PMAP_LOCK();
1899:
1.1 matt 1900: if (__predict_false(!pmap_initialized)) {
1901: pvo_head = &pmap_pvo_kunmanaged;
1902: pl = &pmap_upvo_pool;
1903: pvo_flags = 0;
1904: pg = NULL;
1905: was_exec = PTE_EXEC;
1906: } else {
1907: pvo_head = pa_to_pvoh(pa, &pg);
1908: pl = &pmap_mpvo_pool;
1909: pvo_flags = PVO_MANAGED;
1910: }
1911:
1912: DPRINTFN(ENTER,
1.54 mlelstv 1913: ("pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
1.1 matt 1914: pm, va, pa, prot, flags));
1915:
1916: /*
1917: * If this is a managed page, and it's the first reference to the
1918: 	 * page, clear the execness of the page; otherwise fetch the execness.
1919: */
1920: if (pg != NULL)
1921: was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1922:
1923: DPRINTFN(ENTER, (" was_exec=%d", was_exec));
1924:
1925: /*
1926: * Assume the page is cache inhibited and access is guarded unless
1927: * it's in our available memory array. If it is in the memory array,
1928: 	 * assume it's in memory-coherent memory.
1929: */
1.75.2.1 bouyer 1930: if (flags & PMAP_MD_PREFETCHABLE) {
1931: pte_lo = 0;
1932: } else
1933: pte_lo = PTE_G;
1934:
1.75 matt 1935: if ((flags & PMAP_MD_NOCACHE) == 0) {
1.1 matt 1936: for (mp = mem; mp->size; mp++) {
1937: if (pa >= mp->start && pa < mp->start + mp->size) {
1938: pte_lo = PTE_M;
1939: break;
1940: }
1941: }
1.75.2.1 bouyer 1942: } else {
1943: pte_lo |= PTE_I;
1.1 matt 1944: }
1945:
1946: if (prot & VM_PROT_WRITE)
1947: pte_lo |= PTE_BW;
1948: else
1949: pte_lo |= PTE_BR;
1950:
1951: /*
1952: * If this was in response to a fault, "pre-fault" the PTE's
1953: * changed/referenced bit appropriately.
1954: */
1955: if (flags & VM_PROT_WRITE)
1956: pte_lo |= PTE_CHG;
1.30 chs 1957: if (flags & VM_PROT_ALL)
1.1 matt 1958: pte_lo |= PTE_REF;
1959:
1960: /*
1961: * We need to know if this page can be executable
1962: */
1963: flags |= (prot & VM_PROT_EXECUTE);
1964:
1965: /*
1966: * Record mapping for later back-translation and pte spilling.
1967: * This will overwrite any existing mapping.
1968: */
1969: error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
1970:
1971: /*
1972: * Flush the real page from the instruction cache if this page is
1973: * mapped executable and cacheable and has not been flushed since
1974: * the last time it was modified.
1975: */
1976: if (error == 0 &&
1977: (flags & VM_PROT_EXECUTE) &&
1978: (pte_lo & PTE_I) == 0 &&
1979: was_exec == 0) {
1980: DPRINTFN(ENTER, (" syncicache"));
1981: PMAPCOUNT(exec_synced);
1.6 thorpej 1982: pmap_syncicache(pa, PAGE_SIZE);
1.1 matt 1983: if (pg != NULL) {
1984: pmap_attr_save(pg, PTE_EXEC);
1985: PMAPCOUNT(exec_cached);
1986: #if defined(DEBUG) || defined(PMAPDEBUG)
1987: if (pmapdebug & PMAPDEBUG_ENTER)
1988: printf(" marked-as-exec");
1989: else if (pmapdebug & PMAPDEBUG_EXEC)
1.53 garbled 1990: printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n",
1.34 yamt 1991: VM_PAGE_TO_PHYS(pg));
1.1 matt 1992:
1993: #endif
1994: }
1995: }
1996:
1997: DPRINTFN(ENTER, (": error=%d\n", error));
1998:
1.50 ad 1999: PMAP_UNLOCK();
2000:
1.1 matt 2001: return error;
2002: }
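
/*
 * Sketch (not compiled): the pte_lo selection performed by pmap_enter()
 * above, restated as a helper.  `managed_ram' stands in for the mem[]
 * range scan and is an assumption of this sketch; the flag and PTE
 * names are the ones already used in this file.
 */
#if 0
static register_t
example_pte_lo(bool managed_ram, u_int flags, vm_prot_t prot)
{
	register_t pte_lo;

	/* Guarded unless the caller marked the region prefetchable. */
	pte_lo = (flags & PMAP_MD_PREFETCHABLE) ? 0 : PTE_G;

	if (flags & PMAP_MD_NOCACHE)
		pte_lo |= PTE_I;	/* cache-inhibited, e.g. devices */
	else if (managed_ram)
		pte_lo = PTE_M;		/* coherent, cacheable RAM */

	/* BW = writable, BR = read-only under both keys. */
	pte_lo |= (prot & VM_PROT_WRITE) ? PTE_BW : PTE_BR;
	return pte_lo;
}
#endif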
2003:
2004: void
1.68 cegger 2005: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 matt 2006: {
2007: struct mem_region *mp;
1.2 matt 2008: register_t pte_lo;
1.1 matt 2009: int error;
2010:
1.53 garbled 2011: #if defined (PMAP_OEA64_BRIDGE)
1.1 matt 2012: if (va < VM_MIN_KERNEL_ADDRESS)
2013: panic("pmap_kenter_pa: attempt to enter "
1.53 garbled 2014: "non-kernel address %#" _PRIxva "!", va);
1.38 sanjayl 2015: #endif
1.1 matt 2016:
2017: DPRINTFN(KENTER,
1.53 garbled 2018: ("pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot));
1.1 matt 2019:
1.50 ad 2020: PMAP_LOCK();
2021:
1.1 matt 2022: /*
2023: * Assume the page is cache inhibited and access is guarded unless
2024: * it's in our available memory array. If it is in the memory array,
2025: 	 * assume it's in memory-coherent memory.
2026: */
2027: pte_lo = PTE_IG;
1.75 matt 2028: if ((flags & PMAP_MD_NOCACHE) == 0) {
1.4 matt 2029: for (mp = mem; mp->size; mp++) {
2030: if (pa >= mp->start && pa < mp->start + mp->size) {
2031: pte_lo = PTE_M;
2032: break;
2033: }
1.1 matt 2034: }
2035: }
2036:
2037: if (prot & VM_PROT_WRITE)
2038: pte_lo |= PTE_BW;
2039: else
2040: pte_lo |= PTE_BR;
2041:
2042: /*
2043: * We don't care about REF/CHG on PVOs on the unmanaged list.
2044: */
2045: error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
2046: &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
2047:
2048: if (error != 0)
1.53 garbled 2049: panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
1.1 matt 2050: va, pa, error);
1.50 ad 2051:
2052: PMAP_UNLOCK();
1.1 matt 2053: }
2054:
2055: void
2056: pmap_kremove(vaddr_t va, vsize_t len)
2057: {
2058: if (va < VM_MIN_KERNEL_ADDRESS)
2059: panic("pmap_kremove: attempt to remove "
1.53 garbled 2060: "non-kernel address %#" _PRIxva "!", va);
1.1 matt 2061:
1.53 garbled 2062: DPRINTFN(KREMOVE,("pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len));
1.1 matt 2063: pmap_remove(pmap_kernel(), va, va + len);
2064: }
2065:
2066: /*
2067: * Remove the given range of mapping entries.
2068: */
2069: void
2070: pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
2071: {
1.33 chs 2072: struct pvo_head pvol;
1.1 matt 2073: struct pvo_entry *pvo;
1.2 matt 2074: register_t msr;
1.1 matt 2075: int pteidx;
2076:
1.50 ad 2077: PMAP_LOCK();
1.33 chs 2078: LIST_INIT(&pvol);
1.14 chs 2079: msr = pmap_interrupts_off();
1.1 matt 2080: for (; va < endva; va += PAGE_SIZE) {
2081: pvo = pmap_pvo_find_va(pm, va, &pteidx);
2082: if (pvo != NULL) {
1.33 chs 2083: pmap_pvo_remove(pvo, pteidx, &pvol);
1.1 matt 2084: }
2085: }
1.14 chs 2086: pmap_interrupts_restore(msr);
1.33 chs 2087: pmap_pvo_free_list(&pvol);
1.50 ad 2088: PMAP_UNLOCK();
1.1 matt 2089: }
2090:
2091: /*
2092: * Get the physical page address for the given pmap/virtual address.
2093: */
1.44 thorpej 2094: bool
1.1 matt 2095: pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
2096: {
2097: struct pvo_entry *pvo;
1.2 matt 2098: register_t msr;
1.7 matt 2099:
1.50 ad 2100: PMAP_LOCK();
1.38 sanjayl 2101:
1.7 matt 2102: /*
2103: * If this is a kernel pmap lookup, also check the battable
2104: * and if we get a hit, translate the VA to a PA using the
1.36 nathanw 2105: * BAT entries. Don't check for VM_MAX_KERNEL_ADDRESS is
1.7 matt 2106: * that will wrap back to 0.
2107: */
2108: if (pm == pmap_kernel() &&
2109: (va < VM_MIN_KERNEL_ADDRESS ||
2110: (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
1.8 matt 2111: KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
1.53 garbled 2112: #if defined (PMAP_OEA)
1.55 garbled 2113: #ifdef PPC_OEA601
2114: if ((MFPVR() >> 16) == MPC601) {
1.24 kleink 2115: register_t batu = battable[va >> 23].batu;
2116: register_t batl = battable[va >> 23].batl;
2117: register_t sr = iosrtable[va >> ADDR_SR_SHFT];
2118: if (BAT601_VALID_P(batl) &&
2119: BAT601_VA_MATCH_P(batu, batl, va)) {
2120: register_t mask =
2121: (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
1.29 briggs 2122: if (pap)
2123: *pap = (batl & mask) | (va & ~mask);
1.50 ad 2124: PMAP_UNLOCK();
1.45 thorpej 2125: return true;
1.24 kleink 2126: } else if (SR601_VALID_P(sr) &&
2127: SR601_PA_MATCH_P(sr, va)) {
1.29 briggs 2128: if (pap)
2129: *pap = va;
1.50 ad 2130: PMAP_UNLOCK();
1.45 thorpej 2131: return true;
1.24 kleink 2132: }
1.55 garbled 2133: } else
2134: #endif /* PPC_OEA601 */
2135: {
2136: register_t batu = battable[va >> ADDR_SR_SHFT].batu;
2137: if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
2138: register_t batl =
2139: battable[va >> ADDR_SR_SHFT].batl;
2140: register_t mask =
2141: (~(batu & BAT_BL) << 15) & ~0x1ffffL;
2142: if (pap)
2143: *pap = (batl & mask) | (va & ~mask);
2144: PMAP_UNLOCK();
2145: return true;
2146: }
1.7 matt 2147: }
1.45 thorpej 2148: 		PMAP_UNLOCK();
		return false;
1.53 garbled 2149: #elif defined (PMAP_OEA64_BRIDGE)
1.52 garbled 2150: if (va >= SEGMENT_LENGTH)
2151: panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n",
2152: __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
2153: else {
2154: if (pap)
2155: *pap = va;
2156: PMAP_UNLOCK();
2157: return true;
2158: }
1.53 garbled 2159: #elif defined (PMAP_OEA64)
1.38 sanjayl 2160: #error PPC_OEA64 not supported
2161: #endif /* PPC_OEA */
1.7 matt 2162: }
1.1 matt 2163:
2164: msr = pmap_interrupts_off();
2165: pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2166: if (pvo != NULL) {
2167: PMAP_PVO_CHECK(pvo); /* sanity check */
1.29 briggs 2168: if (pap)
2169: *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
2170: | (va & ADDR_POFF);
1.1 matt 2171: }
2172: pmap_interrupts_restore(msr);
1.50 ad 2173: PMAP_UNLOCK();
1.1 matt 2174: return pvo != NULL;
2175: }
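
/*
 * Worked example (not compiled) of the BAT translation arithmetic used
 * by pmap_extract() above.  BAT_BL is the block-length mask in the
 * upper BAT word; for a 256MB block (BL = 0x7ff, stored as 0x1ffc) the
 * expression below yields mask == 0xf0000000, so the BRPN in batl
 * supplies the top nibble of the PA and the EA supplies the low 28
 * bits.
 */
#if 0
static paddr_t
example_bat_translate(register_t batu, register_t batl, vaddr_t va)
{
	register_t mask = (~(batu & BAT_BL) << 15) & ~0x1ffffL;

	return (batl & mask) | (va & ~mask);
}
#endif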
2176:
2177: /*
2178: * Lower the protection on the specified range of this pmap.
2179: */
2180: void
2181: pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
2182: {
2183: struct pvo_entry *pvo;
1.2 matt 2184: volatile struct pte *pt;
2185: register_t msr;
1.1 matt 2186: int pteidx;
2187:
2188: /*
2189: * Since this routine only downgrades protection, we should
1.14 chs 2190: * always be called with at least one bit not set.
1.1 matt 2191: */
1.14 chs 2192: KASSERT(prot != VM_PROT_ALL);
1.1 matt 2193:
2194: /*
2195: * If there is no protection, this is equivalent to
2196: 	 * removing the range from the pmap.
2197: */
2198: if ((prot & VM_PROT_READ) == 0) {
2199: pmap_remove(pm, va, endva);
2200: return;
2201: }
2202:
1.50 ad 2203: PMAP_LOCK();
2204:
1.1 matt 2205: msr = pmap_interrupts_off();
1.6 thorpej 2206: for (; va < endva; va += PAGE_SIZE) {
1.1 matt 2207: pvo = pmap_pvo_find_va(pm, va, &pteidx);
2208: if (pvo == NULL)
2209: continue;
2210: PMAP_PVO_CHECK(pvo); /* sanity check */
2211:
2212: /*
2213: * Revoke executable if asked to do so.
2214: */
2215: if ((prot & VM_PROT_EXECUTE) == 0)
1.14 chs 2216: pvo_clear_exec(pvo);
1.1 matt 2217:
2218: #if 0
2219: /*
2220: * If the page is already read-only, no change
2221: * needs to be made.
2222: */
2223: if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
2224: continue;
2225: #endif
2226: /*
2227: * Grab the PTE pointer before we diddle with
2228: * the cached PTE copy.
2229: */
2230: pt = pmap_pvo_to_pte(pvo, pteidx);
2231: /*
2232: * Change the protection of the page.
2233: */
2234: pvo->pvo_pte.pte_lo &= ~PTE_PP;
2235: pvo->pvo_pte.pte_lo |= PTE_BR;
2236:
2237: /*
2238: * If the PVO is in the page table, update
2239: 		 * that pte as well.
2240: */
2241: if (pt != NULL) {
2242: pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1.12 matt 2243: PVO_WHERE(pvo, PMAP_PROTECT);
1.1 matt 2244: PMAPCOUNT(ptes_changed);
2245: }
2246:
2247: PMAP_PVO_CHECK(pvo); /* sanity check */
2248: }
2249: pmap_interrupts_restore(msr);
1.50 ad 2250: PMAP_UNLOCK();
1.1 matt 2251: }
2252:
2253: void
2254: pmap_unwire(pmap_t pm, vaddr_t va)
2255: {
2256: struct pvo_entry *pvo;
1.2 matt 2257: register_t msr;
1.1 matt 2258:
1.50 ad 2259: PMAP_LOCK();
1.1 matt 2260: msr = pmap_interrupts_off();
2261: pvo = pmap_pvo_find_va(pm, va, NULL);
2262: if (pvo != NULL) {
1.39 matt 2263: if (PVO_WIRED_P(pvo)) {
1.1 matt 2264: pvo->pvo_vaddr &= ~PVO_WIRED;
2265: pm->pm_stats.wired_count--;
2266: }
2267: PMAP_PVO_CHECK(pvo); /* sanity check */
2268: }
2269: pmap_interrupts_restore(msr);
1.50 ad 2270: PMAP_UNLOCK();
1.1 matt 2271: }
2272:
2273: /*
2274: * Lower the protection on the specified physical page.
2275: */
2276: void
2277: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2278: {
1.33 chs 2279: struct pvo_head *pvo_head, pvol;
1.1 matt 2280: struct pvo_entry *pvo, *next_pvo;
1.2 matt 2281: volatile struct pte *pt;
2282: register_t msr;
1.1 matt 2283:
1.50 ad 2284: PMAP_LOCK();
2285:
1.14 chs 2286: KASSERT(prot != VM_PROT_ALL);
1.33 chs 2287: LIST_INIT(&pvol);
1.1 matt 2288: msr = pmap_interrupts_off();
2289:
2290: /*
2291: * When UVM reuses a page, it does a pmap_page_protect with
2292: * VM_PROT_NONE. At that point, we can clear the exec flag
2293: * since we know the page will have different contents.
2294: */
2295: if ((prot & VM_PROT_READ) == 0) {
1.53 garbled 2296: DPRINTFN(EXEC, ("[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
1.34 yamt 2297: VM_PAGE_TO_PHYS(pg)));
1.1 matt 2298: if (pmap_attr_fetch(pg) & PTE_EXEC) {
2299: PMAPCOUNT(exec_uncached_page_protect);
2300: pmap_attr_clear(pg, PTE_EXEC);
2301: }
2302: }
2303:
2304: pvo_head = vm_page_to_pvoh(pg);
2305: for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2306: next_pvo = LIST_NEXT(pvo, pvo_vlink);
2307: PMAP_PVO_CHECK(pvo); /* sanity check */
2308:
2309: /*
2310: * Downgrading to no mapping at all, we just remove the entry.
2311: */
2312: if ((prot & VM_PROT_READ) == 0) {
1.33 chs 2313: pmap_pvo_remove(pvo, -1, &pvol);
1.1 matt 2314: continue;
2315: }
2316:
2317: /*
2318: * If EXEC permission is being revoked, just clear the
2319: * flag in the PVO.
2320: */
2321: if ((prot & VM_PROT_EXECUTE) == 0)
1.14 chs 2322: pvo_clear_exec(pvo);
1.1 matt 2323:
2324: /*
2325: * If this entry is already RO, don't diddle with the
2326: * page table.
2327: */
2328: if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
2329: PMAP_PVO_CHECK(pvo);
2330: continue;
2331: }
2332:
2333: /*
2334: 		 * Grab the PTE before we diddle the bits so
2335: * pvo_to_pte can verify the pte contents are as
2336: * expected.
2337: */
2338: pt = pmap_pvo_to_pte(pvo, -1);
2339: pvo->pvo_pte.pte_lo &= ~PTE_PP;
2340: pvo->pvo_pte.pte_lo |= PTE_BR;
2341: if (pt != NULL) {
2342: pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1.12 matt 2343: PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
1.1 matt 2344: PMAPCOUNT(ptes_changed);
2345: }
2346: PMAP_PVO_CHECK(pvo); /* sanity check */
2347: }
2348: pmap_interrupts_restore(msr);
1.33 chs 2349: pmap_pvo_free_list(&pvol);
1.50 ad 2350:
2351: PMAP_UNLOCK();
1.1 matt 2352: }
2353:
2354: /*
2355: * Activate the address space for the specified process. If the process
2356: * is the current process, load the new MMU context.
2357: */
2358: void
2359: pmap_activate(struct lwp *l)
2360: {
1.69 rmind 2361: struct pcb *pcb = lwp_getpcb(l);
1.1 matt 2362: pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
2363:
2364: DPRINTFN(ACTIVATE,
2365: ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));
2366:
2367: /*
1.70 skrll 2368: * XXX Normally performed in cpu_lwp_fork().
1.1 matt 2369: */
1.13 matt 2370: pcb->pcb_pm = pmap;
1.17 matt 2371:
2372: /*
2373: * In theory, the SR registers need only be valid on return
2374: 	 * to user space, so we could wait and load them there.
2375: */
2376: if (l == curlwp) {
2377: /* Store pointer to new current pmap. */
2378: curpm = pmap;
2379: }
1.1 matt 2380: }
2381:
2382: /*
2383: * Deactivate the specified process's address space.
2384: */
2385: void
2386: pmap_deactivate(struct lwp *l)
2387: {
2388: }
2389:
1.44 thorpej 2390: bool
1.1 matt 2391: pmap_query_bit(struct vm_page *pg, int ptebit)
2392: {
2393: struct pvo_entry *pvo;
1.2 matt 2394: volatile struct pte *pt;
2395: register_t msr;
1.1 matt 2396:
1.50 ad 2397: PMAP_LOCK();
2398:
2399: if (pmap_attr_fetch(pg) & ptebit) {
2400: PMAP_UNLOCK();
1.45 thorpej 2401: return true;
1.50 ad 2402: }
1.14 chs 2403:
1.1 matt 2404: msr = pmap_interrupts_off();
2405: LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2406: PMAP_PVO_CHECK(pvo); /* sanity check */
2407: /*
2408: 		 * See if we saved the bit off.  If so, cache it and return
2409: * success.
2410: */
2411: if (pvo->pvo_pte.pte_lo & ptebit) {
2412: pmap_attr_save(pg, ptebit);
2413: PMAP_PVO_CHECK(pvo); /* sanity check */
2414: pmap_interrupts_restore(msr);
1.50 ad 2415: PMAP_UNLOCK();
1.45 thorpej 2416: return true;
1.1 matt 2417: }
2418: }
2419: /*
2420: * No luck, now go thru the hard part of looking at the ptes
2421: * themselves. Sync so any pending REF/CHG bits are flushed
2422: * to the PTEs.
2423: */
2424: SYNC();
2425: LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
2426: PMAP_PVO_CHECK(pvo); /* sanity check */
2427: /*
2428: 		 * See if this pvo has a valid PTE.  If so, fetch the
2429: 		 * REF/CHG bits from the valid PTE.  If the appropriate
2430: 		 * ptebit is set, cache it and return success.
2431: */
2432: pt = pmap_pvo_to_pte(pvo, -1);
2433: if (pt != NULL) {
2434: pmap_pte_synch(pt, &pvo->pvo_pte);
2435: if (pvo->pvo_pte.pte_lo & ptebit) {
2436: pmap_attr_save(pg, ptebit);
2437: PMAP_PVO_CHECK(pvo); /* sanity check */
2438: pmap_interrupts_restore(msr);
1.50 ad 2439: PMAP_UNLOCK();
1.45 thorpej 2440: return true;
1.1 matt 2441: }
2442: }
2443: }
2444: pmap_interrupts_restore(msr);
1.50 ad 2445: PMAP_UNLOCK();
1.45 thorpej 2446: return false;
1.1 matt 2447: }
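
/*
 * Sketch (not compiled): the shape of the REF/CHG search that
 * pmap_query_bit() above performs, cheapest source first.  The real
 * function makes two passes (cached PVO copies, then a SYNC() and the
 * live PTEs) and writes the bit back with pmap_attr_save(); this
 * merges the passes purely for illustration.
 */
#if 0
static bool
example_query_bit(struct vm_page *pg, int ptebit)
{
	struct pvo_entry *pvo;
	volatile struct pte *pt;

	if (pmap_attr_fetch(pg) & ptebit)		/* 1. page attrs */
		return true;
	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
		if (pvo->pvo_pte.pte_lo & ptebit)	/* 2. cached copy */
			return true;
		pt = pmap_pvo_to_pte(pvo, -1);		/* 3. live PTE */
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit)
				return true;
		}
	}
	return false;
}
#endif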
2448:
1.44 thorpej 2449: bool
1.1 matt 2450: pmap_clear_bit(struct vm_page *pg, int ptebit)
2451: {
2452: struct pvo_head *pvoh = vm_page_to_pvoh(pg);
2453: struct pvo_entry *pvo;
1.2 matt 2454: volatile struct pte *pt;
2455: register_t msr;
1.1 matt 2456: int rv = 0;
2457:
1.50 ad 2458: PMAP_LOCK();
1.1 matt 2459: msr = pmap_interrupts_off();
2460:
2461: /*
2462: 	 * Fetch the cached value
2463: */
2464: rv |= pmap_attr_fetch(pg);
2465:
2466: /*
2467: * Clear the cached value.
2468: */
2469: pmap_attr_clear(pg, ptebit);
2470:
2471: /*
2472: * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
2473: * can reset the right ones). Note that since the pvo entries and
2474: * list heads are accessed via BAT0 and are never placed in the
2475: * page table, we don't have to worry about further accesses setting
2476: * the REF/CHG bits.
2477: */
2478: SYNC();
2479:
2480: /*
2481: 	 * For each pvo entry, clear the pvo's ptebit.  If the pvo has a
2482: 	 * valid PTE, clear the ptebit from the valid PTE as well.
2483: */
2484: LIST_FOREACH(pvo, pvoh, pvo_vlink) {
2485: PMAP_PVO_CHECK(pvo); /* sanity check */
2486: pt = pmap_pvo_to_pte(pvo, -1);
2487: if (pt != NULL) {
2488: /*
2489: * Only sync the PTE if the bit we are looking
2490: * for is not already set.
2491: */
2492: if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
2493: pmap_pte_synch(pt, &pvo->pvo_pte);
2494: /*
2495: * If the bit we are looking for was already set,
2496: * clear that bit in the pte.
2497: */
2498: if (pvo->pvo_pte.pte_lo & ptebit)
2499: pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
2500: }
2501: rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
2502: pvo->pvo_pte.pte_lo &= ~ptebit;
2503: PMAP_PVO_CHECK(pvo); /* sanity check */
2504: }
2505: pmap_interrupts_restore(msr);
1.14 chs 2506:
1.1 matt 2507: /*
2508: * If we are clearing the modify bit and this page was marked EXEC
2509: * and the user of the page thinks the page was modified, then we
2510: * need to clean it from the icache if it's mapped or clear the EXEC
2511: * bit if it's not mapped. The page itself might not have the CHG
2512: * bit set if the modification was done via DMA to the page.
2513: */
2514: if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
2515: if (LIST_EMPTY(pvoh)) {
1.53 garbled 2516: DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
1.34 yamt 2517: VM_PAGE_TO_PHYS(pg)));
1.1 matt 2518: pmap_attr_clear(pg, PTE_EXEC);
2519: PMAPCOUNT(exec_uncached_clear_modify);
2520: } else {
1.53 garbled 2521: DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
1.34 yamt 2522: VM_PAGE_TO_PHYS(pg)));
2523: pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
1.1 matt 2524: PMAPCOUNT(exec_synced_clear_modify);
2525: }
2526: }
1.50 ad 2527: PMAP_UNLOCK();
1.1 matt 2528: return (rv & ptebit) != 0;
2529: }
2530:
2531: void
2532: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
2533: {
2534: struct pvo_entry *pvo;
2535: size_t offset = va & ADDR_POFF;
2536: int s;
2537:
1.50 ad 2538: PMAP_LOCK();
1.1 matt 2539: s = splvm();
2540: while (len > 0) {
1.6 thorpej 2541: size_t seglen = PAGE_SIZE - offset;
1.1 matt 2542: if (seglen > len)
2543: seglen = len;
2544: pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
1.39 matt 2545: if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
1.1 matt 2546: pmap_syncicache(
2547: (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
2548: PMAP_PVO_CHECK(pvo);
2549: }
2550: va += seglen;
2551: len -= seglen;
2552: offset = 0;
2553: }
2554: splx(s);
1.50 ad 2555: PMAP_UNLOCK();
1.1 matt 2556: }
2557:
2558: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
2559: void
1.2 matt 2560: pmap_pte_print(volatile struct pte *pt)
1.1 matt 2561: {
2562: printf("PTE %p: ", pt);
1.38 sanjayl 2563:
1.1 matt     2565: 	/* High word: */
1.54 mlelstv 2566: 	printf("%#" _PRIxpte ": [", pt->pte_hi);
1.38 sanjayl 2570:
1.1 matt 2571: printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
2572: printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
1.38 sanjayl 2573:
1.54 mlelstv 2574: printf("%#" _PRIxpte " %#" _PRIxpte "",
1.38 sanjayl 2575: (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
2576: pt->pte_hi & PTE_API);
1.54 mlelstv 2578: 	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
1.38 sanjayl 2582:
1.1 matt 2583: /* Low word: */
1.54 mlelstv 2585: 	printf(" %#" _PRIxpte ": [", pt->pte_lo);
             2586: 	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
1.1 matt 2591: printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
2592: printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
2593: printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
2594: printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
2595: printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
2596: printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
2597: switch (pt->pte_lo & PTE_PP) {
2598: case PTE_BR: printf("br]\n"); break;
2599: case PTE_BW: printf("bw]\n"); break;
2600: case PTE_SO: printf("so]\n"); break;
2601: case PTE_SW: printf("sw]\n"); break;
2602: }
2603: }
2604: #endif
2605:
2606: #if defined(DDB)
2607: void
2608: pmap_pteg_check(void)
2609: {
1.2 matt 2610: volatile struct pte *pt;
1.1 matt 2611: int i;
2612: int ptegidx;
2613: u_int p_valid = 0;
2614: u_int s_valid = 0;
2615: u_int invalid = 0;
1.38 sanjayl 2616:
1.1 matt 2617: for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2618: for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
2619: if (pt->pte_hi & PTE_VALID) {
2620: if (pt->pte_hi & PTE_HID)
2621: s_valid++;
2622: 			else
1.1 matt     2623: 				p_valid++;
1.1 matt 2626: } else
2627: invalid++;
2628: }
2629: }
2630: printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
2631: p_valid, p_valid, s_valid, s_valid,
2632: invalid, invalid);
2633: }
2634:
2635: void
2636: pmap_print_mmuregs(void)
2637: {
2638: int i;
2639: u_int cpuvers;
1.53 garbled 2640: #ifndef PMAP_OEA64
1.1 matt 2641: vaddr_t addr;
1.2 matt 2642: register_t soft_sr[16];
1.18 matt 2643: #endif
1.53 garbled 2644: #if defined (PMAP_OEA)
1.1 matt 2645: struct bat soft_ibat[4];
2646: struct bat soft_dbat[4];
1.38 sanjayl 2647: #endif
1.53 garbled 2648: paddr_t sdr1;
1.1 matt 2649:
2650: cpuvers = MFPVR() >> 16;
1.35 perry 2651: __asm volatile ("mfsdr1 %0" : "=r"(sdr1));
1.53 garbled 2652: #ifndef PMAP_OEA64
1.16 kleink 2653: addr = 0;
1.27 chs 2654: for (i = 0; i < 16; i++) {
1.1 matt 2655: soft_sr[i] = MFSRIN(addr);
2656: addr += (1 << ADDR_SR_SHFT);
2657: }
1.18 matt 2658: #endif
1.1 matt 2659:
1.53 garbled 2660: #if defined (PMAP_OEA)
1.1 matt 2661: /* read iBAT (601: uBAT) registers */
1.35 perry 2662: __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
2663: __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
2664: __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
2665: __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
2666: __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
2667: __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
2668: __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
2669: __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
1.1 matt 2670:
2671:
2672: if (cpuvers != MPC601) {
2673: /* read dBAT registers */
1.35 perry 2674: __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
2675: __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
2676: __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
2677: __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
2678: __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
2679: __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
2680: __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
2681: __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
1.1 matt 2682: }
1.38 sanjayl 2683: #endif
1.1 matt 2684:
1.54 mlelstv 2685: printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
1.53 garbled 2686: #ifndef PMAP_OEA64
1.1 matt 2687: printf("SR[]:\t");
1.27 chs 2688: for (i = 0; i < 4; i++)
1.53 garbled 2689: printf("0x%08lx, ", soft_sr[i]);
1.1 matt 2690: printf("\n\t");
1.27 chs 2691: for ( ; i < 8; i++)
1.53 garbled 2692: printf("0x%08lx, ", soft_sr[i]);
1.1 matt 2693: printf("\n\t");
1.27 chs 2694: for ( ; i < 12; i++)
1.53 garbled 2695: printf("0x%08lx, ", soft_sr[i]);
1.1 matt 2696: printf("\n\t");
1.27 chs 2697: for ( ; i < 16; i++)
1.53 garbled 2698: printf("0x%08lx, ", soft_sr[i]);
1.1 matt 2699: printf("\n");
1.18 matt 2700: #endif
1.1 matt 2701:
1.53 garbled 2702: #if defined(PMAP_OEA)
1.1 matt 2703: printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
1.27 chs 2704: for (i = 0; i < 4; i++) {
1.2 matt 2705: printf("0x%08lx 0x%08lx, ",
1.1 matt 2706: soft_ibat[i].batu, soft_ibat[i].batl);
2707: if (i == 1)
2708: printf("\n\t");
2709: }
2710: if (cpuvers != MPC601) {
2711: printf("\ndBAT[]:\t");
1.27 chs 2712: for (i = 0; i < 4; i++) {
1.2 matt 2713: printf("0x%08lx 0x%08lx, ",
1.1 matt 2714: soft_dbat[i].batu, soft_dbat[i].batl);
2715: if (i == 1)
2716: printf("\n\t");
2717: }
2718: }
2719: printf("\n");
1.53 garbled 2720: #endif /* PMAP_OEA... */
1.1 matt 2721: }
2722:
2723: void
2724: pmap_print_pte(pmap_t pm, vaddr_t va)
2725: {
2726: struct pvo_entry *pvo;
1.2 matt 2727: volatile struct pte *pt;
1.1 matt 2728: int pteidx;
2729:
2730: pvo = pmap_pvo_find_va(pm, va, &pteidx);
2731: if (pvo != NULL) {
2732: pt = pmap_pvo_to_pte(pvo, pteidx);
2733: if (pt != NULL) {
1.53 garbled 2734: printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
1.38 sanjayl 2735: va, pt,
2736: pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
2737: pt->pte_hi, pt->pte_lo);
1.1 matt 2738: } else {
2739: printf("No valid PTE found\n");
2740: }
2741: } else {
2742: printf("Address not in pmap\n");
2743: }
2744: }
2745:
2746: void
2747: pmap_pteg_dist(void)
2748: {
2749: struct pvo_entry *pvo;
2750: int ptegidx;
2751: int depth;
2752: int max_depth = 0;
2753: unsigned int depths[64];
2754:
2755: memset(depths, 0, sizeof(depths));
2756: for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2757: depth = 0;
2758: TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2759: depth++;
2760: }
2761: if (depth > max_depth)
2762: max_depth = depth;
2763: if (depth > 63)
2764: depth = 63;
2765: depths[depth]++;
2766: }
2767:
2768: for (depth = 0; depth < 64; depth++) {
2769: printf(" [%2d]: %8u", depth, depths[depth]);
2770: if ((depth & 3) == 3)
2771: printf("\n");
2772: if (depth == max_depth)
2773: break;
2774: }
2775: if ((depth & 3) != 3)
2776: printf("\n");
2777: printf("Max depth found was %d\n", max_depth);
2778: }
2779: #endif /* DEBUG */
2780:
2781: #if defined(PMAPCHECK) || defined(DEBUG)
2782: void
2783: pmap_pvo_verify(void)
2784: {
2785: int ptegidx;
2786: int s;
2787:
2788: s = splvm();
2789: for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
2790: struct pvo_entry *pvo;
2791: TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
2792: if ((uintptr_t) pvo >= SEGMENT_LENGTH)
2793: panic("pmap_pvo_verify: invalid pvo %p "
2794: "on list %#x", pvo, ptegidx);
2795: pmap_pvo_check(pvo);
2796: }
2797: }
2798: splx(s);
2799: }
2800: #endif /* PMAPCHECK */
2801:
2802:
2803: void *
2804: pmap_pool_ualloc(struct pool *pp, int flags)
2805: {
2806: struct pvo_page *pvop;
2807:
1.50 ad 2808: if (uvm.page_init_done != true) {
2809: return (void *) uvm_pageboot_alloc(PAGE_SIZE);
2810: }
2811:
2812: PMAP_LOCK();
1.1 matt 2813: pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
2814: if (pvop != NULL) {
2815: pmap_upvop_free--;
2816: SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
1.50 ad 2817: PMAP_UNLOCK();
1.1 matt 2818: return pvop;
2819: }
1.50 ad 2820: PMAP_UNLOCK();
1.1 matt 2821: return pmap_pool_malloc(pp, flags);
2822: }
2823:
2824: void *
2825: pmap_pool_malloc(struct pool *pp, int flags)
2826: {
2827: struct pvo_page *pvop;
2828: struct vm_page *pg;
2829:
1.50 ad 2830: PMAP_LOCK();
1.1 matt 2831: pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
2832: if (pvop != NULL) {
2833: pmap_mpvop_free--;
2834: SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
1.50 ad 2835: PMAP_UNLOCK();
1.1 matt 2836: return pvop;
2837: }
1.50 ad 2838: PMAP_UNLOCK();
1.1 matt 2839: again:
2840: pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
2841: UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
2842: if (__predict_false(pg == NULL)) {
2843: if (flags & PR_WAITOK) {
2844: uvm_wait("plpg");
2845: goto again;
2846: } else {
2847: return (0);
2848: }
2849: }
1.53 garbled 2850: KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
2851: return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
1.1 matt 2852: }
2853:
2854: void
2855: pmap_pool_ufree(struct pool *pp, void *va)
2856: {
2857: struct pvo_page *pvop;
2858: #if 0
2859: if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
2860: pmap_pool_mfree(va, size, tag);
2861: return;
2862: }
2863: #endif
1.50 ad 2864: PMAP_LOCK();
1.1 matt 2865: pvop = va;
2866: SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
2867: pmap_upvop_free++;
2868: if (pmap_upvop_free > pmap_upvop_maxfree)
2869: pmap_upvop_maxfree = pmap_upvop_free;
1.50 ad 2870: PMAP_UNLOCK();
1.1 matt 2871: }
2872:
2873: void
2874: pmap_pool_mfree(struct pool *pp, void *va)
2875: {
2876: struct pvo_page *pvop;
2877:
1.50 ad 2878: PMAP_LOCK();
1.1 matt 2879: pvop = va;
2880: SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
2881: pmap_mpvop_free++;
2882: if (pmap_mpvop_free > pmap_mpvop_maxfree)
2883: pmap_mpvop_maxfree = pmap_mpvop_free;
1.50 ad 2884: PMAP_UNLOCK();
1.1 matt 2885: #if 0
2886: uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2887: #endif
2888: }
2889:
2890: /*
2891: 	 * This routine is used in bootstrapping to steal to-be-managed memory (which will
2892: * then be unmanaged). We use it to grab from the first 256MB for our
2893: * pmap needs and above 256MB for other stuff.
2894: */
2895: vaddr_t
1.10 thorpej 2896: pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
1.1 matt 2897: {
2898: vsize_t size;
2899: vaddr_t va;
2900: paddr_t pa = 0;
2901: int npgs, bank;
2902: struct vm_physseg *ps;
2903:
1.45 thorpej 2904: if (uvm.page_init_done == true)
1.1 matt 2905: panic("pmap_steal_memory: called _after_ bootstrap");
2906:
1.10 thorpej 2907: *vstartp = VM_MIN_KERNEL_ADDRESS;
2908: *vendp = VM_MAX_KERNEL_ADDRESS;
2909:
1.1 matt 2910: size = round_page(vsize);
2911: npgs = atop(size);
2912:
2913: /*
2914: * PA 0 will never be among those given to UVM so we can use it
2915: * to indicate we couldn't steal any memory.
2916: */
1.73 uebayasi 2917: for (bank = 0; bank < vm_nphysseg; bank++) {
2918: ps = VM_PHYSMEM_PTR(bank);
1.1 matt 2919: if (ps->free_list == VM_FREELIST_FIRST256 &&
2920: ps->avail_end - ps->avail_start >= npgs) {
2921: pa = ptoa(ps->avail_start);
2922: break;
2923: }
2924: }
2925:
2926: if (pa == 0)
2927: 		panic("pmap_steal_memory: no appropriate memory to steal!");
2928:
2929: ps->avail_start += npgs;
2930: ps->start += npgs;
2931:
2932: /*
2933: * If we've used up all the pages in the segment, remove it and
2934: * compact the list.
2935: */
2936: if (ps->avail_start == ps->end) {
2937: /*
2938: * If this was the last one, then a very bad thing has occurred
2939: */
2940: if (--vm_nphysseg == 0)
2941: panic("pmap_steal_memory: out of memory!");
2942:
2943: printf("pmap_steal_memory: consumed bank %d\n", bank);
2944: for (; bank < vm_nphysseg; bank++, ps++) {
2945: ps[0] = ps[1];
2946: }
2947: }
2948:
2949: va = (vaddr_t) pa;
1.46 christos 2950: memset((void *) va, 0, size);
1.1 matt 2951: pmap_pages_stolen += npgs;
2952: #ifdef DEBUG
2953: if (pmapdebug && npgs > 1) {
2954: u_int cnt = 0;
1.73 uebayasi 2955: for (bank = 0; bank < vm_nphysseg; bank++) {
2956: ps = VM_PHYSMEM_PTR(bank);
1.1 matt 2957: cnt += ps->avail_end - ps->avail_start;
1.73 uebayasi 2958: }
1.1 matt 2959: printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2960: npgs, pmap_pages_stolen, cnt);
2961: }
2962: #endif
2963:
2964: return va;
2965: }
2966:
2967: /*
2968:  * Find a chunk of memory with the right size and alignment.
2969: */
1.53 garbled 2970: paddr_t
1.1 matt 2971: pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2972: {
2973: struct mem_region *mp;
2974: paddr_t s, e;
2975: int i, j;
2976:
2977: size = round_page(size);
2978:
2979: DPRINTFN(BOOT,
1.54 mlelstv 2980: ("pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
1.1 matt 2981: size, alignment, at_end));
2982:
1.6 thorpej 2983: if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
1.54 mlelstv 2984: panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
1.1 matt 2985: alignment);
2986:
2987: if (at_end) {
1.6 thorpej 2988: if (alignment != PAGE_SIZE)
1.1 matt 2989: panic("pmap_boot_find_memory: invalid ending "
1.53 garbled 2990: "alignment %#" _PRIxpa, alignment);
1.1 matt 2991:
2992: for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
2993: s = mp->start + mp->size - size;
2994: if (s >= mp->start && mp->size >= size) {
1.54 mlelstv 2995: DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s));
1.1 matt 2996: DPRINTFN(BOOT,
2997: ("pmap_boot_find_memory: b-avail[%d] start "
1.54 mlelstv 2998: "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
1.1 matt 2999: mp->start, mp->size));
3000: mp->size -= size;
3001: DPRINTFN(BOOT,
3002: ("pmap_boot_find_memory: a-avail[%d] start "
1.54 mlelstv 3003: "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
1.1 matt 3004: mp->start, mp->size));
1.53 garbled 3005: return s;
1.1 matt 3006: }
3007: }
3008: panic("pmap_boot_find_memory: no available memory");
3009: }
3010:
3011: for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3012: s = (mp->start + alignment - 1) & ~(alignment-1);
3013: e = s + size;
3014:
3015: /*
3016: 		 * Is the calculated block entirely within this region?
3017: */
3018: if (s < mp->start || e > mp->start + mp->size)
3019: continue;
3020:
1.54 mlelstv 3021: DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s));
1.1 matt 3022: if (s == mp->start) {
3023: /*
3024: 			 * If the block starts at the beginning of the region,
3025: * adjust the size & start. (the region may now be
3026: * zero in length)
3027: */
3028: DPRINTFN(BOOT,
3029: ("pmap_boot_find_memory: b-avail[%d] start "
1.54 mlelstv 3030: "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
1.1 matt 3031: mp->start += size;
3032: mp->size -= size;
3033: DPRINTFN(BOOT,
3034: ("pmap_boot_find_memory: a-avail[%d] start "
1.54 mlelstv 3035: "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
1.1 matt 3036: } else if (e == mp->start + mp->size) {
3037: /*
3038: 			 * If the block ends at the end of the region,
3039: * adjust only the size.
3040: */
3041: DPRINTFN(BOOT,
3042: ("pmap_boot_find_memory: b-avail[%d] start "
1.54 mlelstv 3043: "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
1.1 matt 3044: mp->size -= size;
3045: DPRINTFN(BOOT,
3046: ("pmap_boot_find_memory: a-avail[%d] start "
1.54 mlelstv 3047: "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
1.1 matt 3048: } else {
3049: /*
3050: * Block is in the middle of the region, so we
3051: * have to split it in two.
3052: */
3053: for (j = avail_cnt; j > i + 1; j--) {
3054: avail[j] = avail[j-1];
3055: }
3056: DPRINTFN(BOOT,
3057: ("pmap_boot_find_memory: b-avail[%d] start "
1.54 mlelstv 3058: "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
1.1 matt 3059: mp[1].start = e;
3060: mp[1].size = mp[0].start + mp[0].size - e;
3061: mp[0].size = s - mp[0].start;
3062: avail_cnt++;
3063: for (; i < avail_cnt; i++) {
3064: DPRINTFN(BOOT,
3065: ("pmap_boot_find_memory: a-avail[%d] "
1.54 mlelstv 3066: "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
1.1 matt 3067: avail[i].start, avail[i].size));
3068: }
3069: }
1.53 garbled 3070: KASSERT(s == (uintptr_t) s);
3071: return s;
1.1 matt 3072: }
3073: panic("pmap_boot_find_memory: not enough memory for "
1.54 mlelstv 3074: "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
1.1 matt 3075: }
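
/*
 * Sketch (not compiled): the three ways pmap_boot_find_memory() above
 * carves an aligned block [s, e) out of an avail[] region: trim the
 * front, trim the back, or split the region in two.  The split case
 * assumes the caller has already shifted avail[] up to open a slot at
 * mp[1], exactly as the real code does.
 */
#if 0
static void
example_carve(struct mem_region *mp, paddr_t s, paddr_t e)
{
	if (s == mp->start) {
		/* Block at the front: advance start, shrink size. */
		mp->start += e - s;
		mp->size -= e - s;
	} else if (e == mp->start + mp->size) {
		/* Block at the back: shrink size only. */
		mp->size -= e - s;
	} else {
		/* Block in the middle: split into [start, s) and [e, end). */
		mp[1].start = e;
		mp[1].size = mp[0].start + mp[0].size - e;
		mp[0].size = s - mp[0].start;
	}
}
#endif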
3076:
1.38 sanjayl 3077: /* XXXSL: we don't have any BATs to do this, map in Segment 0 1:1 using page tables */
1.53 garbled 3078: #if defined (PMAP_OEA64_BRIDGE)
1.38 sanjayl 3079: int
3080: pmap_setup_segment0_map(int use_large_pages, ...)
3081: {
3082: vaddr_t va;
3083:     vaddr_t va, va_end;
3084: register_t pte_lo = 0x0;
3085: int ptegidx = 0, i = 0;
3086: struct pte pte;
3087: va_list ap;
3088:
3089: /* Coherent + Supervisor RW, no user access */
3090: pte_lo = PTE_M;
3091:
3092: /* XXXSL
3093:      * Map in the 1st segment 1:1; we'll be careful not to spill kernel
3094:      * entries later, as these have to take priority.
3095: */
3096: for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
3097: ptegidx = va_to_pteg(pmap_kernel(), va);
3098: pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
3099: i = pmap_pte_insert(ptegidx, &pte);
3100: }
3101:
3102: va_start(ap, use_large_pages);
3103: while (1) {
3104: paddr_t pa;
3105: size_t size;
3106:
3107: va = va_arg(ap, vaddr_t);
3108:
3109: if (va == 0)
3110: break;
3111:
3112: pa = va_arg(ap, paddr_t);
3113: size = va_arg(ap, size_t);
3114:
3115:         for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) {
3116: #if 0
1.54 mlelstv 3117: printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa);
1.38 sanjayl 3118: #endif
3119: ptegidx = va_to_pteg(pmap_kernel(), va);
3120: pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
3121: i = pmap_pte_insert(ptegidx, &pte);
3122: }
3123: }
3124:
3125: TLBSYNC();
3126: SYNC();
3127: return (0);
3128: }
1.53 garbled 3129: #endif /* PMAP_OEA64_BRIDGE */
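
/*
 * Hypothetical usage sketch (not compiled): pmap_setup_segment0_map()
 * above takes (va, pa, size) triples after the use_large_pages flag,
 * terminated by a zero va.  The device range below is made up purely
 * for illustration.
 */
#if 0
	pmap_setup_segment0_map(0,
	    (vaddr_t)0xf0000000, (paddr_t)0xf0000000, (size_t)0x00100000,
	    (vaddr_t)0);
#endif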
1.38 sanjayl 3130:
1.1 matt 3131: /*
3132: * This is not part of the defined PMAP interface and is specific to the
3133: * PowerPC architecture. This is called during initppc, before the system
3134: * is really initialized.
3135: */
3136: void
3137: pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
3138: {
3139: struct mem_region *mp, tmp;
3140: paddr_t s, e;
3141: psize_t size;
3142: int i, j;
3143:
3144: /*
3145: * Get memory.
3146: */
3147: mem_regions(&mem, &avail);
3148: #if defined(DEBUG)
3149: if (pmapdebug & PMAPDEBUG_BOOT) {
3150: printf("pmap_bootstrap: memory configuration:\n");
3151: for (mp = mem; mp->size; mp++) {
1.54 mlelstv 3152: printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
1.1 matt 3153: mp->start, mp->size);
3154: }
3155: for (mp = avail; mp->size; mp++) {
1.54 mlelstv 3156: printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
1.1 matt 3157: mp->start, mp->size);
3158: }
3159: }
3160: #endif
3161:
3162: /*
3163: * Find out how much physical memory we have and in how many chunks.
3164: */
3165: for (mem_cnt = 0, mp = mem; mp->size; mp++) {
3166: if (mp->start >= pmap_memlimit)
3167: continue;
3168: if (mp->start + mp->size > pmap_memlimit) {
3169: size = pmap_memlimit - mp->start;
3170: physmem += btoc(size);
3171: } else {
3172: physmem += btoc(mp->size);
3173: }
3174: mem_cnt++;
3175: }
3176:
3177: /*
3178: * Count the number of available entries.
3179: */
3180: for (avail_cnt = 0, mp = avail; mp->size; mp++)
3181: avail_cnt++;
3182:
3183: /*
3184: * Page align all regions.
3185: */
3186: kernelstart = trunc_page(kernelstart);
3187: kernelend = round_page(kernelend);
3188: for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3189: s = round_page(mp->start);
3190: mp->size -= (s - mp->start);
3191: mp->size = trunc_page(mp->size);
3192: mp->start = s;
3193: e = mp->start + mp->size;
3194:
3195: DPRINTFN(BOOT,
1.54 mlelstv 3196: ("pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
1.1 matt 3197: i, mp->start, mp->size));
3198:
3199: /*
3200: * Don't allow the end to run beyond our artificial limit
3201: */
3202: if (e > pmap_memlimit)
3203: e = pmap_memlimit;
3204:
3205: /*
3206: 		 * Is this region empty or strange? Skip it.
3207: */
3208: if (e <= s) {
3209: mp->start = 0;
3210: mp->size = 0;
3211: continue;
3212: }
3213:
3214: /*
3215: 		 * Does this overlap the beginning of the kernel?
3216: 		 * Does it extend past the end of the kernel?
3217: */
3218: else if (s < kernelstart && e > kernelstart) {
3219: if (e > kernelend) {
3220: avail[avail_cnt].start = kernelend;
3221: avail[avail_cnt].size = e - kernelend;
3222: avail_cnt++;
3223: }
3224: mp->size = kernelstart - s;
3225: }
3226: /*
3227: * Check whether this region overlaps the end of the kernel.
3228: */
3229: else if (s < kernelend && e > kernelend) {
3230: mp->start = kernelend;
3231: mp->size = e - kernelend;
3232: }
3233: /*
3234: 		 * Check whether this region is completely inside the kernel.
3235: 		 * Nuke it if it is.
3236: */
3237: else if (s >= kernelstart && e <= kernelend) {
3238: mp->start = 0;
3239: mp->size = 0;
3240: }
3241: /*
3242: * If the user imposed a memory limit, enforce it.
3243: */
3244: else if (s >= pmap_memlimit) {
1.6 thorpej 3245: 			mp->start = -PAGE_SIZE;	/* so we know why */
1.1 matt 3246: mp->size = 0;
3247: }
3248: else {
3249: mp->start = s;
3250: mp->size = e - s;
3251: }
3252: DPRINTFN(BOOT,
1.54 mlelstv 3253: ("pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
1.1 matt 3254: i, mp->start, mp->size));
3255: }
3256:
3257: /*
3258: 	 * Move (and uncount) all the null regions to the end.
3259: */
3260: for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3261: if (mp->size == 0) {
3262: tmp = avail[i];
3263: avail[i] = avail[--avail_cnt];
3264: 			avail[avail_cnt] = tmp;
3265: }
3266: }
3267:
3268: /*
1.61 skrll 3269: * (Bubble)sort them into ascending order.
1.1 matt 3270: */
3271: for (i = 0; i < avail_cnt; i++) {
3272: for (j = i + 1; j < avail_cnt; j++) {
3273: if (avail[i].start > avail[j].start) {
3274: tmp = avail[i];
3275: avail[i] = avail[j];
3276: avail[j] = tmp;
3277: }
3278: }
3279: }
3280:
3281: /*
3282: * Make sure they don't overlap.
3283: */
3284: for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
3285: if (mp[0].start + mp[0].size > mp[1].start) {
3286: mp[0].size = mp[1].start - mp[0].start;
3287: }
3288: DPRINTFN(BOOT,
1.54 mlelstv 3289: ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
1.1 matt 3290: i, mp->start, mp->size));
3291: }
3292: DPRINTFN(BOOT,
1.54 mlelstv 3293: ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
1.1 matt 3294: i, mp->start, mp->size));
3295:
3296: #ifdef PTEGCOUNT
3297: pmap_pteg_cnt = PTEGCOUNT;
3298: #else /* PTEGCOUNT */
1.38 sanjayl 3299:
1.1 matt 3300: pmap_pteg_cnt = 0x1000;
3301:
3302: while (pmap_pteg_cnt < physmem)
3303: pmap_pteg_cnt <<= 1;
3304:
3305: pmap_pteg_cnt >>= 1;
3306: #endif /* PTEGCOUNT */
3307:
1.38 sanjayl 3308: #ifdef DEBUG
3309: DPRINTFN(BOOT,
3310: ("pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt));
3311: #endif
3312:
1.1 matt 3313: /*
3314: * Find suitably aligned memory for PTEG hash table.
3315: */
1.2 matt 3316: size = pmap_pteg_cnt * sizeof(struct pteg);
1.53 garbled 3317: pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);
1.38 sanjayl 3318:
3319: #ifdef DEBUG
3320: DPRINTFN(BOOT,
3321: ("PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table));
3322: #endif
3323:
3324:
1.1 matt 3325: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3326: if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
1.54 mlelstv 3327: panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB",
1.1 matt 3328: pmap_pteg_table, size);
3329: #endif
3330:
1.32 he 3331: memset(__UNVOLATILE(pmap_pteg_table), 0,
3332: pmap_pteg_cnt * sizeof(struct pteg));
1.1 matt 3333: pmap_pteg_mask = pmap_pteg_cnt - 1;
3334:
3335: /*
3336: * We cannot do pmap_steal_memory here since UVM hasn't been loaded
3337: * with pages. So we just steal them before giving them to UVM.
3338: */
3339: size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
1.53 garbled 3340: pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
1.1 matt 3341: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
3342: if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
1.54 mlelstv 3343: panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB",
1.1 matt 3344: pmap_pvo_table, size);
3345: #endif
3346:
3347: for (i = 0; i < pmap_pteg_cnt; i++)
3348: TAILQ_INIT(&pmap_pvo_table[i]);
3349:
3350: #ifndef MSGBUFADDR
3351: /*
3352: * Allocate msgbuf in high memory.
3353: */
1.53 garbled 3354: msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
1.1 matt 3355: #endif
3356:
3357: for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
3358: paddr_t pfstart = atop(mp->start);
3359: paddr_t pfend = atop(mp->start + mp->size);
3360: if (mp->size == 0)
3361: continue;
3362: if (mp->start + mp->size <= SEGMENT_LENGTH) {
3363: uvm_page_physload(pfstart, pfend, pfstart, pfend,
3364: VM_FREELIST_FIRST256);
3365: } else if (mp->start >= SEGMENT_LENGTH) {
3366: uvm_page_physload(pfstart, pfend, pfstart, pfend,
3367: VM_FREELIST_DEFAULT);
3368: } else {
3369: pfend = atop(SEGMENT_LENGTH);
3370: uvm_page_physload(pfstart, pfend, pfstart, pfend,
3371: VM_FREELIST_FIRST256);
3372: pfstart = atop(SEGMENT_LENGTH);
3373: pfend = atop(mp->start + mp->size);
3374: uvm_page_physload(pfstart, pfend, pfstart, pfend,
3375: VM_FREELIST_DEFAULT);
3376: }
3377: }
3378:
3379: /*
3380: * Make sure kernel vsid is allocated as well as VSID 0.
3381: */
3382: pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3383: |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
1.53 garbled 3384: pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
3385: |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
1.1 matt 3386: pmap_vsid_bitmap[0] |= 1;
3387:
3388: /*
3389: * Initialize kernel pmap and hardware.
3390: */
1.38 sanjayl 3391:
1.53 garbled 3392: /* PMAP_OEA64_BRIDGE does support these instructions */
3393: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.1 matt 3394: for (i = 0; i < 16; i++) {
1.38 sanjayl 3395: pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
1.35 perry 3396: __asm volatile ("mtsrin %0,%1"
1.38 sanjayl 3397: :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
1.1 matt 3398: }
3399:
3400: pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
1.35 perry 3401: __asm volatile ("mtsr %0,%1"
1.1 matt 3402: :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
3403: #ifdef KERNEL2_SR
3404: pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
1.35 perry 3405: __asm volatile ("mtsr %0,%1"
1.1 matt 3406: :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
3407: #endif
1.53 garbled 3408: #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
3409: #if defined (PMAP_OEA)
1.1 matt 3410: for (i = 0; i < 16; i++) {
3411: if (iosrtable[i] & SR601_T) {
3412: pmap_kernel()->pm_sr[i] = iosrtable[i];
1.35 perry 3413: __asm volatile ("mtsrin %0,%1"
1.1 matt 3414: :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
3415: }
3416: }
1.35 perry 3417: __asm volatile ("sync; mtsdr1 %0; isync"
1.2 matt 3418: :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
1.53 garbled 3419: #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
1.38 sanjayl 3420: __asm __volatile ("sync; mtsdr1 %0; isync"
3421: :: "r"((uintptr_t)pmap_pteg_table | (32 - cntlzw(pmap_pteg_mask >> 11))));
3422: #endif
1.1 matt 3423: tlbia();
3424:
3425: #ifdef ALTIVEC
3426: pmap_use_altivec = cpu_altivec;
3427: #endif
3428:
3429: #ifdef DEBUG
3430: if (pmapdebug & PMAPDEBUG_BOOT) {
3431: u_int cnt;
3432: int bank;
3433: char pbuf[9];
3434: for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
1.73 uebayasi 3435: cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start;
1.53 garbled 3436: printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
1.1 matt 3437: bank,
1.73 uebayasi 3438: ptoa(VM_PHYSMEM_PTR(bank)->avail_start),
3439: ptoa(VM_PHYSMEM_PTR(bank)->avail_end),
3440: ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start));
1.1 matt 3441: }
3442: format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
3443: printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
3444: pbuf, cnt);
3445: }
3446: #endif
3447:
3448: pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
3449: sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
1.60 chs 3450: &pmap_pool_uallocator, IPL_VM);
1.1 matt 3451:
3452: pool_setlowat(&pmap_upvo_pool, 252);
3453:
3454: pool_init(&pmap_pool, sizeof(struct pmap),
1.48 ad 3455: sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
3456: IPL_NONE);
1.41 matt 3457:
1.53 garbled 3458: #if defined(PMAP_NEED_MAPKERNEL) || 1
1.41 matt 3459: {
1.53 garbled 3460: struct pmap *pm = pmap_kernel();
1.58 garbled 3461: #if defined(PMAP_NEED_FULL_MAPKERNEL)
1.41 matt 3462: extern int etext[], kernel_text[];
3463: vaddr_t va, va_etext = (paddr_t) etext;
1.53 garbled 3464: #endif
3465: paddr_t pa, pa_end;
1.42 matt 3466: register_t sr;
1.53 garbled 3467: struct pte pt;
3468: unsigned int ptegidx;
3469: int bank;
1.42 matt 3470:
1.53 garbled 3471: sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;
3472: pm->pm_sr[0] = sr;
1.41 matt 3473:
1.53 garbled 3474: for (bank = 0; bank < vm_nphysseg; bank++) {
1.73 uebayasi 3475: pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
3476: pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
1.53 garbled 3477: for (; pa < pa_end; pa += PAGE_SIZE) {
3478: ptegidx = va_to_pteg(pm, pa);
3479: pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
3480: pmap_pte_insert(ptegidx, &pt);
3481: }
3482: }
3483:
1.58 garbled 3484: #if defined(PMAP_NEED_FULL_MAPKERNEL)
1.41 matt 3485: va = (vaddr_t) kernel_text;
3486:
3487: for (pa = kernelstart; va < va_etext;
1.53 garbled 3488: pa += PAGE_SIZE, va += PAGE_SIZE) {
3489: ptegidx = va_to_pteg(pm, va);
3490: pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
3491: pmap_pte_insert(ptegidx, &pt);
3492: }
1.41 matt 3493:
3494: for (; pa < kernelend;
1.53 garbled 3495: pa += PAGE_SIZE, va += PAGE_SIZE) {
3496: ptegidx = va_to_pteg(pm, va);
3497: pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3498: pmap_pte_insert(ptegidx, &pt);
3499: }
3500:
1.58 garbled 3501: for (va = 0, pa = 0; va < kernelstart;
1.53 garbled 3502: pa += PAGE_SIZE, va += PAGE_SIZE) {
3503: ptegidx = va_to_pteg(pm, va);
1.58 garbled 3504: if (va < 0x3000)
3505: pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
3506: else
3507: pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3508: pmap_pte_insert(ptegidx, &pt);
3509: }
3510: for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH;
3511: pa += PAGE_SIZE, va += PAGE_SIZE) {
3512: ptegidx = va_to_pteg(pm, va);
1.53 garbled 3513: pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
3514: pmap_pte_insert(ptegidx, &pt);
3515: }
3516: #endif
1.42 matt 3517:
3518: __asm volatile ("mtsrin %0,%1"
3519: :: "r"(sr), "r"(kernelstart));
1.41 matt 3520: }
3521: #endif
1.1 matt 3522: }