Annotation of src/sys/arch/hppa/hppa/pmap.c, Revision 1.22
1.22 ! skrll 1: /* $NetBSD: pmap.c,v 1.21 2006/08/24 07:00:46 skrll Exp $ */
1.1 fredette 2:
3: /*-
4: * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Matthew Fredette.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
39: /* $OpenBSD: pmap.c,v 1.46 2001/07/25 13:25:31 art Exp $ */
40:
41: /*
42: * Copyright (c) 1998-2001 Michael Shalayeff
43: * All rights reserved.
44: *
45: * Redistribution and use in source and binary forms, with or without
46: * modification, are permitted provided that the following conditions
47: * are met:
48: * 1. Redistributions of source code must retain the above copyright
49: * notice, this list of conditions and the following disclaimer.
50: * 2. Redistributions in binary form must reproduce the above copyright
51: * notice, this list of conditions and the following disclaimer in the
52: * documentation and/or other materials provided with the distribution.
53: * 3. All advertising materials mentioning features or use of this software
54: * must display the following acknowledgement:
55: * This product includes software developed by Michael Shalayeff.
56: * 4. The name of the author may not be used to endorse or promote products
57: * derived from this software without specific prior written permission.
58: *
59: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
60: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
61: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
62: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
63: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
64: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
65: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
66: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
67: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
68: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
69: */
70: /*
71: * Copyright 1996 1995 by Open Software Foundation, Inc.
72: * All Rights Reserved
73: *
74: * Permission to use, copy, modify, and distribute this software and
75: * its documentation for any purpose and without fee is hereby granted,
76: * provided that the above copyright notice appears in all copies and
77: * that both the copyright notice and this permission notice appear in
78: * supporting documentation.
79: *
80: * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
81: * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
82: * FOR A PARTICULAR PURPOSE.
83: *
84: * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
85: * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
86: * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
87: * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
88: * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
89: */
90: /*
91: * Mach Operating System
92: * Copyright (c) 1990,1991,1992,1993,1994 The University of Utah and
93: * the Computer Systems Laboratory (CSL).
94: * Copyright (c) 1991,1987 Carnegie Mellon University.
95: * All rights reserved.
96: *
97: * Permission to use, copy, modify and distribute this software and its
98: * documentation is hereby granted, provided that both the copyright
99: * notice and this permission notice appear in all copies of the
100: * software, derivative works or modified versions, and any portions
101: * thereof, and that both notices appear in supporting documentation,
102: * and that all advertising materials mentioning features or use of
103: * this software display the following acknowledgement: ``This product
104: * includes software developed by the Computer Systems Laboratory at
105: * the University of Utah.''
106: *
107: * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
108: * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
109: * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
110: * THIS SOFTWARE.
111: *
112: * CSL requests users of this software to return to csl-dist@cs.utah.edu any
113: * improvements that they make and grant CSL redistribution rights.
114: *
115: * Carnegie Mellon requests users of this software to return to
116: * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
117: * School of Computer Science
118: * Carnegie Mellon University
119: * Pittsburgh PA 15213-3890
120: * any improvements or extensions that they make and grant Carnegie Mellon
121: * the rights to redistribute these changes.
122: *
123: * Utah $Hdr: pmap.c 1.49 94/12/15$
124: * Author: Mike Hibler, Bob Wheeler, University of Utah CSL, 10/90
125: */
126: /*
127: * Manages physical address maps for hppa.
128: *
129: * In addition to hardware address maps, this
130: * module is called upon to provide software-use-only
131: * maps which may or may not be stored in the same
132: * form as hardware maps. These pseudo-maps are
133: * used to store intermediate results from copy
134: * operations to and from address spaces.
135: *
136: * Since the information managed by this module is
137: * also stored by the logical address mapping module,
138: * this module may throw away valid virtual-to-physical
139: * mappings at almost any time. However, invalidations
140: * of virtual-to-physical mappings must be done as
141: * requested.
142: *
                 143:  * In order to cope with hardware architectures which
                 144:  * make virtual-to-physical map invalidates expensive,
                 145:  * this module may delay invalidate or reduce-protection
                 146:  * operations until such time as they are actually
                 147:  * necessary.  This module is given full information as
                 148:  * to when physical maps must be made correct.
149: *
150: */
151: /*
152: * CAVEATS:
153: *
154: * Needs more work for MP support
155: * page maps are stored as linear linked lists, some
                 156:  *	improvement may be achieved should we use something else
157: * protection id (pid) allocation should be done in a pid_t fashion
158: * (maybe just use the pid itself)
                 159:  *	some people say block TLB entries should be maintained somewhere in uvm
160: * and be ready for reloads in the fault handler.
1.18 perry 161: * usage of inline grows the code size by 100%, but hopefully
1.1 fredette 162: * makes it faster as well, since the functions are actually
163: * very small.
                 164:  *		retail: 8.1K -> 15.1K
                 165:  *		debug: 12.2K -> 22.1K
166: *
167: * References:
168: * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
169: * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
170: *
171: */
1.9 lukem 172:
173: #include <sys/cdefs.h>
1.22 ! skrll 174: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.21 2006/08/24 07:00:46 skrll Exp $");
1.1 fredette 175:
176: #include <sys/param.h>
177: #include <sys/systm.h>
178: #include <sys/lock.h>
179: #include <sys/malloc.h>
180: #include <sys/user.h>
181: #include <sys/proc.h>
182:
183: #include <uvm/uvm.h>
184:
185: #include <machine/reg.h>
186: #include <machine/psl.h>
187: #include <machine/cpu.h>
188: #include <machine/pmap.h>
189: #include <machine/pte.h>
190: #include <machine/cpufunc.h>
191:
192: #include <hppa/hppa/hpt.h>
193: #include <hppa/hppa/machdep.h>
194:
195: #define static /**/
1.18 perry 196: #define inline /* */
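/*
 * (The two defines above strip static and inline from the functions
 * below, presumably so that debug kernels keep them all as separate,
 * visible symbols.)
 */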
1.1 fredette 197:
198: #ifdef PMAPDEBUG
199: #define PDB_INIT 0x00000002
200: #define PDB_ENTER 0x00000004
201: #define PDB_REMOVE 0x00000008
202: #define PDB_KENTER 0x00000010
203: #define PDB_PMAP 0x00000020
204: #define PDB_CACHE 0x00000040
205: #define PDB_BITS 0x00000080
206: #define PDB_EXTRACT 0x00000100
207: #define PDB_PROTECT 0x00000200
208: #define PDB_PV_ALLOC 0x00000400
209: #define PDB_PV_ENTER 0x00000800
210: #define PDB_PV_REMOVE 0x00001000
211: #define PDB_PV_FIND_VA 0x00002000
212: #define PDB_WIRING 0x00004000
213: #define PDB_ZERO 0x00008000
214: #define PDB_STEAL 0x00010000
215: #define PDB_COPY 0x00020000
216: int pmapdebug = 0
217: #if 1
218: | PDB_ENTER
219: | PDB_REMOVE
220: | PDB_KENTER
221: | PDB_BITS
222: | PDB_PROTECT
223: | PDB_EXTRACT
224: | PDB_WIRING
225: | PDB_ZERO
226: | PDB_STEAL
227: | PDB_COPY
228: #endif
229: ;
230: #define PMAP_PRINTF_MASK(m,v,x) do { \
231: if ((pmapdebug & (m)) == (v)) { \
232: printf("%s", __FUNCTION__); \
233: printf x; \
234: } \
235: } while(/* CONSTCOND */ 0)
236: #else
237: #define PMAP_PRINTF_MASK(m,v,x) do { } while(/* CONSTCOND */ 0)
238: #endif
239: #define PMAP_PRINTF(v,x) PMAP_PRINTF_MASK(v,v,x)
240:
1.8 thorpej 241: vaddr_t virtual_steal, virtual_start, virtual_end;
1.1 fredette 242:
243: /* These two virtual pages are available for copying and zeroing. */
244: static vaddr_t tmp_vpages[2];
245:
246: /* Free list of PV entries. */
247: static struct pv_entry *pv_free_list;
248:
1.3 fredette 249: /* This is an array of struct pv_head, one per physical page. */
250: static struct pv_head *pv_head_tbl;
1.1 fredette 251:
1.3 fredette 252: /*
253: * This is a bitmap of page-is-aliased bits.
254: * The magic 5 is log2(sizeof(u_int) * 8), and the magic 31 is 2^5 - 1.
1.1 fredette 255: */
1.3 fredette 256: static u_int *page_aliased_bitmap;
257: #define _PAGE_ALIASED_WORD(pa) page_aliased_bitmap[((pa) >> PGSHIFT) >> 5]
258: #define _PAGE_ALIASED_BIT(pa) (1 << (((pa) >> PGSHIFT) & 31))
259: #define PAGE_IS_ALIASED(pa) (_PAGE_ALIASED_WORD(pa) & _PAGE_ALIASED_BIT(pa))
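/*
 * A worked example, assuming the usual 4KB pages (PGSHIFT == 12):
 * for pa 0x00345000 the page frame number is 0x345 (837), so its
 * aliased bit lives in page_aliased_bitmap[837 >> 5], i.e. word 26,
 * at bit (1 << (837 & 31)), i.e. bit 5.
 */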
1.1 fredette 260:
261: struct pmap kernel_pmap_store;
262: pmap_t kernel_pmap;
263: boolean_t pmap_initialized = FALSE;
264:
265: TAILQ_HEAD(, pmap) pmap_freelist; /* list of free pmaps */
266: u_int pmap_nfree;
267: struct simplelock pmap_freelock; /* and lock */
268:
269: struct simplelock pmap_lock; /* XXX this is all broken */
270: struct simplelock sid_pid_lock; /* pids */
271:
272: u_int pages_per_vm_page;
273: u_int pid_counter;
274:
275: #ifdef PMAPDEBUG
1.11 chs 276: void pmap_hptdump(void);
1.1 fredette 277: #endif
278:
279: u_int kern_prot[8], user_prot[8];
280:
281: vaddr_t hpt_base;
282: vsize_t hpt_mask;
283:
284: /*
1.3 fredette 285: * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set
286: * Reference Manual" (HP part number 09740-90039) defines equivalent
287: * and non-equivalent virtual addresses in the cache.
288: *
289: * This macro evaluates to TRUE iff the two space/virtual address
290: * combinations are non-equivalent aliases, and therefore will find
291: * two different locations in the cache.
292: *
293: * NB: currently, the CPU-specific desidhash() functions disable the
294: * use of the space in all cache hashing functions. This means that
295: * this macro definition is stricter than it has to be (because it
296: * takes space into account), but one day cache space hashing should
297: * be re-enabled. Cache space hashing should yield better performance
298: * through better utilization of the cache, assuming that most aliasing
299: * is the read-only kind, which we do allow in the cache.
300: */
301: #define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
302: (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
303: ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))
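/*
 * Usage sketch: pmap_pv_check_alias() below walks all mappings of a
 * physical page and marks the page aliased as soon as any pair
 * satisfies NON_EQUIVALENT_ALIAS().  For example, two mappings of
 * one page at VAs that differ outside HPPA_PGAMASK index different
 * cache locations, so they must not both be writable and cached.
 */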
1.1 fredette 304:
1.2 fredette 305: /* Prototypes. */
1.11 chs 306: void __pmap_pv_update(paddr_t, struct pv_entry *, u_int, u_int);
1.18 perry 307: static inline void pmap_pv_remove(struct pv_entry *);
1.2 fredette 308:
1.1 fredette 309: /*
310: * Given a directly-mapped region, this makes pv_entries out of it and
311: * adds them to the free list.
312: */
1.18 perry 313: static inline void pmap_pv_add(vaddr_t, vaddr_t);
314: static inline void
1.1 fredette 315: pmap_pv_add(vaddr_t pv_start, vaddr_t pv_end)
316: {
317: struct pv_entry *pv;
318: int s;
319:
320: /* Align pv_start, then add the new pv_entries. */
321: pv_start = (pv_start + sizeof(*pv) - 1) & ~(sizeof(*pv) - 1);
322: pv = (struct pv_entry *) pv_start;
323: s = splvm();
324: while ((vaddr_t)(pv + 1) <= pv_end) {
325: pv->pv_next = pv_free_list;
326: pv_free_list = pv;
327: pv++;
328: }
329: splx(s);
330:
1.19 skrll 331: PMAP_PRINTF(PDB_INIT, (": %ld pv_entries @ %x allocated\n",
1.1 fredette 332: (pv - (struct pv_entry *) pv_start), (u_int)pv_start));
333: }
334:
335: /*
336: * This allocates and returns a new struct pv_entry.
337: *
1.2 fredette 338: * If we run out of preallocated struct pv_entries, we have to forcibly
339: * free one. malloc() isn't an option, because a) we'll probably end
340: * up back here anyways when malloc() maps what it's trying to return to
341: * us, and b) even if malloc() did succeed, the TLB fault handlers run
342: * in physical mode and thus require that all pv_entries be directly
1.1 fredette 343: * mapped, a quality unlikely for malloc()-returned memory.
344: */
1.18 perry 345: static inline struct pv_entry *pmap_pv_alloc(void);
346: static inline struct pv_entry *
1.1 fredette 347: pmap_pv_alloc(void)
348: {
1.2 fredette 349: struct pv_entry *pv, *pv_fallback;
350: u_int hpt_index_first, hpt_index, hpt_size;
351: struct hpt_entry *hpt;
1.1 fredette 352:
353: pv = pv_free_list;
1.2 fredette 354: if (pv == NULL) {
355: /*
356: * We need to find a struct pv_entry to forcibly
1.3 fredette 357: * free. It cannot be wired. We prefer to free
358: * mappings that aren't marked as referenced. We
359: * search the HPT for an entry to free, starting
360: * at a semirandom HPT index determined by the
361: * current value of the interval timer.
1.2 fredette 362: */
363: hpt_size = hpt_mask / sizeof(*hpt);
364: mfctl(CR_ITMR, hpt_index_first);
365: hpt_index = hpt_index_first = hpt_index_first & hpt_size;
366: pv_fallback = NULL;
367: do {
368: hpt = ((struct hpt_entry *) hpt_base) + hpt_index;
369: for (pv = hpt->hpt_entry;
370: pv != NULL;
371: pv = pv->pv_hash) {
1.3 fredette 372: if (!(pv->pv_tlbprot & TLB_WIRED)) {
1.2 fredette 373: if (!(pv->pv_tlbprot & TLB_REF))
374: break;
375: pv_fallback = pv;
376: }
377: }
378: if (pv != NULL)
379: break;
380: if (pv_fallback != NULL) {
381: pv = pv_fallback;
382: break;
383: }
384: hpt_index = (hpt_index + 1) & hpt_size;
385: } while (hpt_index != hpt_index_first);
386:
387: /* Remove the mapping. */
388: if (pv != NULL) {
389: KASSERT(pv->pv_pmap->pmap_stats.resident_count > 0);
390: pv->pv_pmap->pmap_stats.resident_count--;
391: pmap_pv_remove(pv);
392: pv = pv_free_list;
393: }
394:
395: if (pv == NULL)
396: panic("out of pv_entries");
397:
398: }
1.1 fredette 399: pv_free_list = pv->pv_next;
400: pv->pv_next = NULL;
401:
402: PMAP_PRINTF(PDB_PV_ALLOC, ("() = %p\n", pv));
403: return pv;
404: }
405:
406: /*
407: * Given a struct pv_entry allocated by pmap_pv_alloc, this frees it.
408: */
1.18 perry 409: static inline void pmap_pv_free(struct pv_entry *);
410: static inline void
1.1 fredette 411: pmap_pv_free(struct pv_entry *pv)
412: {
413: PMAP_PRINTF(PDB_PV_ALLOC, ("(%p)\n", pv));
414:
415: pv->pv_next = pv_free_list;
416: pv_free_list = pv;
417: }
418:
419: /*
420: * Given a VA, this hashes it into an HPT index.
421: *
422: * This hash function is the one used by the hardware TLB filler on
423: * the 7100LC, to index the hardware page table (HPT), which is sort
424: * of a cache of TLB entries.
425: *
426: * On other CPUs, locore.S has a software TLB filler that does exactly
427: * the same thing, right down to using this same hash function.
428: *
429: * This HPT is also used as a general VA->PA mapping store, with
430: * struct pv_entry chains hanging off of the HPT entries.
431: */
1.18 perry 432: static inline struct hpt_entry *pmap_hpt_hash(pa_space_t, vaddr_t);
433: static inline struct hpt_entry *
1.1 fredette 434: pmap_hpt_hash(pa_space_t sp, vaddr_t va)
435: {
436: struct hpt_entry *hpt;
1.18 perry 437: __asm volatile (
1.1 fredette 438: "extru %2, 23, 20, %%r22\n\t" /* r22 = (va >> 8) */
439: "zdep %1, 22, 16, %%r23\n\t" /* r23 = (sp << 9) */
440: "dep %%r0, 31, 4, %%r22\n\t" /* r22 &= ~0xf */
441: "xor %%r22,%%r23, %%r23\n\t" /* r23 ^= r22 */
442: "mfctl %%cr24, %%r22\n\t" /* r22 = sizeof(HPT)-1 */
443: "and %%r22,%%r23, %%r23\n\t" /* r23 &= r22 */
444: "mfctl %%cr25, %%r22\n\t" /* r22 = addr of HPT table */
445: "or %%r23, %%r22, %0" /* %0 = HPT entry */
446: : "=r" (hpt) : "r" (sp), "r" (va) : "r22", "r23");
447: return hpt;
448: }
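/*
 * For reference, a C sketch of the same hash (ignoring the high-bit
 * truncation done by extru/zdep, which the final mask hides for any
 * reasonable HPT size):
 *
 *	vaddr_t off = ((va >> 8) & ~(vaddr_t)0xf) ^ ((vaddr_t)sp << 9);
 *	hpt = (struct hpt_entry *)(hpt_base | (off & hpt_mask));
 */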
449:
450: /*
451: * Given a PA, returns the table offset for it.
452: */
1.18 perry 453: static inline int pmap_table_find_pa(paddr_t);
454: static inline int
1.1 fredette 455: pmap_table_find_pa(paddr_t pa)
456: {
1.2 fredette 457: int off;
1.1 fredette 458:
1.2 fredette 459: off = atop(pa);
460: return (off < totalphysmem) ? off : -1;
1.1 fredette 461: }
462:
463: /*
464: * Given a PA, returns the first mapping for it.
465: */
1.18 perry 466: static inline struct pv_entry *pmap_pv_find_pa(paddr_t);
467: static inline struct pv_entry *
1.1 fredette 468: pmap_pv_find_pa(paddr_t pa)
469: {
470: int table_off;
471:
472: table_off = pmap_table_find_pa(pa);
473: KASSERT(table_off >= 0);
1.3 fredette 474: return pv_head_tbl[table_off].pv_head_pvs;
1.1 fredette 475: }
476:
477: /*
478: * Given a VA, this finds any mapping for it.
479: */
1.18 perry 480: static inline struct pv_entry *pmap_pv_find_va(pa_space_t, vaddr_t);
481: static inline struct pv_entry *
1.1 fredette 482: pmap_pv_find_va(pa_space_t space, vaddr_t va)
483: {
484: struct pv_entry *pv = pmap_hpt_hash(space, va)->hpt_entry;
485:
486: while(pv && (pv->pv_va != va || pv->pv_space != space))
487: pv = pv->pv_hash;
488:
489: PMAP_PRINTF(PDB_PV_FIND_VA, ("(0x%x:%p) = %p\n",
490: space, (caddr_t)va, pv));
491: return pv;
492: }
493:
494: /*
1.3 fredette 495: * Given a page's PA, checks for non-equivalent aliasing,
496: * and stores and returns the result.
497: */
1.11 chs 498: static int pmap_pv_check_alias(paddr_t);
1.3 fredette 499: static int
500: pmap_pv_check_alias(paddr_t pa)
501: {
502: struct pv_entry *pv_outer, *pv;
503: pa_space_t space;
504: vaddr_t va;
505: int aliased;
506: u_int *aliased_word, aliased_bit;
507:
508: /* By default we find no aliasing. */
509: aliased = FALSE;
510:
511: /*
1.5 fredette 512: * We should never get called on I/O pages.
1.3 fredette 513: */
1.5 fredette 514: KASSERT(pa < HPPA_IOSPACE);
1.3 fredette 515:
516: /*
517: * Make an outer loop over the mappings, checking
518: * each following inner mapping for non-equivalent
519: * aliasing. If the non-equivalent alias relation
520: * is deemed transitive, this outer loop only needs
521: * one iteration.
522: */
1.5 fredette 523: for (pv_outer = pmap_pv_find_pa(pa);
1.3 fredette 524: pv_outer != NULL;
525: pv_outer = pv_outer->pv_next) {
526:
527: /* Load this outer mapping's space and address. */
528: space = pv_outer->pv_space;
529: va = pv_outer->pv_va;
530:
531: /* Do the inner loop. */
532: for (pv = pv_outer->pv_next;
533: pv != NULL;
534: pv = pv->pv_next) {
535: if (NON_EQUIVALENT_ALIAS(space, va,
536: pv->pv_space, pv->pv_va)) {
537: aliased = TRUE;
538: break;
539: }
540: }
541:
542: #ifndef NON_EQUIVALENT_ALIAS_TRANSITIVE
543: if (aliased)
544: #endif /* !NON_EQUIVALENT_ALIAS_TRANSITIVE */
545: break;
546: }
547:
548: /* Store and return the result. */
549: aliased_word = &_PAGE_ALIASED_WORD(pa);
550: aliased_bit = _PAGE_ALIASED_BIT(pa);
551: *aliased_word = (*aliased_word & ~aliased_bit) |
552: (aliased ? aliased_bit : 0);
553: return aliased;
554: }
555:
556: /*
557: * Given a VA->PA mapping and tlbprot bits to clear and set,
558: * this flushes the mapping from the TLB and cache, and changes
559: * the protection accordingly. This is used when a mapping is
560: * changing.
561: */
1.18 perry 562: static inline void _pmap_pv_update(paddr_t, struct pv_entry *, u_int, u_int);
563: static inline void
1.11 chs 564: _pmap_pv_update(paddr_t pa, struct pv_entry *pv, u_int tlbprot_clear,
565: u_int tlbprot_set)
1.3 fredette 566: {
567: struct pv_entry *ppv;
568: int no_rw_alias;
569:
570: /*
1.5 fredette 571: * We should never get called on I/O pages.
572: */
573: KASSERT(pa < HPPA_IOSPACE);
574:
575: /*
1.3 fredette 576: * If the TLB protection of this mapping is changing,
577: * check for a change in the no read-write alias state
578: * of the page.
579: */
580: KASSERT((tlbprot_clear & TLB_AR_MASK) == 0 ||
581: (tlbprot_clear & TLB_AR_MASK) == TLB_AR_MASK);
582: if (tlbprot_clear & TLB_AR_MASK) {
583:
584: /*
585: * Assume that no read-write aliasing
586: * exists. It does exist if this page is
587: * aliased and any mapping is writable.
588: */
589: no_rw_alias = TLB_NO_RW_ALIAS;
590: if (PAGE_IS_ALIASED(pa)) {
591: for (ppv = pmap_pv_find_pa(pa);
592: ppv != NULL;
593: ppv = ppv->pv_next) {
594: if (TLB_AR_WRITABLE(ppv == pv ?
595: tlbprot_set :
596: ppv->pv_tlbprot)) {
597: no_rw_alias = 0;
598: break;
599: }
600: }
601: }
602:
603: /* Note if the no read-write alias state has changed. */
604: if ((pv->pv_tlbprot & TLB_NO_RW_ALIAS) ^ no_rw_alias) {
605: tlbprot_clear |= TLB_NO_RW_ALIAS;
606: tlbprot_set |= no_rw_alias;
607: }
608: }
609:
610: /*
611: * Now call our asm helper function. At the very least,
612: * this will flush out the requested mapping and change
613: * its protection. If the changes touch any of TLB_REF,
614: * TLB_DIRTY, and TLB_NO_RW_ALIAS, all mappings of the
615: * page will be flushed and changed.
616: */
617: __pmap_pv_update(pa, pv, tlbprot_clear, tlbprot_set);
618: }
619: #define pmap_pv_update(pv, tc, ts) \
620: _pmap_pv_update(tlbptob((pv)->pv_tlbpage), pv, tc, ts)
621:
622: /*
1.1 fredette 623: * Given a pmap, a VA, a PA, and a TLB protection, this enters
624: * a new mapping and returns the new struct pv_entry.
625: */
1.18 perry 626: static inline struct pv_entry *pmap_pv_enter(pmap_t, pa_space_t, vaddr_t,
1.11 chs 627: paddr_t, u_int);
1.18 perry 628: static inline struct pv_entry *
1.11 chs 629: pmap_pv_enter(pmap_t pmap, pa_space_t space, vaddr_t va, paddr_t pa,
630: u_int tlbprot)
1.1 fredette 631: {
632: struct hpt_entry *hpt = pmap_hpt_hash(space, va);
633: int table_off;
1.3 fredette 634: struct pv_head *hpv;
1.5 fredette 635: struct pv_entry *pv, *pv_other;
1.1 fredette 636:
637: #ifdef DIAGNOSTIC
638: /* Make sure this VA isn't already entered. */
639: for (pv = hpt->hpt_entry; pv != NULL; pv = pv->pv_hash)
640: if (pv->pv_va == va && pv->pv_space == space)
641: panic("pmap_pv_enter: VA already entered");
642: #endif /* DIAGNOSTIC */
643:
644: /*
1.5 fredette 645: * Allocate a new pv_entry, fill it, and link it into the HPT.
1.3 fredette 646: */
1.1 fredette 647: pv = pmap_pv_alloc();
648: pv->pv_va = va;
649: pv->pv_pmap = pmap;
650: pv->pv_space = space;
651: pv->pv_tlbprot = tlbprot;
652: pv->pv_tlbpage = tlbbtop(pa);
1.3 fredette 653: pv->pv_hpt = hpt;
1.1 fredette 654: pv->pv_hash = hpt->hpt_entry;
655: hpt->hpt_entry = pv;
656:
1.5 fredette 657: /*
658: * If this mapping is for I/O space, mark the mapping
659: * uncacheable. (This is fine even on CPUs that don't
660: * support the U-bit; these CPUs don't cache references
661: * to I/O space.) Also mark this mapping as having
662: * no read/write aliasing, and we're done - we don't
663: * keep PA->VA lists for I/O space.
664: */
665: if (pa >= HPPA_IOSPACE) {
666: KASSERT(tlbprot & TLB_UNMANAGED);
667: pv->pv_tlbprot |= TLB_UNCACHEABLE | TLB_NO_RW_ALIAS;
668: return pv;
669: }
670:
671: /* Get the head of the PA->VA translation list. */
672: table_off = pmap_table_find_pa(pa);
673: KASSERT(table_off >= 0);
674: hpv = pv_head_tbl + table_off;
675:
676: #ifdef DIAGNOSTIC
677: /* Make sure this VA isn't already entered. */
678: for (pv_other = hpv->pv_head_pvs;
679: pv_other != NULL;
680: pv_other = pv_other->pv_next)
681: if (pmap == pv_other->pv_pmap && va == pv_other->pv_va)
682: panic("pmap_pv_enter: VA already in pv_tab");
683: #endif /* DIAGNOSTIC */
684:
685: /*
686: * Link this mapping into the PA->VA list.
687: */
688: pv_other = hpv->pv_head_pvs;
689: pv->pv_next = pv_other;
690: hpv->pv_head_pvs = pv;
691:
692: /*
693: * If there are no other mappings of this page, this
694: * mapping has no read/write aliasing. Otherwise, give
695: * this mapping the same TLB_NO_RW_ALIAS status as the
696: * other mapping (all mappings of the same page must
697: * always be marked the same).
698: */
699: pv->pv_tlbprot |= (pv_other == NULL ?
700: TLB_NO_RW_ALIAS :
701: (pv_other->pv_tlbprot & TLB_NO_RW_ALIAS));
702:
1.3 fredette 703: /* Check for read-write aliasing. */
704: if (!PAGE_IS_ALIASED(pa))
705: pmap_pv_check_alias(pa);
706: _pmap_pv_update(pa, pv, TLB_AR_MASK, tlbprot & TLB_AR_MASK);
707:
1.1 fredette 708: return pv;
709: }
710:
711: /*
712: * Given a particular VA->PA mapping, this removes it.
713: */
1.18 perry 714: static inline void
1.1 fredette 715: pmap_pv_remove(struct pv_entry *pv)
716: {
717: paddr_t pa = tlbptob(pv->pv_tlbpage);
718: int table_off;
1.3 fredette 719: struct pv_head *hpv;
720: struct pv_entry **_pv;
1.1 fredette 721:
722: PMAP_PRINTF(PDB_PV_REMOVE, ("(%p)\n", pv));
723:
1.5 fredette 724: /* Unlink this pv_entry from the HPT. */
1.3 fredette 725: _pv = &pv->pv_hpt->hpt_entry;
726: while (*_pv != pv) {
727: KASSERT(*_pv != NULL);
1.1 fredette 728: _pv = &(*_pv)->pv_hash;
1.3 fredette 729: }
1.1 fredette 730: *_pv = pv->pv_hash;
1.5 fredette 731:
732: /*
733: * If this mapping is for I/O space, simply flush the
734: * old mapping, free it, and we're done.
735: */
736: if (pa >= HPPA_IOSPACE) {
737: __pmap_pv_update(pa, pv, 0, 0);
738: pmap_pv_free(pv);
739: return;
740: }
741:
742: /* Get the head of the PA->VA translation list. */
743: table_off = pmap_table_find_pa(pa);
744: KASSERT(table_off >= 0);
745: hpv = pv_head_tbl + table_off;
746:
747: /* Unlink this pv_entry from the PA->VA translation list. */
1.3 fredette 748: _pv = &hpv->pv_head_pvs;
749: while (*_pv != pv) {
750: KASSERT(*_pv != NULL);
751: _pv = &(*_pv)->pv_next;
752: }
753: *_pv = pv->pv_next;
1.1 fredette 754:
755: /*
1.3 fredette 756: * Check for read-write aliasing. This will also flush
757: * the old mapping.
1.1 fredette 758: */
1.3 fredette 759: if (PAGE_IS_ALIASED(pa))
760: pmap_pv_check_alias(pa);
761: _pmap_pv_update(pa, pv, TLB_AR_MASK, TLB_AR_KR);
1.1 fredette 762:
1.3 fredette 763: /* Free this mapping. */
764: pmap_pv_free(pv);
1.1 fredette 765: }
766:
767: /*
1.22 ! skrll 768: * Bootstrap the system enough to run with virtual memory.
! 769: * Map the kernel's code and data, and allocate the system page table.
! 770: * Called with mapping OFF.
! 771: *
! 772: * Parameters:
! 773: * vstart PA of first available physical page
! 774: * vend PA of last available physical page
1.1 fredette 775: */
776: void
1.11 chs 777: pmap_bootstrap(vaddr_t *vstart, vaddr_t *vend)
1.1 fredette 778: {
779: vaddr_t addr;
780: vsize_t size;
781: vaddr_t pv_region;
782: struct hpt_entry *hptp;
783: #define BTLB_SET_SIZE 16
784: vaddr_t btlb_entry_start[BTLB_SET_SIZE];
785: vsize_t btlb_entry_size[BTLB_SET_SIZE];
786: int btlb_entry_vm_prot[BTLB_SET_SIZE];
787: int btlb_i, btlb_j;
1.2 fredette 788: vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got;
1.1 fredette 789: extern int kernel_text, etext;
1.2 fredette 790: vaddr_t kernel_data;
791: paddr_t phys_start, phys_end;
1.1 fredette 792:
1.21 skrll 793: PMAP_PRINTF(PDB_INIT, (": phys addresses %p - %p\n",
794: (void *)*vstart, (void *)*vend));
795:
1.1 fredette 796: uvm_setpagesize();
797:
1.6 thorpej 798: pages_per_vm_page = 1; /* XXX This should die */
1.1 fredette 799:
800: kern_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_NA;
801: kern_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_KR;
802: kern_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_KRW;
803: kern_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_KRW;
804: kern_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_KRX;
805: kern_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_KRX;
806: kern_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_KRWX;
807: kern_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_KRWX;
808:
809: user_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_NA;
810: user_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE] =TLB_AR_UR;
811: user_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_URW;
812: user_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE] =TLB_AR_URW;
813: user_prot[VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_URX;
814: user_prot[VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE] =TLB_AR_URX;
815: user_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_URWX;
816: user_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_URWX;
817:
818: /*
819: * Initialize kernel pmap
820: */
821: kernel_pmap = &kernel_pmap_store;
822: #if NCPUS > 1
823: lock_init(&pmap_lock, FALSE, ETAP_VM_PMAP_SYS, ETAP_VM_PMAP_SYS_I);
824: #endif /* NCPUS > 1 */
825: simple_lock_init(&kernel_pmap->pmap_lock);
826: simple_lock_init(&pmap_freelock);
827: simple_lock_init(&sid_pid_lock);
828:
829: kernel_pmap->pmap_refcnt = 1;
830: kernel_pmap->pmap_space = HPPA_SID_KERNEL;
831: kernel_pmap->pmap_pid = HPPA_PID_KERNEL;
832:
833: /*
834: * Allocate various tables and structures.
835: */
836: addr = hppa_round_page(*vstart);
837: virtual_end = *vend;
838:
839: /*
840: * Figure out how big the HPT must be, and align
841: * addr to what will be its beginning. We don't
842: * waste the pages skipped for the alignment;
843: * they become struct pv_entry pages.
844: */
1.2 fredette 845: pv_region = addr;
1.1 fredette 846: mfctl(CR_HPTMASK, size);
847: addr = (addr + size) & ~(size);
848: pv_free_list = NULL;
849: pmap_pv_add(pv_region, addr);
850:
851: /* Allocate the HPT */
852: for (hptp = (struct hpt_entry *)addr;
853: ((u_int)hptp - addr) <= size; hptp++) {
854: hptp->hpt_valid = 0;
855: hptp->hpt_vpn = 0;
856: hptp->hpt_space = -1;
857: hptp->hpt_tlbpage = 0;
858: hptp->hpt_tlbprot = 0;
859: hptp->hpt_entry = NULL;
860: }
1.21 skrll 861: PMAP_PRINTF(PDB_INIT, (": hpt_table 0x%lx @ %p\n", size + 1,
862: (caddr_t)addr));
1.20 skrll 863: /*
864: * load cr25 with the address of the HPT table
865: * NB: It sez CR_VTOP, but we (and the TLB handlers) know better ...
866: */
1.1 fredette 867: mtctl(addr, CR_VTOP);
868: hpt_base = addr;
869: hpt_mask = size;
1.10 chs 870: lwp0.l_md.md_regs->tf_hptm = size;
871: lwp0.l_md.md_regs->tf_vtop = addr;
1.1 fredette 872: addr += size + 1;
873:
1.3 fredette 874: /* Allocate the struct pv_head array. */
1.2 fredette 875: addr = ALIGN(addr);
1.3 fredette 876: pv_head_tbl = (struct pv_head *) addr;
1.2 fredette 877: memset(pv_head_tbl, 0, sizeof(*pv_head_tbl) * totalphysmem);
878: addr = (vaddr_t) (pv_head_tbl + totalphysmem);
879:
1.3 fredette 880: /* Allocate the page aliased bitmap. */
1.2 fredette 881: addr = ALIGN(addr);
1.3 fredette 882: page_aliased_bitmap = (u_int *) addr;
883: addr = (vaddr_t) (&_PAGE_ALIASED_WORD(totalphysmem) + 1);
884: memset(page_aliased_bitmap, 0, addr - (vaddr_t) page_aliased_bitmap);
1.2 fredette 885:
886: /*
887: * Allocate the largest struct pv_entry region. The
888: * 6 is a magic constant, chosen to allow on average
889: * all physical pages to have 6 simultaneous mappings
890: * without having to reclaim any struct pv_entry.
891: */
892: pv_region = addr;
893: addr += sizeof(struct pv_entry) * totalphysmem * 6;
894: pmap_pv_add(pv_region, addr);
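	/*
	 * (For scale: assuming a 32-byte struct pv_entry, a 64MB
	 * machine with 16384 4KB pages reserves about
	 * 16384 * 6 * 32 bytes, i.e. 3MB, here.)
	 */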
895:
896: /*
897: * Allocate the steal region. Because pmap_steal_memory
898: * must panic whenever an allocation cannot be fulfilled,
899: * we have to guess at the maximum amount of space that
900: * might be stolen. Overestimating is not really a problem,
901: * as it only leads to lost virtual space, not lost physical
902: * pages.
903: */
904: addr = hppa_round_page(addr);
905: virtual_steal = addr;
906: addr += totalphysmem * sizeof(struct vm_page);
907: memset((caddr_t) virtual_steal, 0, addr - virtual_steal);
908:
909: /*
910: * We now have a rough idea of where managed kernel virtual
911: * space will begin, and we can start mapping everything
912: * before that.
913: */
914: addr = hppa_round_page(addr);
915: *vstart = addr;
916:
1.1 fredette 917: /*
1.2 fredette 918: * In general, the virtual space below the kernel text is
919: * left unmapped, to allow detection of NULL dereferences.
920: * However, these tmp_vpages are two virtual pages right
921: * before the kernel text that can be mapped for page copying
922: * and zeroing.
1.1 fredette 923: */
1.2 fredette 924: tmp_vpages[1] = hppa_trunc_page((vaddr_t) &kernel_text) - PAGE_SIZE;
925: tmp_vpages[0] = tmp_vpages[1] - PAGE_SIZE;
1.1 fredette 926:
927: /*
1.2 fredette 928: * The kernel text, data, and bss must be direct-mapped,
929: * because the kernel often runs in physical mode, and
930: * anyways the loader loaded the kernel into physical
931: * memory exactly where it was linked.
932: *
933: * All memory already allocated after bss, either by
934: * our caller or by this function itself, must also be
935: * direct-mapped, because it's completely unmanaged
936: * and was allocated in physical mode.
1.1 fredette 937: *
1.2 fredette 938: * BTLB entries are used to do this direct mapping.
939: * BTLB entries have a minimum and maximum possible size,
940: * and MD code gives us these sizes in units of pages.
1.1 fredette 941: */
1.2 fredette 942: btlb_entry_min = (vsize_t) hppa_btlb_size_min * PAGE_SIZE;
943: btlb_entry_max = (vsize_t) hppa_btlb_size_max * PAGE_SIZE;
1.1 fredette 944:
1.2 fredette 945: /*
946: * We begin by making BTLB entries for the kernel text.
947: * To keep things simple, we insist that the kernel text
948: * be aligned to the minimum BTLB entry size.
949: */
950: if (((vaddr_t) &kernel_text) & (btlb_entry_min - 1))
951: panic("kernel text not aligned to BTLB minimum size");
1.1 fredette 952:
953: /*
1.2 fredette 954: * To try to conserve BTLB entries, take a hint from how
955: * the kernel was linked: take the kernel text start as
956: * our effective minimum BTLB entry size, assuming that
957: * the data segment was also aligned to that size.
958: *
959: * In practice, linking the kernel at 2MB, and aligning
960: * the data segment to a 2MB boundary, should control well
961: * how much of the BTLB the pmap uses. However, this code
962: * should not rely on this 2MB magic number, nor should
963: * it rely on the data segment being aligned at all. This
964: * is to allow (smaller) kernels (linked lower) to work fine.
1.1 fredette 965: */
1.2 fredette 966: btlb_entry_min = (vaddr_t) &kernel_text;
1.18 perry 967: __asm volatile (
1.2 fredette 968: " ldil L%%$global$, %0 \n"
969: " ldo R%%$global$(%0), %0 \n"
970: : "=r" (kernel_data));
1.1 fredette 971:
972: /*
1.2 fredette 973: * Now make BTLB entries to direct-map the kernel text
974: * read- and execute-only as much as possible. Note that
975: * if the data segment isn't nicely aligned, the last
976: * BTLB entry for the kernel text may also cover some of
977: * the data segment, meaning it will have to allow writing.
978: */
979: addr = (vaddr_t) &kernel_text;
980: btlb_j = 0;
981: while (addr < (vaddr_t) &etext) {
1.1 fredette 982:
983: /* Set up the next BTLB entry. */
984: KASSERT(btlb_j < BTLB_SET_SIZE);
1.2 fredette 985: btlb_entry_start[btlb_j] = addr;
1.1 fredette 986: btlb_entry_size[btlb_j] = btlb_entry_min;
1.2 fredette 987: btlb_entry_vm_prot[btlb_j] = VM_PROT_READ | VM_PROT_EXECUTE;
988: if (addr + btlb_entry_min > kernel_data)
1.1 fredette 989: btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE;
990:
1.2 fredette 991: /* Coalesce BTLB entries whenever possible. */
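		/*
		 * (Two neighboring entries merge when they have equal
		 * protection and equal size, the earlier entry is
		 * aligned to the doubled size, and the doubled size
		 * still fits under btlb_entry_max.)
		 */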
1.1 fredette 992: while (btlb_j > 0 &&
993: btlb_entry_vm_prot[btlb_j] ==
994: btlb_entry_vm_prot[btlb_j - 1] &&
995: btlb_entry_size[btlb_j] ==
996: btlb_entry_size[btlb_j - 1] &&
997: !(btlb_entry_start[btlb_j - 1] &
1.2 fredette 998: ((btlb_entry_size[btlb_j - 1] << 1) - 1)) &&
999: (btlb_entry_size[btlb_j - 1] << 1) <=
1000: btlb_entry_max)
1.1 fredette 1001: btlb_entry_size[--btlb_j] <<= 1;
1002:
1003: /* Move on. */
1.2 fredette 1004: addr = btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
1.1 fredette 1005: btlb_j++;
1.2 fredette 1006: }
1007:
1008: /*
1009: * Now make BTLB entries to direct-map the kernel data,
1010: * bss, and all of the preallocated space read-write.
1011: *
1012: * Note that, unlike above, we're not concerned with
1013: * making these BTLB entries such that they finish as
1014: * close as possible to the end of the space we need
1015: * them to map. Instead, to minimize the number of BTLB
1016: * entries we need, we make them as large as possible.
1017: * The only thing this wastes is kernel virtual space,
1018: * which is plentiful.
1019: */
1020: while (addr < *vstart) {
1021:
1022: /* Make the next BTLB entry. */
1023: KASSERT(btlb_j < BTLB_SET_SIZE);
1024: size = btlb_entry_min;
1025: while ((addr + size) < *vstart &&
1026: (size << 1) < btlb_entry_max &&
1027: !(addr & ((size << 1) - 1)))
1028: size <<= 1;
1029: btlb_entry_start[btlb_j] = addr;
1030: btlb_entry_size[btlb_j] = size;
1031: btlb_entry_vm_prot[btlb_j] = VM_PROT_READ | VM_PROT_WRITE;
1032:
1033: /* Move on. */
1034: addr = btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
1035: btlb_j++;
1036: }
1037:
1.1 fredette 1038: /* Now insert all of the BTLB entries. */
1039: for (btlb_i = 0; btlb_i < btlb_j; btlb_i++) {
1040: btlb_entry_got = btlb_entry_size[btlb_i];
1.4 fredette 1041: if (hppa_btlb_insert(kernel_pmap->pmap_space,
1.1 fredette 1042: btlb_entry_start[btlb_i],
1043: btlb_entry_start[btlb_i],
1044: &btlb_entry_got,
1045: kernel_pmap->pmap_pid |
1046: pmap_prot(kernel_pmap,
1047: btlb_entry_vm_prot[btlb_i])) < 0)
1048: panic("pmap_bootstrap: cannot insert BTLB entry");
1049: if (btlb_entry_got != btlb_entry_size[btlb_i])
1050: panic("pmap_bootstrap: BTLB entry mapped wrong amount");
1051: }
1052:
1.2 fredette 1053: /*
1054: * We now know the exact beginning of managed kernel
1055: * virtual space.
1056: */
1.1 fredette 1057: *vstart = btlb_entry_start[btlb_j - 1] + btlb_entry_size[btlb_j - 1];
1.8 thorpej 1058: virtual_start = *vstart;
1.1 fredette 1059:
1060: /*
1.2 fredette 1061: * Finally, load physical pages into UVM. There are
1062: * three segments of pages.
1.1 fredette 1063: */
1.2 fredette 1064: physmem = 0;
1065:
1066: /* The first segment runs from [resvmem..kernel_text). */
1067: phys_start = resvmem;
1068: phys_end = atop(hppa_trunc_page(&kernel_text));
1.21 skrll 1069:
1070: PMAP_PRINTF(PDB_INIT, (": phys segment 0x%05x 0x%05x\n",
1071: (u_int)phys_start, (u_int)phys_end));
1.2 fredette 1072: if (phys_end > phys_start) {
1073: uvm_page_physload(phys_start, phys_end,
1074: phys_start, phys_end, VM_FREELIST_DEFAULT);
1075: physmem += phys_end - phys_start;
1076: }
1.1 fredette 1077:
1.2 fredette 1078: /* The second segment runs from [etext..kernel_data). */
1079: phys_start = atop(hppa_round_page((vaddr_t) &etext));
1080: phys_end = atop(hppa_trunc_page(kernel_data));
1.21 skrll 1081:
1082: PMAP_PRINTF(PDB_INIT, (": phys segment 0x%05x 0x%05x\n",
1083: (u_int)phys_start, (u_int)phys_end));
1.2 fredette 1084: if (phys_end > phys_start) {
1085: uvm_page_physload(phys_start, phys_end,
1086: phys_start, phys_end, VM_FREELIST_DEFAULT);
1087: physmem += phys_end - phys_start;
1088: }
1.1 fredette 1089:
1.2 fredette 1090: /* The third segment runs from [virtual_steal..totalphysmem). */
1091: phys_start = atop(virtual_steal);
1092: phys_end = totalphysmem;
1.21 skrll 1093:
1094: PMAP_PRINTF(PDB_INIT, (": phys segment 0x%05x 0x%05x\n",
1095: (u_int)phys_start, (u_int)phys_end));
1.2 fredette 1096: if (phys_end > phys_start) {
1097: uvm_page_physload(phys_start, phys_end,
1098: phys_start, phys_end, VM_FREELIST_DEFAULT);
1099: physmem += phys_end - phys_start;
1100: }
1.1 fredette 1101: }
1102:
1103: /*
1104: * pmap_steal_memory(size, startp, endp)
                1105:  *	steals a memory block of size `size' from the directly-
                1106:  *	mapped segment (mapped behind the scenes).
                1107:  *	The directly-mapped region cannot grow dynamically once allocated.
1108: */
1109: vaddr_t
1.11 chs 1110: pmap_steal_memory(vsize_t size, vaddr_t *startp, vaddr_t *endp)
1.1 fredette 1111: {
1112: vaddr_t va;
1.2 fredette 1113: int lcv;
1.1 fredette 1114:
1115: PMAP_PRINTF(PDB_STEAL, ("(%lx, %p, %p)\n", size, startp, endp));
1116:
1.8 thorpej 1117: /* Remind the caller of the start and end of virtual space. */
1118: if (startp)
1119: *startp = virtual_start;
1120: if (endp)
1121: *endp = virtual_end;
1122:
1.1 fredette 1123: /* Round the allocation up to a page. */
1124: size = hppa_round_page(size);
1125:
1126: /* We must panic if we cannot steal the memory. */
1.8 thorpej 1127: if (size > virtual_start - virtual_steal)
1.1 fredette 1128: panic("pmap_steal_memory: out of memory");
1129:
1130: /* Steal the memory. */
1131: va = virtual_steal;
1132: virtual_steal += size;
1.2 fredette 1133: PMAP_PRINTF(PDB_STEAL, (": steal %ld bytes @%x\n", size, (u_int)va));
1134: for (lcv = 0; lcv < vm_nphysseg ; lcv++)
1135: if (vm_physmem[lcv].start == atop(va)) {
1136: vm_physmem[lcv].start = atop(virtual_steal);
1137: vm_physmem[lcv].avail_start = atop(virtual_steal);
1138: break;
1139: }
1140: if (lcv == vm_nphysseg)
1141: panic("pmap_steal_memory inconsistency");
1.1 fredette 1142:
1143: return va;
1.8 thorpej 1144: }
1145:
1146: /*
1147: * How much virtual space does this kernel have?
1148: * (After mapping kernel text, data, etc.)
1149: */
1150: void
1151: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
1152: {
1153: *vstartp = virtual_start;
1154: *vendp = virtual_end;
1.1 fredette 1155: }
1156:
1157: /*
1158: * Finishes the initialization of the pmap module.
                1159:  * This procedure is called from uvm_init() in uvm/uvm_init.c
1160: * to initialize any remaining data structures that the pmap module
1161: * needs to map virtual memory (VM is already ON).
1162: */
1163: void
1.11 chs 1164: pmap_init(void)
1.1 fredette 1165: {
1.11 chs 1166: extern void gateway_page(void);
1.1 fredette 1167:
1168: TAILQ_INIT(&pmap_freelist);
1169: pid_counter = HPPA_PID_KERNEL + 2;
1170:
1171: /*
                1172: 	 * map the syscall gateway page once for everybody.
                1173: 	 * NB: we would have to remap the physical memory if
                1174: 	 * we had any at the SYSCALLGATE address.
1175: *
1176: * no spls since no interrupts
1177: */
1.3 fredette 1178: pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, SYSCALLGATE,
1179: (paddr_t)&gateway_page,
1180: TLB_GATE_PROT | TLB_UNMANAGED | TLB_WIRED);
1.1 fredette 1181:
1182: pmap_initialized = TRUE;
1183: }
1184:
1185: /*
1186: * Initialize a preallocated and zeroed pmap structure,
1187: * such as one in a vmspace structure.
1188: */
1.11 chs 1189: static void pmap_pinit(pmap_t);
1.1 fredette 1190: static void
1.11 chs 1191: pmap_pinit(pmap_t pmap)
1.1 fredette 1192: {
1.10 chs 1193: u_int pid;
1.1 fredette 1194: int s;
1195:
1196: PMAP_PRINTF(PDB_PMAP, ("(%p), pid=%x\n", pmap, pmap->pmap_pid));
1197:
1198: if (!(pid = pmap->pmap_pid)) {
1199:
1200: /*
1201: * Allocate space and protection IDs for the pmap.
1202: * If all are allocated, there is nothing we can do.
1203: */
1204: s = splvm();
1205: if (pid_counter < HPPA_MAX_PID) {
1206: pid = pid_counter;
1207: pid_counter += 2;
1208: } else
1209: pid = 0;
1210: splx(s);
1211:
1212: if (pid == 0)
                1213: 			panic("no more pmap ids");
1214:
1215: simple_lock_init(&pmap->pmap_lock);
1216: }
1217:
1218: s = splvm();
1219: pmap->pmap_pid = pid;
1220: pmap->pmap_space = (pmap->pmap_pid >> 1) - 1;
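	/*
	 * (Pids advance in steps of two, so, assuming HPPA_PID_KERNEL
	 * is 2, the first user pmap gets pid 4 and space 1, the next
	 * pid 6 and space 2, and so on.)
	 */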
1221: pmap->pmap_refcnt = 1;
1222: pmap->pmap_stats.resident_count = 0;
1223: pmap->pmap_stats.wired_count = 0;
1224: splx(s);
1225: }
1226:
1227: /*
1228: * pmap_create()
1229: *
1230: * Create and return a physical map.
1231: * the map is an actual physical map, and may be referenced by the hardware.
1232: */
1233: pmap_t
1.11 chs 1234: pmap_create(void)
1.1 fredette 1235: {
1.10 chs 1236: pmap_t pmap;
1.1 fredette 1237: int s;
1238:
1239: PMAP_PRINTF(PDB_PMAP, ("()"));
1240:
1241: /*
1242: * If there is a pmap in the pmap free list, reuse it.
1243: */
1244: s = splvm();
1245: if (pmap_nfree) {
1246: pmap = pmap_freelist.tqh_first;
1247: TAILQ_REMOVE(&pmap_freelist, pmap, pmap_list);
1248: pmap_nfree--;
1249: splx(s);
1250: } else {
1251: splx(s);
1252: MALLOC(pmap, struct pmap *, sizeof(*pmap), M_VMMAP, M_NOWAIT);
1253: if (pmap == NULL)
1254: return NULL;
1.11 chs 1255: memset(pmap, 0, sizeof(*pmap));
1.1 fredette 1256: }
1257:
1258: pmap_pinit(pmap);
1259:
1260: return(pmap);
1261: }
1262:
1263: /*
1264: * pmap_destroy(pmap)
1265: * Gives up a reference to the specified pmap. When the reference count
1266: * reaches zero the pmap structure is added to the pmap free list.
1267: * Should only be called if the map contains no valid mappings.
1268: */
1269: void
1.11 chs 1270: pmap_destroy(pmap_t pmap)
1.1 fredette 1271: {
1272: int ref_count;
1273: int s;
1274:
1275: PMAP_PRINTF(PDB_PMAP, ("(%p)\n", pmap));
1276:
1277: s = splvm();
1278:
1279: ref_count = --pmap->pmap_refcnt;
1280:
1281: if (ref_count < 0)
1282: panic("pmap_destroy(): ref_count < 0");
1283: if (!ref_count) {
1284: KASSERT(pmap->pmap_stats.resident_count == 0);
1285: KASSERT(pmap->pmap_stats.wired_count == 0);
1286:
1287: /*
1288: * Add the pmap to the pmap free list
1289: * We cannot free() disposed pmaps because of
1290: * PID shortage of 2^16
1291: */
1292: TAILQ_INSERT_HEAD(&pmap_freelist, pmap, pmap_list);
1293: pmap_nfree++;
1294: }
1295: splx(s);
1296: }
1297:
1298: /*
1.10 chs 1299: * pmap_activate(lwp)
                1300:  *	Activates the vmspace for the given LWP.  This
                1301:  *	isn't necessarily the current LWP.
1302: */
1303: void
1.10 chs 1304: pmap_activate(struct lwp *l)
1.1 fredette 1305: {
1.10 chs 1306: struct proc *p = l->l_proc;
1.1 fredette 1307: pmap_t pmap = p->p_vmspace->vm_map.pmap;
1308: pa_space_t space = pmap->pmap_space;
1.10 chs 1309: struct trapframe *tf = l->l_md.md_regs;
1.1 fredette 1310:
1311: /* space is cached for the copy{in,out}'s pleasure */
1.10 chs 1312: l->l_addr->u_pcb.pcb_space = space;
1.1 fredette 1313:
1314: /* Load all of the user's space registers. */
1315: tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
1316: tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = space;
1317: tf->tf_iisq_head = tf->tf_iisq_tail = space;
1318:
1319: /*
1320: * Load the protection registers. NB that
1321: * if p *is* the current process, we set pidr2
1322: * to the new space immediately, so any copyins
1323: * or copyouts that happen before we return to
1324: * userspace work.
1325: */
1326: tf->tf_pidr1 = tf->tf_pidr2 = pmap->pmap_pid;
1327: if (p == curproc)
1328: mtctl(pmap->pmap_pid, CR_PIDR2);
1329: }
1330:
1331: /*
1332: * pmap_enter(pmap, va, pa, prot, flags)
1333: * Create a translation for the virtual address (va) to the physical
1334: * address (pa) in the pmap with the protection requested. If the
                1335:  *	translation is wired then we cannot allow a page fault to occur
1336: * for this mapping.
1337: */
1338: int
1.11 chs 1339: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1.1 fredette 1340: {
1.10 chs 1341: struct pv_entry *pv;
1.1 fredette 1342: u_int tlbpage, tlbprot;
1343: pa_space_t space;
1344: boolean_t waswired;
1345: boolean_t wired = (flags & PMAP_WIRED) != 0;
1346: int s;
1347:
1348: /* Get a handle on the mapping we want to enter. */
1349: space = pmap_sid(pmap, va);
1350: va = hppa_trunc_page(va);
1351: pa = hppa_trunc_page(pa);
1352: tlbpage = tlbbtop(pa);
1353: tlbprot = pmap_prot(pmap, prot) | pmap->pmap_pid;
1354: if (wired)
1355: tlbprot |= TLB_WIRED;
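	/*
	 * (Presetting TLB_REF, and TLB_DIRTY on write access, saves an
	 * immediate extra fault that would otherwise be taken just to
	 * emulate the referenced/modified bits.)
	 */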
1.16 chs 1356: if (flags & VM_PROT_ALL) {
1357: tlbprot |= TLB_REF;
1358: if (flags & VM_PROT_WRITE)
1359: tlbprot |= TLB_DIRTY;
1360: }
1.1 fredette 1361:
1362: #ifdef PMAPDEBUG
1363: if (!pmap_initialized || (pmapdebug & PDB_ENTER))
1364: PMAP_PRINTF(0, ("(%p, %p, %p, %x, %swired)\n",
1365: pmap, (caddr_t)va, (caddr_t)pa,
1366: prot, wired? "" : "un"));
1367: #endif
1368:
1369: s = splvm();
1370:
1371: if (!(pv = pmap_pv_find_va(space, va))) {
1372: /*
1373: * Mapping for this virtual address doesn't exist.
1374: * Enter a new mapping.
1375: */
1376: pv = pmap_pv_enter(pmap, space, va, pa, tlbprot);
1377: pmap->pmap_stats.resident_count++;
1378: waswired = FALSE;
1379: } else {
1.3 fredette 1380: KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1.1 fredette 1381: waswired = pv->pv_tlbprot & TLB_WIRED;
1382:
1383: /* see if we are remapping the page to another PA */
1384: if (pv->pv_tlbpage != tlbpage) {
1385: PMAP_PRINTF(PDB_ENTER, (": moving pa %x -> %x\n",
1386: pv->pv_tlbpage, tlbpage));
1387: /* update tlbprot to avoid extra subsequent fault */
1388: pmap_pv_remove(pv);
1389: pv = pmap_pv_enter(pmap, space, va, pa, tlbprot);
1390: } else {
1391: /* We are just changing the protection. */
1392: #ifdef PMAPDEBUG
1393: if (pmapdebug & PDB_ENTER) {
1394: char buffer1[64];
1395: char buffer2[64];
1396: bitmask_snprintf(pv->pv_tlbprot, TLB_BITS,
1397: buffer1, sizeof(buffer1));
1398: bitmask_snprintf(tlbprot, TLB_BITS,
1399: buffer2, sizeof(buffer2));
1400: printf("pmap_enter: changing %s->%s\n",
1401: buffer1, buffer2);
1402: }
1403: #endif
1404: pmap_pv_update(pv, TLB_AR_MASK|TLB_PID_MASK|TLB_WIRED,
1405: tlbprot);
1406: }
1407: }
1408:
1409: /*
1410: * Adjust statistics
1411: */
1412: if (wired && !waswired) {
1413: pmap->pmap_stats.wired_count++;
1414: } else if (!wired && waswired) {
1415: pmap->pmap_stats.wired_count--;
1416: }
1417: splx(s);
1418:
1419: return (0);
1420: }
1421:
1422: /*
1423: * pmap_remove(pmap, sva, eva)
1424: * unmaps all virtual addresses v in the virtual address
1425: * range determined by [sva, eva) and pmap.
1426: * sva and eva must be on machine independent page boundaries and
1427: * sva must be less than or equal to eva.
1428: */
1429: void
1.11 chs 1430: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.1 fredette 1431: {
1.10 chs 1432: struct pv_entry *pv;
1433: pa_space_t space;
1.1 fredette 1434: int s;
1435:
1436: PMAP_PRINTF(PDB_REMOVE, ("(%p, %p, %p)\n",
1437: pmap, (caddr_t)sva, (caddr_t)eva));
1438:
1439: sva = hppa_trunc_page(sva);
1440: space = pmap_sid(pmap, sva);
1441:
1442: s = splvm();
1443:
1444: while (sva < eva) {
1445: pv = pmap_pv_find_va(space, sva);
1446: if (pv) {
1.3 fredette 1447: KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1.1 fredette 1448: KASSERT(pmap->pmap_stats.resident_count > 0);
1449: pmap->pmap_stats.resident_count--;
1450: if (pv->pv_tlbprot & TLB_WIRED) {
1451: KASSERT(pmap->pmap_stats.wired_count > 0);
1452: pmap->pmap_stats.wired_count--;
1453: }
1454: pmap_pv_remove(pv);
1455: PMAP_PRINTF(PDB_REMOVE, (": removed %p for 0x%x:%p\n",
1456: pv, space, (caddr_t)sva));
1457: }
1458: sva += PAGE_SIZE;
1459: }
1460:
1461: splx(s);
1462: }
1463:
1464: /*
1465: * pmap_page_protect(pa, prot)
1466: *
1467: * Lower the permission for all mappings to a given page.
1468: */
1469: void
1.11 chs 1470: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1 fredette 1471: {
1.10 chs 1472: struct pv_entry *pv, *pv_next;
1473: pmap_t pmap;
1474: u_int tlbprot;
1.1 fredette 1475: paddr_t pa = VM_PAGE_TO_PHYS(pg);
1476: int s;
1477:
1478: PMAP_PRINTF(PDB_PROTECT, ("(%p, %x)\n", (caddr_t)pa, prot));
1479:
1480: switch (prot) {
1481: case VM_PROT_ALL:
1482: return;
1483: case VM_PROT_READ:
1484: case VM_PROT_READ|VM_PROT_EXECUTE:
1485: s = splvm();
1486: for (pv = pmap_pv_find_pa(pa); pv; pv = pv->pv_next) {
1487: /* Ignore unmanaged mappings. */
1.3 fredette 1488: if (pv->pv_tlbprot & TLB_UNMANAGED)
1.1 fredette 1489: continue;
1490: /*
1491: * Compare new protection with old to see if
1492: * anything needs to be changed.
1493: */
1494: tlbprot = pmap_prot(pv->pv_pmap, prot);
1495: if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot) {
1496: pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
1497: }
1498: }
1499: splx(s);
1500: break;
1501: default:
1502: s = splvm();
1503: for (pv = pmap_pv_find_pa(pa); pv != NULL; pv = pv_next) {
1504: pv_next = pv->pv_next;
1505: /* Ignore unmanaged mappings. */
1.3 fredette 1506: if (pv->pv_tlbprot & TLB_UNMANAGED)
1.1 fredette 1507: continue;
1508: #ifdef PMAPDEBUG
1509: if (pmapdebug & PDB_PROTECT) {
1510: char buffer[64];
1511: bitmask_snprintf(pv->pv_tlbprot, TLB_BITS,
1512: buffer, sizeof(buffer));
1513: printf("pv={%p,%x:%x,%s,%x}->%p\n",
1514: pv->pv_pmap, pv->pv_space, pv->pv_va,
1515: buffer,
1516: tlbptob(pv->pv_tlbpage), pv->pv_hash);
1517: }
1518: #endif
1519: pmap = pv->pv_pmap;
1520: if (pv->pv_tlbprot & TLB_WIRED) {
1521: KASSERT(pmap->pmap_stats.wired_count > 0);
1522: pmap->pmap_stats.wired_count--;
1523: }
1524: pmap_pv_remove(pv);
1525: KASSERT(pmap->pmap_stats.resident_count > 0);
1526: pmap->pmap_stats.resident_count--;
1527: }
1528: splx(s);
1529: break;
1530: }
1531: }
1532:
1533: /*
1534: * pmap_protect(pmap, s, e, prot)
1535: * changes the protection on all virtual addresses v in the
1536: * virtual address range determined by [s, e) and pmap to prot.
1537: * s and e must be on machine independent page boundaries and
1538: * s must be less than or equal to e.
1539: */
1540: void
1.11 chs 1541: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1 fredette 1542: {
1.10 chs 1543: struct pv_entry *pv;
1.1 fredette 1544: u_int tlbprot;
1545: pa_space_t space;
1546: int s;
1547:
1548: PMAP_PRINTF(PDB_PROTECT, ("(%p, %p, %p, %x)\n",
1549: pmap, (caddr_t)sva, (caddr_t)eva, prot));
1550:
1551: if (prot == VM_PROT_NONE) {
1552: pmap_remove(pmap, sva, eva);
1553: return;
1554: }
1555:
1556: sva = hppa_trunc_page(sva);
1557: space = pmap_sid(pmap, sva);
1558: tlbprot = pmap_prot(pmap, prot);
1559:
1560: s = splvm();
1561: for(; sva < eva; sva += PAGE_SIZE) {
1.15 chs 1562: if ((pv = pmap_pv_find_va(space, sva))) {
1563:
1.1 fredette 1564: /*
1565: * Compare new protection with old to see if
1566: * anything needs to be changed.
1567: */
1568: if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot) {
1569: pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
1570: }
1571: }
1572: }
1573: splx(s);
1574: }
1575:
1576: /*
1577: * Routine: pmap_unwire
1578: * Function: Change the wiring attribute for a map/virtual-address
1579: * pair.
1580: * In/out conditions:
1581: * The mapping must already exist in the pmap.
1582: *
1583: * Change the wiring for a given virtual page. This routine currently is
1584: * only used to unwire pages and hence the mapping entry will exist.
1585: */
1586: void
1.11 chs 1587: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1 fredette 1588: {
1589: struct pv_entry *pv;
1590: int s;
1591:
1592: va = hppa_trunc_page(va);
1593: PMAP_PRINTF(PDB_WIRING, ("(%p, %p)\n", pmap, (caddr_t)va));
1594:
1595: simple_lock(&pmap->pmap_lock);
1596:
1597: s = splvm();
1598: if ((pv = pmap_pv_find_va(pmap_sid(pmap, va), va)) == NULL)
1599: panic("pmap_unwire: can't find mapping entry");
1600:
1.3 fredette 1601: KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1.1 fredette 1602: if (pv->pv_tlbprot & TLB_WIRED) {
1603: KASSERT(pmap->pmap_stats.wired_count > 0);
1604: pv->pv_tlbprot &= ~TLB_WIRED;
1605: pmap->pmap_stats.wired_count--;
1606: }
1607: splx(s);
1608: simple_unlock(&pmap->pmap_lock);
1609: }
1610:
1611: /*
1612: * pmap_extract(pmap, va, pap)
1613: * fills in the physical address corrsponding to the
1614: * virtual address specified by pmap and va into the
1615: * storage pointed to by pap and returns TRUE if the
1616:  *	virtual address is mapped. returns FALSE if not mapped.
1617: */
1618: boolean_t
1.11 chs 1619: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.1 fredette 1620: {
1621: struct pv_entry *pv;
1622: vaddr_t off;
1623: int s;
1624:
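	/* Split va into its page base and the offset within the page. */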
1625: off = va;
1626: off -= (va = hppa_trunc_page(va));
1627:
1628: s = splvm();
1629: if ((pv = pmap_pv_find_va(pmap_sid(pmap, va), va))) {
1630: if (pap != NULL)
1631: *pap = tlbptob(pv->pv_tlbpage) + off;
1632: PMAP_PRINTF(PDB_EXTRACT, ("(%p, %p) = %p\n",
1633: pmap, (caddr_t)va,
1634: (caddr_t)(tlbptob(pv->pv_tlbpage) + off)));
1635: } else {
1636: PMAP_PRINTF(PDB_EXTRACT, ("(%p, %p) unmapped\n",
1637: pmap, (caddr_t)va));
1638: }
1639: splx(s);
1640: return (pv != NULL);
1641: }
1642:
1643: /*
1644: * pmap_zero_page(pa)
1645: *
1646: * Zeros the specified page.
1647: */
1648: void
1.11 chs 1649: pmap_zero_page(paddr_t pa)
1.1 fredette 1650: {
1651: struct pv_entry *pv;
1652: int s;
1653:
1654: PMAP_PRINTF(PDB_ZERO, ("(%p)\n", (caddr_t)pa));
1655:
1656: s = splvm(); /* XXX are we already that high? */
1657:
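	/*
	 * tmp_vpages[] are kernel virtual addresses reserved (presumably
	 * at bootstrap) solely for temporary mappings like this one.
	 */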
1658: /* Map the physical page. */
1.3 fredette 1659: pv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[1], pa,
1660: TLB_AR_KRW | TLB_UNMANAGED | TLB_WIRED);
1.1 fredette 1661:
1662: /* Zero it. */
1663: memset((caddr_t)tmp_vpages[1], 0, PAGE_SIZE);
1664:
1665: /* Unmap the physical page. */
1666: pmap_pv_remove(pv);
1667:
1668: splx(s);
1669: }
1670:
1671: /*
1672: * pmap_copy_page(src, dst)
1673: *
1674:  * pmap_copy_page copies the src page to the destination page.  Both
1675:  * pages are mapped at temporary kernel virtual addresses for the
1676:  * copy, since the physical pages usually have no mappings of their
1677:  * own.
1678: */
1679: void
1.11 chs 1680: pmap_copy_page(paddr_t spa, paddr_t dpa)
1.1 fredette 1681: {
1682: struct pv_entry *spv, *dpv;
1683: int s;
1684:
1685: PMAP_PRINTF(PDB_COPY, ("(%p, %p)\n", (caddr_t)spa, (caddr_t)dpa));
1686:
1687: s = splvm(); /* XXX are we already that high? */
1688:
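	/*
	 * The source is mapped read-only (TLB_AR_KR), the destination
	 * read/write (TLB_AR_KRW); both mappings are unmanaged and
	 * wired for the duration of the copy.
	 */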
1689: /* Map the two pages. */
1.3 fredette 1690: spv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[0], spa,
1691: TLB_AR_KR | TLB_UNMANAGED | TLB_WIRED);
1692: dpv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[1], dpa,
1693: TLB_AR_KRW | TLB_UNMANAGED | TLB_WIRED);
1.1 fredette 1694:
1695: /* Do the copy. */
1696: memcpy((caddr_t)tmp_vpages[1], (const caddr_t)tmp_vpages[0], PAGE_SIZE);
1697:
1698: /* Unmap the pages. */
1699: pmap_pv_remove(spv);
1700: pmap_pv_remove(dpv);
1701:
1702: splx(s);
1703: }
1704:
1705: /*
1706: * Given a PA and a bit, this tests and clears that bit in
1707: * the modref information for the PA.
1708: */
1.18 perry 1709: static inline boolean_t pmap_clear_bit(paddr_t, u_int);
1710: static inline boolean_t
1.3 fredette 1711: pmap_clear_bit(paddr_t pa, u_int tlbprot_bit)
1.1 fredette 1712: {
1713: int table_off;
1.3 fredette 1714: struct pv_head *hpv;
1715: u_int pv_head_bit;
1.1 fredette 1716: boolean_t ret;
1717: int s;
1718:
1719: table_off = pmap_table_find_pa(pa);
1720: KASSERT(table_off >= 0);
1.3 fredette 1721: hpv = pv_head_tbl + table_off;
1722: pv_head_bit = (tlbprot_bit == TLB_REF ? PV_HEAD_REF : PV_HEAD_DIRTY);
1.1 fredette 1723: s = splvm();
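	/*
	 * _pmap_pv_update() with a zero value presumably clears the
	 * bit in every mapping of the page; then test and clear the
	 * cached modref bit in the pv_head entry.
	 */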
1.3 fredette 1724: _pmap_pv_update(pa, NULL, tlbprot_bit, 0);
1725: ret = hpv->pv_head_writable_dirty_ref & pv_head_bit;
1726: hpv->pv_head_writable_dirty_ref &= ~pv_head_bit;
1.1 fredette 1727: splx(s);
1728: return ret;
1729: }
1730:
1731: /*
1732: * Given a PA and a bit, this tests that bit in the modref
1733: * information for the PA.
1734: */
1.18 perry 1735: static inline boolean_t pmap_test_bit(paddr_t, u_int);
1736: static inline boolean_t
1.3 fredette 1737: pmap_test_bit(paddr_t pa, u_int tlbprot_bit)
1.1 fredette 1738: {
1739: int table_off;
1.3 fredette 1740: struct pv_head *hpv;
1741: u_int pv_head_bit;
1.1 fredette 1742: struct pv_entry *pv;
1743: boolean_t ret;
1744: int s;
1745:
1746: table_off = pmap_table_find_pa(pa);
1747: KASSERT(table_off >= 0);
1.3 fredette 1748: hpv = pv_head_tbl + table_off;
1749: pv_head_bit = (tlbprot_bit == TLB_REF ? PV_HEAD_REF : PV_HEAD_DIRTY);
1.1 fredette 1750: s = splvm();
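	/*
	 * Check the cached modref bit first; if it is clear, scan the
	 * managed mappings and cache any set bit that turns up.
	 */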
1.3 fredette 1751: ret = (hpv->pv_head_writable_dirty_ref & pv_head_bit) != 0;
1.1 fredette 1752: if (!ret) {
1.3 fredette 1753: for (pv = hpv->pv_head_pvs;
1.1 fredette 1754: pv != NULL;
1755: pv = pv->pv_next) {
1.3 fredette 1756: if ((pv->pv_tlbprot & (TLB_UNMANAGED | tlbprot_bit)) ==
1757: tlbprot_bit) {
1758: hpv->pv_head_writable_dirty_ref |= pv_head_bit;
1.1 fredette 1759: ret = TRUE;
1760: break;
1761: }
1762: }
1763: }
1764: splx(s);
1765: return ret;
1766: }
1767:
1768: /*
1769: * pmap_clear_modify(pa)
1770: * clears the hardware modified ("dirty") bit for one
1771:  *	machine independent page starting at the given
1772:  *	physical address. phys must be aligned on a machine
1773:  *	independent page boundary.
1774: */
1775: boolean_t
1.11 chs 1776: pmap_clear_modify(struct vm_page *pg)
1.1 fredette 1777: {
1.10 chs 1778: paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1 fredette 1779: boolean_t ret = pmap_clear_bit(pa, TLB_DIRTY);
1780: PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
1781: return ret;
1782: }
1783:
1784: /*
1785: * pmap_is_modified(pa)
1786: * returns TRUE if the given physical page has been modified
1787: * since the last call to pmap_clear_modify().
1788: */
1789: boolean_t
1.11 chs 1790: pmap_is_modified(struct vm_page *pg)
1.1 fredette 1791: {
1.10 chs 1792: paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1 fredette 1793: boolean_t ret = pmap_test_bit(pa, TLB_DIRTY);
1794: PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
1795: return ret;
1796: }
1797:
1798: /*
1799: * pmap_clear_reference(pa)
1800: * clears the hardware referenced bit in the given machine
1801:  *	independent physical page.
1802: *
1803: * Currently, we treat a TLB miss as a reference; i.e. to clear
1804: * the reference bit we flush all mappings for pa from the TLBs.
1805: */
1806: boolean_t
1.11 chs 1807: pmap_clear_reference(struct vm_page *pg)
1.1 fredette 1808: {
1.10 chs 1809: paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1 fredette 1810: boolean_t ret = pmap_clear_bit(pa, TLB_REF);
1811: PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
1812: return ret;
1813: }
1814:
1815: /*
1816: * pmap_is_referenced(pa)
1817: * returns TRUE if the given physical page has been referenced
1818: * since the last call to pmap_clear_reference().
1819: */
1820: boolean_t
1.11 chs 1821: pmap_is_referenced(struct vm_page *pg)
1.1 fredette 1822: {
1.10 chs 1823: paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1 fredette 1824: boolean_t ret = pmap_test_bit(pa, TLB_REF);
1825: PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
1826: return ret;
1827: }
1828:
1829: void
1.11 chs 1830: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1.1 fredette 1831: {
1.12 chs 1832: u_int tlbprot;
1.1 fredette 1833: int s;
1834: #ifdef PMAPDEBUG
1835: int opmapdebug = pmapdebug;
1836:
1837: /*
1838: * If we're being told to map page zero, we can't
1839: * call printf() at all, because doing so would
1840: * lead to an infinite recursion on this call.
1841: * (printf requires page zero to be mapped).
1842: */
1843: if (va == 0)
1844: pmapdebug = 0;
1845: #endif /* PMAPDEBUG */
1846:
1847: PMAP_PRINTF(PDB_KENTER, ("(%p, %p, %x)\n",
1848: (caddr_t)va, (caddr_t)pa, prot));
1849: va = hppa_trunc_page(va);
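	/*
	 * Mappings entered here are wired and marked TLB_UNMANAGED,
	 * so pmap_page_protect() and the modref code ignore them.
	 */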
1.12 chs 1850: tlbprot = TLB_WIRED | TLB_UNMANAGED;
1851: tlbprot |= (prot & PMAP_NC) ? TLB_UNCACHEABLE : 0;
1852: tlbprot |= pmap_prot(pmap_kernel(), prot & VM_PROT_ALL);
1.1 fredette 1853: s = splvm();
1854: KASSERT(pmap_pv_find_va(HPPA_SID_KERNEL, va) == NULL);
1.12 chs 1855: pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, va, pa, tlbprot);
1.1 fredette 1856: splx(s);
1.12 chs 1857:
1.1 fredette 1858: #ifdef PMAPDEBUG
1859: pmapdebug = opmapdebug;
1860: #endif /* PMAPDEBUG */
1861: }
1862:
1863: void
1.11 chs 1864: pmap_kremove(vaddr_t va, vsize_t size)
1.1 fredette 1865: {
1.10 chs 1866: struct pv_entry *pv;
1.1 fredette 1867: int s;
1868: #ifdef PMAPDEBUG
1869: int opmapdebug = pmapdebug;
1870:
1871: /*
1872: * If we're being told to unmap page zero, we can't
1873: * call printf() at all, because doing so would
1874: * lead to an infinite recursion on this call.
1875: * (printf requires page zero to be mapped).
1876: */
1877: if (va == 0)
1878: pmapdebug = 0;
1879: #endif /* PMAPDEBUG */
1880:
1881: PMAP_PRINTF(PDB_KENTER, ("(%p, %x)\n",
1882: (caddr_t)va, (u_int)size));
1883:
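	/*
	 * Round the range outward to page boundaries: grow size by
	 * va's offset within its page, then truncate va.
	 */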
1884: size += va;
1885: va = hppa_trunc_page(va);
1886: size -= va;
1887: s = splvm();
1888: for (size = hppa_round_page(size); size;
1889: size -= PAGE_SIZE, va += PAGE_SIZE) {
1890: pv = pmap_pv_find_va(HPPA_SID_KERNEL, va);
1891: if (pv) {
1.3 fredette 1892: KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) != 0);
1.1 fredette 1893: pmap_pv_remove(pv);
1894: } else {
1895: PMAP_PRINTF(PDB_REMOVE, (": no pv for %p\n",
1896: (caddr_t)va));
1897: }
1898: }
1899: splx(s);
1900: #ifdef PMAPDEBUG
1901: pmapdebug = opmapdebug;
1902: #endif /* PMAPDEBUG */
1903: }
1904:
1905: /*
1906: * pmap_redzone(sva, eva, create)
1907: * creates or removes a red zone in already mapped and wired memory,
1908: * from [sva, eva) in the kernel map.
1909: */
1910: void
1911: pmap_redzone(vaddr_t sva, vaddr_t eva, int create)
1912: {
1913: vaddr_t va;
1914: struct pv_entry *pv;
1915: u_int tlbprot;
1916: int s;
1917:
1918: sva = hppa_trunc_page(sva);
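	/*
	 * Creating a red zone revokes all access (TLB_AR_NA);
	 * removing one restores kernel read/write (TLB_AR_KRW).
	 */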
1919: tlbprot = (create ? TLB_AR_NA : TLB_AR_KRW);
1920: s = splvm();
1921: 	for (va = sva; va < eva; va += PAGE_SIZE) {
1922: pv = pmap_pv_find_va(HPPA_SID_KERNEL, va);
1923: KASSERT(pv != NULL);
1924: /*
1925: * Compare new protection with old to see if
1926: * anything needs to be changed.
1927: */
1928: if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot)
1929: pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
1930: }
1931: splx(s);
1932: }
1933:
1934: #if defined(PMAPDEBUG) && defined(DDB)
1935: #include <ddb/db_output.h>
1936: /*
1937: * prints whole va->pa (aka HPT or HVT)
1938: */
1939: void
1.11 chs 1940: pmap_hptdump(void)
1.1 fredette 1941: {
1.10 chs 1942: struct hpt_entry *hpt, *ehpt;
1943: struct pv_entry *pv;
1.1 fredette 1944:
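	/*
	 * CR_VTOP holds the base of the HPT and CR_HPTMASK its size
	 * mask, so base + mask + 1 is one past the end of the table.
	 */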
1945: mfctl(CR_HPTMASK, ehpt);
1946: mfctl(CR_VTOP, hpt);
1947: ehpt = (struct hpt_entry *)((int)hpt + (int)ehpt + 1);
1948: db_printf("HPT dump %p-%p:\n", hpt, ehpt);
1949: for (; hpt < ehpt; hpt++)
1950: if (hpt->hpt_valid || hpt->hpt_entry) {
1.19 skrll 1951: char buf[128];
1952:
1953: bitmask_snprintf(hpt->hpt_tlbprot, TLB_BITS, buf,
1954: sizeof(buf));
1955: db_printf("hpt@%p: %x{%sv=%x:%x},%s,%x\n",
1.1 fredette 1956: hpt, *(int *)hpt, (hpt->hpt_valid?"ok,":""),
1957: hpt->hpt_space, hpt->hpt_vpn << 9,
1.19 skrll 1958: buf, tlbptob(hpt->hpt_tlbpage));
1959:
1960: for (pv = hpt->hpt_entry; pv; pv = pv->pv_hash) {
1961: bitmask_snprintf(hpt->hpt_tlbprot, TLB_BITS, buf,
1962: sizeof(buf));
1963: db_printf(" pv={%p,%x:%x,%s,%x}->%p\n",
1.1 fredette 1964: pv->pv_pmap, pv->pv_space, pv->pv_va,
1.19 skrll 1965: buf, tlbptob(pv->pv_tlbpage), pv->pv_hash);
1966: }
1.1 fredette 1967: }
1968: }
1969: #endif