Annotation of src/sys/arch/ia64/ia64/pmap.c, Revision 1.28.2.2
1.28.2.2! tls 1: /* $NetBSD$ */
1.2 cherry 2:
3:
4: /*-
5: * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10: * NASA Ames Research Center and by Chris G. Demetriou.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: *
21: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31: * POSSIBILITY OF SUCH DAMAGE.
32: */
33:
34: /*-
35: * Copyright (c) 1991 Regents of the University of California.
36: * All rights reserved.
37: * Copyright (c) 1994 John S. Dyson
38: * All rights reserved.
39: * Copyright (c) 1994 David Greenman
40: * All rights reserved.
41: * Copyright (c) 1998,2000 Doug Rabson
42: * All rights reserved.
43: *
44: * This code is derived from software contributed to Berkeley by
45: * the Systems Programming Group of the University of Utah Computer
46: * Science Department and William Jolitz of UUNET Technologies Inc.
47: *
48: * Redistribution and use in source and binary forms, with or without
49: * modification, are permitted provided that the following conditions
50: * are met:
51: * 1. Redistributions of source code must retain the above copyright
52: * notice, this list of conditions and the following disclaimer.
53: * 2. Redistributions in binary form must reproduce the above copyright
54: * notice, this list of conditions and the following disclaimer in the
55: * documentation and/or other materials provided with the distribution.
56: * 3. All advertising materials mentioning features or use of this software
57: * must display the following acknowledgement:
58: * This product includes software developed by the University of
59: * California, Berkeley and its contributors.
60: * 4. Neither the name of the University nor the names of its contributors
61: * may be used to endorse or promote products derived from this software
62: * without specific prior written permission.
63: *
64: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
65: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
66: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
67: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
68: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
69: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
70: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
71: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
72: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
73: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
74: * SUCH DAMAGE.
75: *
76: * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
77: * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
78: * with some ideas from NetBSD's alpha pmap
79: */
80:
81: /* __FBSDID("$FreeBSD: src/sys/ia64/ia64/pmap.c,v 1.172 2005/11/20 06:09:48 alc Exp $"); */
82:
83:
 84: /* XXX: This module is a mess. Need to clean up locking, list traversal, etc. */
1.1 cherry 85:
86: #include <sys/cdefs.h>
87:
1.28.2.2! tls 88: __KERNEL_RCSID(0, "$NetBSD$");
1.1 cherry 89:
90: #include <sys/param.h>
91: #include <sys/systm.h>
92: #include <sys/buf.h>
93: #include <sys/reboot.h>
94: #include <sys/lock.h>
95:
96: #include <uvm/uvm.h>
97:
98: #include <machine/pal.h>
99: #include <machine/atomic.h>
100: #include <machine/pte.h>
101: #include <sys/sched.h>
102: #include <machine/cpufunc.h>
103: #include <machine/md_var.h>
104:
105:
106: /*
107: * Kernel virtual memory management.
108: */
109: static int nkpt;
110: struct ia64_lpte **ia64_kptdir;
111: #define KPTE_DIR_INDEX(va) \
112: ((va >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
113: #define KPTE_PTE_INDEX(va) \
114: ((va >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1))
115: #define NKPTEPG (PAGE_SIZE / sizeof(struct ia64_lpte))
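/*
 * Editorial worked example of the macros above (a sketch, assuming the
 * usual 8 KB pages, i.e. PAGE_SHIFT == 13, and the 32-byte struct
 * ia64_lpte):
 *
 *	KPTE_DIR_INDEX(va) == (va >> 21) & 0x3ff  -- directory slot
 *	KPTE_PTE_INDEX(va) == (va >> 13) & 0xff   -- lpte within that page
 *	NKPTEPG == 8192 / 32 == 256
 *
 * so one directory page holds 1024 pointers, each naming a page of 256
 * lptes; a single directory page then covers 1024 * 256 * 8 KB of KVA.
 */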
116:
117:
118: /* Values for ptc.e. XXX values for SKI. */
119: static uint64_t pmap_ptc_e_base = 0x100000000;
120: static uint64_t pmap_ptc_e_count1 = 3;
121: static uint64_t pmap_ptc_e_count2 = 2;
122: static uint64_t pmap_ptc_e_stride1 = 0x2000;
123: static uint64_t pmap_ptc_e_stride2 = 0x100000000;
1.12 kochi 124: kmutex_t pmap_ptc_lock; /* Global PTC lock */
1.1 cherry 125:
126: /* VHPT Base */
127:
128: vaddr_t vhpt_base;
129: vaddr_t pmap_vhpt_log2size;
130:
131: struct ia64_bucket *pmap_vhpt_bucket;
132: int pmap_vhpt_nbuckets;
1.28.2.2! tls 133: kmutex_t pmap_vhptlock; /* VHPT collision chain lock */
1.1 cherry 134:
135: int pmap_vhpt_inserts;
136: int pmap_vhpt_resident;
137: int pmap_vhpt_collisions;
138:
139: #ifdef DEBUG
140: static void dump_vhpt(void);
141: #endif
142:
143: /*
144: * Data for the RID allocator
145: */
146: static int pmap_ridcount;
147: static int pmap_rididx;
148: static int pmap_ridmapsz;
149: static int pmap_ridmax;
150: static uint64_t *pmap_ridmap;
1.12 kochi 151: kmutex_t pmap_rid_lock; /* RID allocator lock */
1.1 cherry 152:
153:
1.5 thorpej 154: bool pmap_initialized; /* Has pmap_init completed? */
1.1 cherry 155: u_long pmap_pages_stolen; /* instrumentation */
156:
1.14 pooka 157: static struct pmap kernel_pmap_store; /* the kernel's pmap (proc0) */
1.21 kiyohara 158: struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
1.1 cherry 159:
 160: static vaddr_t kernel_vm_end;	/* VA of last avail page (end of kernel address space) */
161:
162: /*
163: * This variable contains the number of CPU IDs we need to allocate
164: * space for when allocating the pmap structure. It is used to
165: * size a per-CPU array of ASN and ASN Generation number.
166: */
167: u_long pmap_ncpuids;
168:
169: #ifndef PMAP_PV_LOWAT
170: #define PMAP_PV_LOWAT 16
171: #endif
172: int pmap_pv_lowat = PMAP_PV_LOWAT;
173:
174: /*
175: * PV table management functions.
176: */
177: void *pmap_pv_page_alloc(struct pool *, int);
178: void pmap_pv_page_free(struct pool *, void *);
179:
180: struct pool_allocator pmap_pv_page_allocator = {
181: pmap_pv_page_alloc, pmap_pv_page_free, 0,
182: };
183:
1.5 thorpej 184: bool pmap_poolpage_alloc(paddr_t *);
1.1 cherry 185: void pmap_poolpage_free(paddr_t);
186:
187: /*
188: * List of all pmaps, used to update them when e.g. additional kernel
189: * page tables are allocated. This list is kept LRU-ordered by
 190:  * pmap_activate(). XXX: Check on this.
191: */
192: TAILQ_HEAD(, pmap) pmap_all_pmaps;
193:
194: /*
195: * The pools from which pmap structures and sub-structures are allocated.
196: */
197: struct pool pmap_pmap_pool;
198: struct pool pmap_ia64_lpte_pool;
199: struct pool pmap_pv_pool;
200:
1.12 kochi 201: kmutex_t pmap_main_lock;
202: kmutex_t pmap_all_pmaps_slock;
1.1 cherry 203:
204: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1.12 kochi 205: /* XXX(kochi) need to use only spin lock? */
1.1 cherry 206: #define PMAP_MAP_TO_HEAD_LOCK() \
207: spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
208: #define PMAP_MAP_TO_HEAD_UNLOCK() \
209: spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
210: #define PMAP_HEAD_TO_MAP_LOCK() \
211: spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
212: #define PMAP_HEAD_TO_MAP_UNLOCK() \
213: spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
214: #else
215: #define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
216: #define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
217: #define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
218: #define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
219: #endif /* MULTIPROCESSOR || LOCKDEBUG */
220:
221:
1.28.2.2! tls 222: #define pmap_accessed(lpte) ((lpte)->pte & PTE_ACCESSED)
! 223: #define pmap_dirty(lpte) ((lpte)->pte & PTE_DIRTY)
! 224: #define pmap_managed(lpte) ((lpte)->pte & PTE_MANAGED)
! 225: #define pmap_ppn(lpte) ((lpte)->pte & PTE_PPN_MASK)
! 226: #define pmap_present(lpte) ((lpte)->pte & PTE_PRESENT)
! 227: #define pmap_prot(lpte) (((lpte)->pte & PTE_PROT_MASK) >> 56)
! 228: #define pmap_wired(lpte) ((lpte)->pte & PTE_WIRED)
! 229:
! 230: #define pmap_clear_accessed(lpte) (lpte)->pte &= ~PTE_ACCESSED
! 231: #define pmap_clear_dirty(lpte) (lpte)->pte &= ~PTE_DIRTY
! 232: #define pmap_clear_present(lpte) (lpte)->pte &= ~PTE_PRESENT
! 233: #define pmap_clear_wired(lpte) (lpte)->pte &= ~PTE_WIRED
1.1 cherry 234:
1.28.2.2! tls 235: #define pmap_set_wired(lpte) (lpte)->pte |= PTE_WIRED
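/*
 * Editorial note on the accessors above: judging from pmap_pte_prot()
 * and pmap_set_pte() below, PTE_MANAGED and PTE_WIRED are software-only
 * bits (wiring "is not a hardware characteristic", as pmap_unwire()
 * notes), and pmap_prot() recovers the VM_PROT_* value that
 * pmap_pte_prot() stores in bits 63:56; the remaining masks mirror the
 * architected long-format PTE fields.
 */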
1.1 cherry 236:
237:
238: /*
239: * The VHPT bucket head structure.
240: */
241: struct ia64_bucket {
1.12 kochi 242: uint64_t chain;
243: kmutex_t lock;
244: u_int length;
1.1 cherry 245: };
246:
247:
248: /* Local Helper functions */
249:
250: static void pmap_invalidate_all(pmap_t);
251: static void pmap_invalidate_page(pmap_t, vaddr_t);
252:
253: static pmap_t pmap_switch(pmap_t pm);
254: static pmap_t pmap_install(pmap_t);
255:
256: static struct ia64_lpte *pmap_find_kpte(vaddr_t);
257:
258: static void
1.5 thorpej 259: pmap_set_pte(struct ia64_lpte *, vaddr_t, vaddr_t, bool, bool);
1.1 cherry 260: static void
261: pmap_free_pte(struct ia64_lpte *pte, vaddr_t va);
262:
263: static __inline void
264: pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot);
265: static int
266: pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vaddr_t va,
267: pv_entry_t pv, int freepte);
268:
269: static struct ia64_lpte *
270: pmap_find_pte(vaddr_t va);
271: static int
272: pmap_remove_entry(pmap_t pmap, struct vm_page * pg, vaddr_t va, pv_entry_t pv);
273: static void
274: pmap_insert_entry(pmap_t pmap, vaddr_t va, struct vm_page *pg);
275:
276: static void
277: pmap_enter_vhpt(struct ia64_lpte *, vaddr_t);
278: static int pmap_remove_vhpt(vaddr_t);
279: static struct ia64_lpte *
280: pmap_find_vhpt(vaddr_t);
281: void
282: pmap_page_purge(struct vm_page * pg);
283: static void
284: pmap_remove_page(pmap_t pmap, vaddr_t va);
285:
286:
1.21 kiyohara 287: static uint32_t pmap_allocate_rid(void);
1.1 cherry 288: static void pmap_free_rid(uint32_t rid);
289:
290: static vaddr_t
291: pmap_steal_vhpt_memory(vsize_t);
292:
293: /*
294: * pmap_steal_memory: [ INTERFACE ]
295: *
296: * Bootstrap memory allocator (alternative to uvm_pageboot_alloc()).
297: * This function allows for early dynamic memory allocation until the
298: * virtual memory system has been bootstrapped. After that point, either
299: * kmem_alloc or malloc should be used. This function works by stealing
300: * pages from the (to be) managed page pool, then implicitly mapping the
301: * pages (by using their RR7 addresses) and zeroing them.
302: *
303: * It may be used once the physical memory segments have been pre-loaded
304: * into the vm_physmem[] array. Early memory allocation MUST use this
305: * interface! This cannot be used after uvm_page_init(), and will
306: * generate a panic if tried.
307: *
308: * Note that this memory will never be freed, and in essence it is wired
309: * down.
310: *
311: * We must adjust *vstartp and/or *vendp iff we use address space
312: * from the kernel virtual address range defined by pmap_virtual_space().
313: *
314: * Note: no locking is necessary in this function.
315: */
316: vaddr_t
317: pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
318: {
319: int lcv, npgs, x;
320: vaddr_t va;
321: paddr_t pa;
322:
323: size = round_page(size);
324: npgs = atop(size);
325:
326: for (lcv = 0; lcv < vm_nphysseg; lcv++) {
1.6 thorpej 327: if (uvm.page_init_done == true)
1.1 cherry 328: panic("pmap_steal_memory: called _after_ bootstrap");
329:
1.26 uebayasi 330: if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start ||
331: VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
1.1 cherry 332: continue;
333:
1.26 uebayasi 334: if ((VM_PHYSMEM_PTR(lcv)->avail_end - VM_PHYSMEM_PTR(lcv)->avail_start)
1.1 cherry 335: < npgs)
336: continue;
337:
338: /*
339: * There are enough pages here; steal them!
340: */
1.26 uebayasi 341: pa = ptoa(VM_PHYSMEM_PTR(lcv)->avail_start);
342: VM_PHYSMEM_PTR(lcv)->avail_start += npgs;
343: VM_PHYSMEM_PTR(lcv)->start += npgs;
1.1 cherry 344:
345: /*
346: * Have we used up this segment?
347: */
1.28.2.2! tls 348: if (VM_PHYSMEM_PTR(lcv)->avail_start ==
! 349: VM_PHYSMEM_PTR(lcv)->end) {
1.1 cherry 350: if (vm_nphysseg == 1)
351: panic("pmap_steal_memory: out of memory!");
352:
353: /* Remove this segment from the list. */
354: vm_nphysseg--;
355: for (x = lcv; x < vm_nphysseg; x++) {
356: /* structure copy */
1.26 uebayasi 357: VM_PHYSMEM_PTR_SWAP(x, x + 1);
1.1 cherry 358: }
359: }
360:
361: va = IA64_PHYS_TO_RR7(pa);
1.7 christos 362: memset((void *)va, 0, size);
1.1 cherry 363: pmap_pages_stolen += npgs;
1.12 kochi 364: return va;
1.1 cherry 365: }
366:
367: /*
 368: 	 * If we got here, there was no memory left.
369: */
370: panic("pmap_steal_memory: no memory to steal");
371: }
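/*
 * Editorial usage sketch (hypothetical table, not from this file): a
 * bootstrap-time array could be grabbed as
 *
 *	tbl = (void *)pmap_steal_memory(nbuckets *
 *	    sizeof(struct ia64_bucket), NULL, NULL);
 *
 * since this implementation never touches *vstartp or *vendp.  The
 * returned address is an RR7 (direct-mapped) one, the pages are zeroed,
 * and they can never be given back.
 */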
372:
373: /*
374: * pmap_steal_vhpt_memory: Derived from alpha/pmap.c:pmap_steal_memory()
375: * Note: This function is not visible outside the pmap module.
377: * Assumptions: size is always a power of 2.
378: * Returns: Allocated memory at a naturally aligned address
379: */
380: static vaddr_t
381: pmap_steal_vhpt_memory(vsize_t size)
382: {
383: int lcv, npgs, x;
384: vaddr_t va;
385: paddr_t pa;
386: paddr_t vhpt_start = 0, start1, start2, end1, end2;
387:
388: size = round_page(size);
389: npgs = atop(size);
390:
391: for (lcv = 0; lcv < vm_nphysseg; lcv++) {
1.6 thorpej 392: if (uvm.page_init_done == true)
1.1 cherry 393: panic("pmap_vhpt_steal_memory: called _after_ bootstrap");
394:
1.26 uebayasi 395: if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start || /* XXX: ??? */
396: VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
1.1 cherry 397: continue;
398:
399: /* Break off a VHPT sized, aligned chunk off this segment. */
400:
1.26 uebayasi 401: start1 = VM_PHYSMEM_PTR(lcv)->avail_start;
1.1 cherry 402:
403: /* Align requested start address on requested size boundary */
404: end1 = vhpt_start = roundup(start1, npgs);
405:
406: start2 = vhpt_start + npgs;
1.26 uebayasi 407: end2 = VM_PHYSMEM_PTR(lcv)->avail_end;
1.1 cherry 408:
 409: 		/* Case 1: Doesn't fit; skip this segment. */
410:
411: if (start2 > end2) {
412: vhpt_start = 0;
413: continue;
414: }
415:
416: /* For all cases of fit:
417: * - Remove segment.
418: * - Re-insert fragments via uvm_page_physload();
419: */
420:
1.28.2.2! tls 421: /*
1.1 cherry 422: * We _fail_ on a vhpt request which exhausts memory.
423: */
424: if (start1 == end1 &&
425: start2 == end2 &&
426: vm_nphysseg == 1) {
1.28.2.2! tls 427: #ifdef DEBUG
! 428: printf("pmap_vhpt_steal_memory: out of memory!");
1.1 cherry 429: #endif
1.28.2.2! tls 430: return -1;
! 431: }
1.1 cherry 432:
433: /* Remove this segment from the list. */
434: vm_nphysseg--;
1.28.2.2! tls 435: for (x = lcv; x < vm_nphysseg; x++)
1.1 cherry 436: /* structure copy */
1.26 uebayasi 437: VM_PHYSMEM_PTR_SWAP(x, x + 1);
1.1 cherry 438:
439: /* Case 2: Perfect fit - skip segment reload. */
440:
441: if (start1 == end1 && start2 == end2) break;
442:
1.28.2.2! tls  443: 		/* Case 3: Left unfit - reload it. */
1.1 cherry 445:
1.28.2.2! tls 446: if (start1 != end1)
1.1 cherry 447: uvm_page_physload(start1, end1, start1, end1,
448: VM_FREELIST_DEFAULT);
1.28.2.2! tls 449:
1.1 cherry 450: /* Case 4: Right unfit - reload it. */
451:
1.28.2.2! tls 452: if (start2 != end2)
1.1 cherry 453: uvm_page_physload(start2, end2, start2, end2,
454: VM_FREELIST_DEFAULT);
455:
 456: 		/* Case 5: Both unfit - redundant, isn't it? */
457: break;
458: }
459:
460: /*
461: * If we got here, we couldn't find a fit.
462: */
463: if (vhpt_start == 0) {
464: #ifdef DEBUG
465: printf("pmap_steal_vhpt_memory: no VHPT aligned fit found.");
466: #endif
467: return -1;
468: }
469:
470: /*
471: * There are enough pages here; steal them!
472: */
473: pa = ptoa(vhpt_start);
474: va = IA64_PHYS_TO_RR7(pa);
1.7 christos 475: memset((void *)va, 0, size);
1.1 cherry 476: pmap_pages_stolen += npgs;
1.12 kochi 477: return va;
1.1 cherry 478: }
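/*
 * Editorial worked example (hypothetical page numbers): stealing a 4 MB
 * VHPT with 8 KB pages means npgs == 512.  For a segment with
 * avail_start == 100 and avail_end == 2000:
 *
 *	vhpt_start = roundup(100, 512) == 512	(naturally aligned)
 *
 * so [100, 512) is reloaded as the left fragment (Case 3) and
 * [1024, 2000) as the right fragment (Case 4).
 */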
479:
480: /*
481: * pmap_bootstrap:
482: *
483: * Bootstrap the system to run with virtual memory.
484: *
485: * Note: no locking is necessary in this function.
486: */
487: void
1.17 cegger 488: pmap_bootstrap(void)
1.1 cherry 489: {
490: struct ia64_pal_result res;
491: vaddr_t base, limit;
492: size_t size;
493: vsize_t bufsz;
494:
495: int i, ridbits;
496:
497: /*
498: * Query the PAL Code to find the loop parameters for the
499: * ptc.e instruction.
500: */
501: res = ia64_call_pal_static(PAL_PTCE_INFO, 0, 0, 0);
502: if (res.pal_status != 0)
503: panic("Can't configure ptc.e parameters");
504: pmap_ptc_e_base = res.pal_result[0];
505: pmap_ptc_e_count1 = res.pal_result[1] >> 32;
506: pmap_ptc_e_count2 = res.pal_result[1] & ((1L<<32) - 1);
507: pmap_ptc_e_stride1 = res.pal_result[2] >> 32;
508: pmap_ptc_e_stride2 = res.pal_result[2] & ((1L<<32) - 1);
509: if (bootverbose)
510: printf("ptc.e base=0x%lx, count1=%ld, count2=%ld, "
511: "stride1=0x%lx, stride2=0x%lx\n",
512: pmap_ptc_e_base,
513: pmap_ptc_e_count1,
514: pmap_ptc_e_count2,
515: pmap_ptc_e_stride1,
516: pmap_ptc_e_stride2);
1.21 kiyohara 517: mutex_init(&pmap_ptc_lock, MUTEX_DEFAULT, IPL_VM);
1.1 cherry 518:
519: /*
520: * Setup RIDs. RIDs 0..7 are reserved for the kernel.
521: *
522: * We currently need at least 19 bits in the RID because PID_MAX
523: * can only be encoded in 17 bits and we need RIDs for 5 regions
524: * per process. With PID_MAX equalling 99999 this means that we
525: * need to be able to encode 499995 (=5*PID_MAX).
526: * The Itanium processor only has 18 bits and the architected
527: * minimum is exactly that. So, we cannot use a PID based scheme
528: * in those cases. Enter pmap_ridmap...
529: * We should avoid the map when running on a processor that has
530: * implemented enough bits. This means that we should pass the
531: * process/thread ID to pmap. This we currently don't do, so we
532: * use the map anyway. However, we don't want to allocate a map
533: * that is large enough to cover the range dictated by the number
534: * of bits in the RID, because that may result in a RID map of
535: * 2MB in size for a 24-bit RID. A 64KB map is enough.
 536: 	 * The bottom line: we create a 32KB map when the processor only
537: * implements 18 bits (or when we can't figure it out). Otherwise
538: * we create a 64KB map.
539: */
540: res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0);
541: if (res.pal_status != 0) {
542: if (bootverbose)
543: printf("Can't read VM Summary - assuming 18 Region ID bits\n");
544: ridbits = 18; /* guaranteed minimum */
545: } else {
546: ridbits = (res.pal_result[1] >> 8) & 0xff;
547: if (bootverbose)
548: printf("Processor supports %d Region ID bits\n",
549: ridbits);
550: }
551: if (ridbits > 19)
552: ridbits = 19;
553:
554: pmap_ridmax = (1 << ridbits);
555: pmap_ridmapsz = pmap_ridmax / 64;
556: pmap_ridmap = (uint64_t *)uvm_pageboot_alloc(pmap_ridmax / 8);
557: pmap_ridmap[0] |= 0xff;
558: pmap_rididx = 0;
559: pmap_ridcount = 8;
560:
 561: 	/* XXX: The FreeBSD pmap.c initialises this like this:
562: * mtx_init(&pmap_ridmutex, "RID allocator lock", NULL, MTX_DEF);
563: * MTX_DEF can *sleep*.
564: */
1.21 kiyohara 565: mutex_init(&pmap_rid_lock, MUTEX_DEFAULT, IPL_VM);
1.1 cherry 566:
567: /*
568: * Compute the number of pages kmem_map will have.
569: */
570: kmeminit_nkmempages();
571:
572: /*
573: * Figure out how many initial PTE's are necessary to map the
574: * kernel. We also reserve space for kmem_alloc_pageable()
575: * for vm_fork().
576: */
577:
578: /* Get size of buffer cache and set an upper limit */
579: bufsz = buf_memcalc();
580: buf_setvalimit(bufsz);
581:
1.21 kiyohara 582: nkpt = (((ubc_nwins << ubc_winshift) + uvm_emap_size +
1.11 yamt 583: bufsz + 16 * NCARGS + pager_map_size) / PAGE_SIZE +
1.1 cherry 584: USRIOSIZE + (maxproc * UPAGES) + nkmempages) / NKPTEPG;
585:
586: /*
587: * Allocate some memory for initial kernel 'page tables'.
588: */
589: ia64_kptdir = (void *)uvm_pageboot_alloc((nkpt + 1) * PAGE_SIZE);
1.28.2.2! tls 590: for (i = 0; i < nkpt; i++)
! 591: ia64_kptdir[i] =
! 592: (void*)((vaddr_t)ia64_kptdir + PAGE_SIZE * (i + 1));
1.1 cherry 593:
594: kernel_vm_end = nkpt * PAGE_SIZE * NKPTEPG + VM_MIN_KERNEL_ADDRESS -
1.28.2.2! tls 595: VM_GATEWAY_SIZE;
! 596:
1.1 cherry 597: /*
598: * Initialize the pmap pools and list.
599: */
600: pmap_ncpuids = pmap_ridmax;
1.8 ad 601: pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
602: &pool_allocator_nointr, IPL_NONE); /* This may block. */
1.1 cherry 603:
 604: 	/* XXX: Need to convert ia64_kptdir[][] to a pool? */
605:
1.28.2.2! tls 606: /* The default pool allocator uses uvm_km_alloc & friends.
! 607: * XXX: We should be using regular vm_alloced mem for regular,
        !  608: 	 * non-kernel ptes.
1.1 cherry 609: */
610:
1.8 ad 611: pool_init(&pmap_ia64_lpte_pool, sizeof (struct ia64_lpte),
1.28.2.2! tls 612: sizeof(void *), 0, 0, "ptpl", NULL, IPL_NONE);
1.1 cherry 613:
1.8 ad 614: pool_init(&pmap_pv_pool, sizeof (struct pv_entry), sizeof(void *),
615: 0, 0, "pvpl", &pmap_pv_page_allocator, IPL_NONE);
1.1 cherry 616:
617: TAILQ_INIT(&pmap_all_pmaps);
618:
619: /*
620: * Figure out a useful size for the VHPT, based on the size of
621: * physical memory and try to locate a region which is large
622: * enough to contain the VHPT (which must be a power of two in
623: * size and aligned to a natural boundary).
624: * We silently bump up the VHPT size to the minimum size if the
625: * user has set the tunable too small. Likewise, the VHPT size
626: * is silently capped to the maximum allowed.
627: */
628:
629: pmap_vhpt_log2size = PMAP_VHPT_LOG2SIZE;
630:
631: if (pmap_vhpt_log2size == 0) {
632: pmap_vhpt_log2size = 15;
633: size = 1UL << pmap_vhpt_log2size;
634: while (size < physmem * 32) {
635: pmap_vhpt_log2size++;
636: size <<= 1;
1.28.2.2! tls 637: }
! 638: } else
! 639: if (pmap_vhpt_log2size < 15)
! 640: pmap_vhpt_log2size = 15;
1.1 cherry 641:
642: if (pmap_vhpt_log2size > 61) pmap_vhpt_log2size = 61;
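	/*
	 * Editorial example of the sizing rule above: with 1 GB of RAM
	 * and 8 KB pages, physmem == 131072, so the loop grows the
	 * table until 1UL << pmap_vhpt_log2size >= physmem * 32, i.e.
	 * pmap_vhpt_log2size == 22 (a 4 MB VHPT) -- roughly one
	 * 32-byte entry per physical page.
	 */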
643:
644: vhpt_base = 0;
645: base = limit = 0;
646: size = 1UL << pmap_vhpt_log2size;
647: while (vhpt_base == 0 && size) {
648: if (bootverbose)
649: printf("Trying VHPT size 0x%lx\n", size);
650:
651: /* allocate size bytes aligned at size */
652: /* #ifdef MULTIPROCESSOR, then (size * MAXCPU) bytes */
1.28.2.2! tls 653: base = pmap_steal_vhpt_memory(size);
1.1 cherry 654:
655: if (!base) {
656: /* Can't fit, try next smaller size. */
657: pmap_vhpt_log2size--;
658: size >>= 1;
659: } else
660: vhpt_base = IA64_PHYS_TO_RR7(base);
661: }
662: if (pmap_vhpt_log2size < 15)
663: panic("Can't find space for VHPT");
664:
665: if (bootverbose)
666: printf("Putting VHPT at 0x%lx\n", base);
667:
1.21 kiyohara 668: mutex_init(&pmap_vhptlock, MUTEX_DEFAULT, IPL_VM);
1.1 cherry 669:
670: __asm __volatile("mov cr.pta=%0;; srlz.i;;" ::
671: "r" (vhpt_base + (1<<8) + (pmap_vhpt_log2size<<2) + 1));
672:
673: #ifdef DEBUG
674: dump_vhpt();
675: #endif
676:
677: /*
678: * Initialise vhpt pte entries.
679: */
680:
681: pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
682:
683: pmap_vhpt_bucket = (void *)uvm_pageboot_alloc(pmap_vhpt_nbuckets *
684: sizeof(struct ia64_bucket));
685:
686: struct ia64_lpte *pte;
687:
688: pte = (struct ia64_lpte *)vhpt_base;
689: for (i = 0; i < pmap_vhpt_nbuckets; i++) {
690: pte[i].pte = 0;
691: pte[i].itir = 0;
692: pte[i].tag = 1UL << 63; /* Invalid tag */
693: pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i);
694: /* Stolen memory is zeroed! */
1.28.2.2! tls 695: mutex_init(&pmap_vhpt_bucket[i].lock, MUTEX_DEFAULT, IPL_VM);
1.1 cherry 696: }
697:
698: /*
699: * Initialize the locks.
700: */
1.21 kiyohara 701: mutex_init(&pmap_main_lock, MUTEX_DEFAULT, IPL_VM);
702: mutex_init(&pmap_all_pmaps_slock, MUTEX_DEFAULT, IPL_VM);
1.1 cherry 703:
704: /*
705: * Initialize the kernel pmap (which is statically allocated).
706: */
707: memset(pmap_kernel(), 0, sizeof(struct pmap));
708:
1.21 kiyohara 709: mutex_init(&pmap_kernel()->pm_slock, MUTEX_DEFAULT, IPL_VM);
1.1 cherry 710: for (i = 0; i < 5; i++)
711: pmap_kernel()->pm_rid[i] = 0;
712: pmap_kernel()->pm_active = 1;
713: TAILQ_INIT(&pmap_kernel()->pm_pvlist);
1.28.2.2! tls 714:
1.1 cherry 715: TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
716:
717: /*
718: * Region 5 is mapped via the vhpt.
719: */
1.28.2.2! tls 720: ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1);
1.1 cherry 721:
722: /*
723: * Region 6 is direct mapped UC and region 7 is direct mapped
 724: 	 * WB. The details of this are controlled by the Alt {I,D}TLB
1.28.2.2! tls 725: * handlers. Here we just make sure that they have the largest
1.1 cherry 726: * possible page size to minimise TLB usage.
727: */
728: ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (IA64_ID_PAGE_SHIFT << 2));
729: ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (IA64_ID_PAGE_SHIFT << 2));
1.21 kiyohara 730: ia64_srlz_d();
1.1 cherry 731:
732: /*
733: * Clear out any random TLB entries left over from booting.
734: */
1.21 kiyohara 735: pmap_invalidate_all(pmap_kernel());
1.1 cherry 736:
737: map_gateway_page();
738: }
739:
740: /*
741: * pmap_init: [ INTERFACE ]
742: *
743: * Initialize the pmap module. Called by vm_init(), to initialize any
744: * structures that the pmap system needs to map virtual memory.
745: *
746: * Note: no locking is necessary in this function.
747: */
748: void
749: pmap_init(void)
750: {
751:
752: /*
753: * Set a low water mark on the pv_entry pool, so that we are
754: * more likely to have these around even in extreme memory
755: * starvation.
756: */
757: pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
758:
759: /*
760: * Now it is safe to enable pv entry recording.
761: */
1.6 thorpej 762: pmap_initialized = true;
1.1 cherry 763:
764: }
765:
766: /*
767: * vtophys: virtual address to physical address. For use by
768: * machine-dependent code only.
769: */
770:
771: paddr_t
1.16 dsl 772: vtophys(vaddr_t va)
1.1 cherry 773: {
774: paddr_t pa;
775:
1.6 thorpej 776: if (pmap_extract(pmap_kernel(), va, &pa) == true)
1.12 kochi 777: return pa;
778: return 0;
1.1 cherry 779: }
780:
781: /*
782: * pmap_virtual_space: [ INTERFACE ]
783: *
784: * Define the initial bounds of the kernel virtual address space.
785: */
786: void
787: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
788: {
1.28.2.2! tls 789:
! 790: *vstartp = VM_MIN_KERNEL_ADDRESS;
! 791: *vendp = VM_MAX_KERNEL_ADDRESS;
1.1 cherry 792: }
793:
794: /*
795: * pmap_remove_all: [ INTERFACE ]
796: *
797: * This function is a hint to the pmap implementation that all
798: * entries in pmap will be removed before any more entries are
799: * entered.
800: */
801: void
802: pmap_remove_all(pmap_t pmap)
803: {
804: /* Nothing Yet */
805: }
806:
807: /*
808: * pmap_remove: [ INTERFACE ]
809: *
810: * Remove the given range of addresses from the specified map.
811: *
812: * It is assumed that the start and end are properly
813: * rounded to the page size.
814: */
815: void
816: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
817: {
1.28.2.2! tls 818: pmap_t oldpmap;
! 819: vaddr_t va;
! 820: pv_entry_t pv;
! 821: struct ia64_lpte *pte;
! 822:
! 823: if (pmap->pm_stats.resident_count == 0)
! 824: return;
! 825:
! 826: PMAP_MAP_TO_HEAD_LOCK();
! 827: PMAP_LOCK(pmap);
! 828: oldpmap = pmap_install(pmap);
! 829:
! 830: /*
        !  831: 	 * Special handling of removing one page: a very
        !  832: 	 * common operation and easy to short-circuit
        !  833: 	 * some code.
! 834: */
! 835: if (sva + PAGE_SIZE == eva) {
! 836: pmap_remove_page(pmap, sva);
! 837: goto out;
! 838: }
! 839:
! 840: if (pmap->pm_stats.resident_count < ((eva - sva) >> PAGE_SHIFT)) {
! 841: TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
! 842: va = pv->pv_va;
! 843: if (va >= sva && va < eva) {
! 844: pte = pmap_find_vhpt(va);
! 845: KASSERT(pte != NULL);
! 846: pmap_remove_pte(pmap, pte, va, pv, 1);
1.1 cherry 847: pmap_invalidate_page(pmap, va);
1.28.2.2! tls 848: }
! 849: }
1.1 cherry 850:
1.28.2.2! tls 851: } else {
! 852: for (va = sva; va < eva; va += PAGE_SIZE) {
! 853: pte = pmap_find_vhpt(va);
! 854: if (pte != NULL) {
! 855: pmap_remove_pte(pmap, pte, va, 0, 1);
1.1 cherry 856: pmap_invalidate_page(pmap, va);
857: }
858: }
859: }
1.28.2.2! tls 860:
1.1 cherry 861: out:
1.28.2.2! tls 862: pmap_install(oldpmap);
! 863: PMAP_UNLOCK(pmap);
1.1 cherry 864: PMAP_MAP_TO_HEAD_UNLOCK();
865:
866: }
867:
868: /*
869: * pmap_zero_page: [ INTERFACE ]
870: *
871: * Zero the specified (machine independent) page by mapping the page
 872:  *	into virtual memory and clearing its contents, one machine dependent
873: * page at a time.
874: *
875: * Note: no locking is necessary in this function.
876: */
877: void
878: pmap_zero_page(paddr_t phys)
879: {
880: vaddr_t va = IA64_PHYS_TO_RR7(phys);
1.28.2.2! tls 881:
1.18 cegger 882: memset((void *) va, 0, PAGE_SIZE);
1.1 cherry 883: }
884:
885: /*
886: * pmap_copy_page: [ INTERFACE ]
887: *
888: * Copy the specified (machine independent) page by mapping the page
889: * into virtual memory and using memcpy to copy the page, one machine
890: * dependent page at a time.
891: *
892: * Note: no locking is necessary in this function.
893: */
894: void
895: pmap_copy_page(paddr_t psrc, paddr_t pdst)
896: {
897: vaddr_t vsrc = IA64_PHYS_TO_RR7(psrc);
898: vaddr_t vdst = IA64_PHYS_TO_RR7(pdst);
1.28.2.2! tls 899:
1.23 cegger 900: memcpy((void *) vdst, (void *) vsrc, PAGE_SIZE);
1.1 cherry 901: }
902:
903: /*
904: * pmap_unwire: [ INTERFACE ]
905: *
906: * Clear the wired attribute for a map/virtual-address pair.
907: *
908: * The mapping must already exist in the pmap.
909: */
910: void
911: pmap_unwire(pmap_t pmap, vaddr_t va)
912: {
913: pmap_t oldpmap;
914: struct ia64_lpte *pte;
915:
916: if (pmap == NULL)
917: return;
918:
919: PMAP_LOCK(pmap);
920: oldpmap = pmap_install(pmap);
921:
922: pte = pmap_find_vhpt(va);
923:
924: KASSERT(pte != NULL);
925:
926: /*
927: * If wiring actually changed (always?) clear the wire bit and
928: * update the wire count. Note that wiring is not a hardware
929: * characteristic so there is no need to invalidate the TLB.
930: */
931:
932: if (pmap_wired(pte)) {
933: pmap->pm_stats.wired_count--;
934: pmap_clear_wired(pte);
935: }
936: #ifdef DIAGNOSTIC
937: else {
938: printf("pmap_unwire: wiring for pmap %p va 0x%lx "
939: "didn't change!\n", pmap, va);
940: }
941: #endif
942: pmap_install(oldpmap);
943: PMAP_UNLOCK(pmap);
944: }
945:
946: /*
947: * pmap_kenter_pa: [ INTERFACE ]
948: *
949: * Enter a va -> pa mapping into the kernel pmap without any
950: * physical->virtual tracking.
951: *
952: * Note: no locking is necessary in this function.
953: */
954: void
1.24 cegger 955: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 cherry 956: {
1.28.2.2! tls 957: struct ia64_lpte *pte;
1.1 cherry 958:
1.28.2.2! tls 959: pte = pmap_find_kpte(va);
! 960: if (pmap_present(pte))
! 961: pmap_invalidate_page(pmap_kernel(), va);
! 962: else
! 963: pmap_enter_vhpt(pte, va);
! 964: pmap_pte_prot(pmap_kernel(), pte, prot);
! 965: pmap_set_pte(pte, va, pa, false, false);
1.1 cherry 966: }
967:
968: /*
969: * pmap_kremove: [ INTERFACE ]
970: *
971: * Remove a mapping entered with pmap_kenter_pa() starting at va,
972: * for size bytes (assumed to be page rounded).
973: */
974: void
975: pmap_kremove(vaddr_t va, vsize_t size)
976: {
1.28.2.2! tls 977: struct ia64_lpte *pte;
1.1 cherry 978:
1.28.2.2! tls 979: while (size > 0) {
! 980: pte = pmap_find_kpte(va);
! 981: if (pmap_present(pte)) {
! 982: pmap_remove_vhpt(va);
! 983: pmap_invalidate_page(pmap_kernel(), va);
! 984: pmap_clear_present(pte);
! 985: }
! 986: va += PAGE_SIZE;
! 987: size -= PAGE_SIZE;
! 988: }
1.1 cherry 989: }
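/*
 * Editorial usage sketch (hypothetical va/pa): the pair above is the
 * untracked fast path, e.g.
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	...
 *	pmap_kremove(va, PAGE_SIZE);
 *
 * No pv entry is created, so only the caller knows about the mapping.
 */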
990:
991: /*
992: * pmap_create: [ INTERFACE ]
993: *
994: * Create and return a physical map.
995: *
996: * Note: no locking is necessary in this function.
997: */
998: pmap_t
999: pmap_create(void)
1000: {
1001: pmap_t pmap;
1002: int i;
1003:
1004: #ifdef DEBUG
1.28.2.2! tls 1005: printf("pmap_create()\n");
1.1 cherry 1006: #endif
1007:
1008: pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1009: memset(pmap, 0, sizeof(*pmap));
1010:
1.28.2.2! tls 1011: for (i = 0; i < 5; i++)
! 1012: pmap->pm_rid[i] = pmap_allocate_rid();
! 1013: pmap->pm_active = 0;
! 1014: TAILQ_INIT(&pmap->pm_pvlist);
! 1015: memset(&pmap->pm_stats, 0, sizeof (pmap->pm_stats) );
1.1 cherry 1016:
1.21 kiyohara 1017: mutex_init(&pmap->pm_slock, MUTEX_DEFAULT, IPL_VM);
1.1 cherry 1018:
1.12 kochi 1019: mutex_enter(&pmap_all_pmaps_slock);
1.1 cherry 1020: TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap, pm_list);
1.12 kochi 1021: mutex_exit(&pmap_all_pmaps_slock);
1.1 cherry 1022:
1.12 kochi 1023: return pmap;
1.1 cherry 1024: }
1025:
1026: /*
1027: * pmap_destroy: [ INTERFACE ]
1028: *
1029: * Drop the reference count on the specified pmap, releasing
1030: * all resources if the reference count drops to zero.
1031: */
1032: void
1033: pmap_destroy(pmap_t pmap)
1034: {
1035: int i;
1036:
1037: #ifdef DEBUG
1.28.2.2! tls 1038: printf("pmap_destroy(%p)\n", pmap);
1.1 cherry 1039: #endif
1040:
1.28.2.2! tls 1041: for (i = 0; i < 5; i++)
! 1042: if (pmap->pm_rid[i])
! 1043: pmap_free_rid(pmap->pm_rid[i]);
1.1 cherry 1044: /*
1045: * Remove it from the global list of all pmaps.
1046: */
1.12 kochi 1047: mutex_enter(&pmap_all_pmaps_slock);
1.1 cherry 1048: TAILQ_REMOVE(&pmap_all_pmaps, pmap, pm_list);
1.12 kochi 1049: mutex_exit(&pmap_all_pmaps_slock);
1.1 cherry 1050:
1051: pool_put(&pmap_pmap_pool, pmap);
1052:
1053: }
1054:
1055: /*
1056: * pmap_activate: [ INTERFACE ]
1057: *
1058: * Activate the pmap used by the specified process. This includes
 1059:  *	reloading the MMU context if it is the current process, and marking
1060: * the pmap in use by the processor.
1061: *
1062: * Note: We may use only spin locks here, since we are called
1063: * by a critical section in cpu_switch()!
1064: */
1065: void
1066: pmap_activate(struct lwp *l)
1067: {
1.28.2.2! tls 1068:
1.1 cherry 1069: pmap_install(vm_map_pmap(&l->l_proc->p_vmspace->vm_map));
1070: }
1071:
1072: /*
1073: * pmap_deactivate: [ INTERFACE ]
1074: *
1075: * Mark that the pmap used by the specified process is no longer
1076: * in use by the processor.
1077: *
1078: */
1079:
1080: void
1081: pmap_deactivate(struct lwp *l)
1082: {
1083: }
1084:
1085: /*
1086: * pmap_protect: [ INTERFACE ]
1087: *
1088: * Set the physical protection on the specified range of this map
1089: * as requested.
1090: */
1095: void
1096: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1097: {
1098: pmap_t oldpmap;
1099: struct ia64_lpte *pte;
1100: vaddr_t pa;
1101: struct vm_page *pg;
1102:
1103: if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1104: pmap_remove(pmap, sva, eva);
1105: return;
1106: }
1107:
1108: if (prot & VM_PROT_WRITE)
1109: return;
1110:
1111: if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
1112: panic("pmap_protect: unaligned addresses");
1113:
1114: PMAP_LOCK(pmap);
1115: oldpmap = pmap_install(pmap);
1116: while (sva < eva) {
1.28.2.2! tls 1117: /*
1.1 cherry 1118: * If page is invalid, skip this page
1119: */
1120: pte = pmap_find_vhpt(sva);
1121: if (pte == NULL) {
1122: sva += PAGE_SIZE;
1123: continue;
1124: }
1125:
1126: if (pmap_prot(pte) != prot) {
1127: if (pmap_managed(pte)) {
1128: pa = pmap_ppn(pte);
1129: pg = PHYS_TO_VM_PAGE(pa);
1.28.2.1 tls 1130: if (pmap_dirty(pte))
1131: pmap_clear_dirty(pte);
1.1 cherry 1132: if (pmap_accessed(pte)) {
1133: pmap_clear_accessed(pte);
1134: }
1135: }
1136: pmap_pte_prot(pmap, pte, prot);
1137: pmap_invalidate_page(pmap, sva);
1138: }
1139:
1140: sva += PAGE_SIZE;
1141: }
1142: pmap_install(oldpmap);
1143: PMAP_UNLOCK(pmap);
1144: }
1145:
1146: /*
1147: * pmap_extract: [ INTERFACE ]
1148: *
1149: * Extract the physical address associated with the given
1150: * pmap/virtual address pair.
1151: */
1.5 thorpej 1152: bool
1.1 cherry 1153: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1154: {
1.28.2.2! tls 1155: struct ia64_lpte *pte;
! 1156: pmap_t oldpmap;
        !  1158: 
! 1160: mutex_enter(&pmap->pm_slock);
        !  1161: 	oldpmap = pmap_install(pmap);	/* XXX: isn't this a little inefficient? */
! 1162: pte = pmap_find_vhpt(va);
! 1163: if (pte != NULL && pmap_present(pte)) {
! 1164: if (pap != NULL)
! 1165: *pap = pmap_ppn(pte);
! 1166: } else {
! 1167: mutex_exit(&pmap->pm_slock);
! 1168: return false;
1.21 kiyohara 1169: }
1.28.2.2! tls 1170: pmap_install(oldpmap);
! 1171: mutex_exit(&pmap->pm_slock);
! 1172: return true;
1.1 cherry 1173:
1174: }
1175:
1176: /*
1177: * pmap_clear_modify: [ INTERFACE ]
1178: *
1179: * Clear the modify bits on the specified physical page.
1180: */
1.5 thorpej 1181: bool
1.1 cherry 1182: pmap_clear_modify(struct vm_page *pg)
1183: {
1.25 uebayasi 1184: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.6 thorpej 1185: bool rv = false;
1.1 cherry 1186: struct ia64_lpte *pte;
1187: pmap_t oldpmap;
1188: pv_entry_t pv;
1189:
1.25 uebayasi 1190: TAILQ_FOREACH(pv, &md->pv_list, pv_list) {
1.1 cherry 1191: PMAP_LOCK(pv->pv_pmap);
1192: oldpmap = pmap_install(pv->pv_pmap);
1193: pte = pmap_find_vhpt(pv->pv_va);
1194: KASSERT(pte != NULL);
1195: if (pmap_dirty(pte)) {
1.6 thorpej 1196: rv = true;
1.1 cherry 1197: pmap_clear_dirty(pte);
1198: pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1199: }
1200: pmap_install(oldpmap);
1201: PMAP_UNLOCK(pv->pv_pmap);
1202: }
1203: return rv;
1204: }
1205:
1206: /*
1207: * pmap_page_protect: [ INTERFACE ]
1208: *
1209: * Lower the permission for all mappings to a given page to
1210: * the permissions specified.
1211: */
1212: void
1213: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1214: {
1.25 uebayasi 1215: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.28.2.2! tls 1216: struct ia64_lpte *pte;
! 1217: pmap_t oldpmap, pmap;
! 1218: pv_entry_t pv;
! 1219:
! 1220: if ((prot & VM_PROT_WRITE) != 0)
! 1221: return;
! 1222: if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
! 1223: if (pg->flags & PG_RDONLY)
! 1224: return;
! 1225: TAILQ_FOREACH(pv, &md->pv_list, pv_list) {
! 1226: pmap = pv->pv_pmap;
! 1227: PMAP_LOCK(pmap);
! 1228: oldpmap = pmap_install(pmap);
! 1229: pte = pmap_find_vhpt(pv->pv_va);
! 1230: KASSERT(pte != NULL);
! 1231: pmap_pte_prot(pmap, pte, prot);
! 1232: pmap_invalidate_page(pmap, pv->pv_va);
! 1233: pmap_install(oldpmap);
! 1234: PMAP_UNLOCK(pmap);
! 1235: }
! 1236:
! 1237: pg->flags |= PG_RDONLY;
! 1238: } else
! 1239: pmap_page_purge(pg);
1.1 cherry 1240: }
1241:
1242: /*
1243: * pmap_reference: [ INTERFACE ]
1244: *
1245: * Add a reference to the specified pmap.
1246: */
1247: void
1248: pmap_reference(pmap_t pmap)
1249: {
1250:
1251: #ifdef DEBUG
1.21 kiyohara 1252: printf("pmap_reference(%p)\n", pmap);
1.1 cherry 1253: #endif
1254:
1255: PMAP_LOCK(pmap);
1256: pmap->pm_count++;
1257: PMAP_UNLOCK(pmap);
1258: }
1259:
1260: /*
1261: * pmap_clear_reference: [ INTERFACE ]
1262: *
1263: * Clear the reference bit on the specified physical page.
1264: */
1.5 thorpej 1265: bool
1.1 cherry 1266: pmap_clear_reference(struct vm_page *pg)
1267: {
1.28.2.2! tls 1268:
1.12 kochi 1269: return false;
1.1 cherry 1270: }
1271:
1272: /*
1273: * pmap_phys_address: [ INTERFACE ]
1274: *
1275: * Return the physical address corresponding to the specified
1276: * cookie. Used by the device pager to decode a device driver's
1277: * mmap entry point return value.
1278: *
1279: * Note: no locking is necessary in this function.
1280: */
1281: paddr_t
1.9 macallan 1282: pmap_phys_address(paddr_t ppn)
1.1 cherry 1283: {
1284:
1.12 kochi 1285: return ia64_ptob(ppn);
1.1 cherry 1286: }
1287:
1288: /*
1289: * pmap_enter: [ INTERFACE ]
1290: *
1291: * Insert the given physical page (p) at
1292: * the specified virtual address (v) in the
1293: * target physical map with the protection requested.
1294: *
1295: * If specified, the page will be wired down, meaning
1296: * that the related pte can not be reclaimed.
1297: *
1298: * Note: This is the only routine which MAY NOT lazy-evaluate
1299: * or lose information. That is, this routine must actually
1300: * insert this page into the given map NOW.
1301: */
1302: int
1.20 cegger 1303: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 cherry 1304: {
1.28.2.2! tls 1305: pmap_t oldpmap;
! 1306: vaddr_t opa;
! 1307: struct ia64_lpte origpte;
! 1308: struct ia64_lpte *pte;
! 1309: bool managed, wired;
1.1 cherry 1310: struct vm_page *pg;
1311: int error = 0;
1312:
1.28.2.2! tls 1313: PMAP_MAP_TO_HEAD_LOCK();
! 1314: PMAP_LOCK(pmap);
! 1315: oldpmap = pmap_install(pmap);
! 1316:
! 1317: va &= ~PAGE_MASK;
1.1 cherry 1318:
1.28.2.2! tls 1319: managed = false;
1.1 cherry 1320:
 1321: 	wired = (flags & PMAP_WIRED) != 0;
1322:
1323: pg = PHYS_TO_VM_PAGE(pa);
1324:
1.28.2.2! tls 1325: #ifdef DIAGNOSTIC
! 1326: if (va > VM_MAX_KERNEL_ADDRESS)
! 1327: panic("pmap_enter: toobig");
! 1328: #endif
1.1 cherry 1329:
1.28.2.2! tls 1330: /*
! 1331: * Find (or create) a pte for the given mapping.
! 1332: */
! 1333: while ((pte = pmap_find_pte(va)) == NULL) {
! 1334: pmap_install(oldpmap);
! 1335: PMAP_UNLOCK(pmap);
! 1336: PMAP_MAP_TO_HEAD_UNLOCK();
! 1337: uvm_kick_pdaemon();
! 1338: PMAP_MAP_TO_HEAD_LOCK();
! 1339: PMAP_LOCK(pmap);
! 1340: oldpmap = pmap_install(pmap);
! 1341: }
! 1342: origpte = *pte;
! 1343: if (!pmap_present(pte)) {
! 1344: opa = ~0UL;
! 1345: pmap_enter_vhpt(pte, va);
! 1346: } else
! 1347: opa = pmap_ppn(pte);
1.1 cherry 1348:
1.28.2.2! tls 1349: /*
! 1350: * Mapping has not changed, must be protection or wiring change.
! 1351: */
! 1352: if (opa == pa) {
! 1353: /*
! 1354: * Wiring change, just update stats. We don't worry about
! 1355: * wiring PT pages as they remain resident as long as there
! 1356: * are valid mappings in them. Hence, if a user page is wired,
! 1357: * the PT page will be also.
! 1358: */
! 1359: if (wired && !pmap_wired(&origpte))
! 1360: pmap->pm_stats.wired_count++;
! 1361: else if (!wired && pmap_wired(&origpte))
! 1362: pmap->pm_stats.wired_count--;
! 1363:
! 1364: managed = (pmap_managed(&origpte)) ? true : false;
! 1365:
! 1366:
! 1367: /*
! 1368: * We might be turning off write access to the page,
! 1369: * so we go ahead and sense modify status.
! 1370: */
! 1371: if (managed && pmap_dirty(&origpte))
1.1 cherry 1372: pg->flags &= ~PG_CLEAN;
1373:
1.28.2.2! tls 1374: pmap_invalidate_page(pmap, va);
! 1375: goto validate;
! 1376: }
! 1377:
! 1378: /*
! 1379: * Mapping has changed, invalidate old range and fall
! 1380: * through to handle validating new mapping.
! 1381: */
! 1382: if (opa != ~0UL) {
! 1383: pmap_remove_pte(pmap, pte, va, 0, 0);
! 1384: pmap_enter_vhpt(pte, va);
! 1385: }
! 1386:
! 1387: /*
! 1388: * Enter on the PV list if part of our managed memory.
! 1389: */
! 1390:
! 1391: if (pg != NULL) {
! 1392: pmap_insert_entry(pmap, va, pg);
! 1393: managed = true;
! 1394: }
! 1395:
! 1396: /*
! 1397: * Increment counters
! 1398: */
! 1399: pmap->pm_stats.resident_count++;
! 1400: if (wired)
! 1401: pmap->pm_stats.wired_count++;
1.1 cherry 1402:
1403: validate:
1404:
1.28.2.2! tls 1405: /*
! 1406: * Now validate mapping with desired protection/wiring. This
! 1407: * adds the pte to the VHPT if necessary.
! 1408: */
! 1409: pmap_pte_prot(pmap, pte, prot);
! 1410: pmap_set_pte(pte, va, pa, wired, managed);
! 1411:
! 1412: PMAP_MAP_TO_HEAD_UNLOCK();
! 1413: pmap_install(oldpmap);
! 1414: PMAP_UNLOCK(pmap);
1.1 cherry 1415:
1416: return error; /* XXX: Look into this. */
1417: }
1418:
1419: /*
1.28 chs 1420: * Routine: pmap_page_purge
1.1 cherry 1421: * Function:
1422: * Removes this physical page from
1423: * all physical maps in which it resides.
1424: * Reflects back modify bits to the pager.
1425: *
1426: * Notes:
1427: * Original versions of this routine were very
1428: * inefficient because they iteratively called
1429: * pmap_remove (slow...)
1430: */
1431:
1432: void
1.25 uebayasi 1433: pmap_page_purge(struct vm_page *pg)
1.1 cherry 1434: {
1.25 uebayasi 1435: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.1 cherry 1436: pmap_t oldpmap;
1437: pv_entry_t pv;
1438:
1.25 uebayasi 1439: while ((pv = TAILQ_FIRST(&md->pv_list)) != NULL) {
1.1 cherry 1440: struct ia64_lpte *pte;
1441: pmap_t pmap = pv->pv_pmap;
1442: vaddr_t va = pv->pv_va;
1443:
1444: PMAP_LOCK(pmap);
1445: oldpmap = pmap_install(pmap);
1446: pte = pmap_find_vhpt(va);
1447: KASSERT(pte != NULL);
1448: if (pmap_ppn(pte) != VM_PAGE_TO_PHYS(pg))
1.28.2.2! tls 1449: panic("pmap_remove_all:"
! 1450: "pv_table for %lx is inconsistent",
! 1451: VM_PAGE_TO_PHYS(pg));
1.1 cherry 1452: pmap_remove_pte(pmap, pte, va, pv, 1);
1453: pmap_install(oldpmap);
1454: PMAP_UNLOCK(pmap);
1455: }
1456:
1457: pg->flags |= PG_RDONLY;
1458:
1459: }
1460:
1461: pmap_t
1462: pmap_switch(pmap_t pm)
1463: {
1.28.2.2! tls 1464: pmap_t prevpm;
! 1465: int i;
1.1 cherry 1466:
1467: prevpm = curcpu()->ci_pmap;
1.28.2.2! tls 1468: if (prevpm == pm)
! 1469: return prevpm;
! 1470: if (pm == NULL)
! 1471: for (i = 0; i < 5; i++)
! 1472: ia64_set_rr(IA64_RR_BASE(i),
! 1473: (i << 8)|(PAGE_SHIFT << 2)|1);
! 1474: else
! 1475: for (i = 0; i < 5; i++)
! 1476: ia64_set_rr(IA64_RR_BASE(i),
! 1477: (pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1);
! 1478: curcpu()->ci_pmap = pm;
1.21 kiyohara 1479: ia64_srlz_d();
1.28.2.2! tls 1480: return prevpm;
1.1 cherry 1481: }
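/*
 * (Editorial) The region-register value written above follows the
 * architected layout: bit 0 = VHPT-walker enable, bits 7:2 = preferred
 * page size, bits 31:8 = region ID; hence
 * (rid << 8) | (PAGE_SHIFT << 2) | 1.
 */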
1482:
1483: static pmap_t
1484: pmap_install(pmap_t pm)
1485: {
1.28.2.2! tls 1486: pmap_t prevpm;
1.1 cherry 1487: 	int s;		/* formerly 'splsched', which shadowed splsched() */
 1488: 
1.28.2.2! tls 1489: 	s = splsched();
        !  1490: 	prevpm = pmap_switch(pm);
1.1 cherry 1491: 	splx(s);
1.28.2.2! tls 1492: return prevpm;
1.1 cherry 1493: }
1494:
1495: static uint32_t
1496: pmap_allocate_rid(void)
1497: {
1498: uint64_t bit, bits;
1499: int rid;
1500:
1.12 kochi 1501: mutex_enter(&pmap_rid_lock);
1.1 cherry 1502: if (pmap_ridcount == pmap_ridmax)
1503: panic("pmap_allocate_rid: All Region IDs used");
1504:
1505: /* Find an index with a free bit. */
1506: while ((bits = pmap_ridmap[pmap_rididx]) == ~0UL) {
1507: pmap_rididx++;
1508: if (pmap_rididx == pmap_ridmapsz)
1509: pmap_rididx = 0;
1510: }
1511: rid = pmap_rididx * 64;
1512:
1513: /* Find a free bit. */
1514: bit = 1UL;
1515: while (bits & bit) {
1516: rid++;
1517: bit <<= 1;
1518: }
1519:
1520: pmap_ridmap[pmap_rididx] |= bit;
1521: pmap_ridcount++;
1.12 kochi 1522: mutex_exit(&pmap_rid_lock);
1.1 cherry 1523:
1524: return rid;
1525: }
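/*
 * Editorial example: right after pmap_bootstrap() sets
 * pmap_ridmap[0] = 0xff (RIDs 0..7 reserved for the kernel), the first
 * call finds pmap_rididx == 0 and bits == 0xff, so the free-bit scan
 * stops at bit 1 << 8 and returns rid == 0 * 64 + 8.
 */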
1526:
1527: static void
1528: pmap_free_rid(uint32_t rid)
1529: {
1530: uint64_t bit;
1531: int idx;
1532:
1533: idx = rid / 64;
1534: bit = ~(1UL << (rid & 63));
1535:
1.12 kochi 1536: mutex_enter(&pmap_rid_lock);
1.1 cherry 1537: pmap_ridmap[idx] &= bit;
1538: pmap_ridcount--;
1.12 kochi 1539: mutex_exit(&pmap_rid_lock);
1.1 cherry 1540: }
1541:
1542: /***************************************************
1543: * Manipulate TLBs for a pmap
1544: ***************************************************/
1545:
1546: static void
1547: pmap_invalidate_page(pmap_t pmap, vaddr_t va)
1548: {
1.28.2.2! tls 1549:
1.1 cherry 1550: KASSERT((pmap == pmap_kernel() || pmap == curcpu()->ci_pmap));
1551: ia64_ptc_g(va, PAGE_SHIFT << 2);
1552: }
1553:
1554: static void
1555: pmap_invalidate_all_1(void *arg)
1556: {
1.21 kiyohara 1557: uint64_t addr;
1.1 cherry 1558: int i, j;
1559: register_t psr;
1560:
1561: psr = intr_disable();
1562: addr = pmap_ptc_e_base;
1563: for (i = 0; i < pmap_ptc_e_count1; i++) {
1564: for (j = 0; j < pmap_ptc_e_count2; j++) {
1565: ia64_ptc_e(addr);
1566: addr += pmap_ptc_e_stride2;
1567: }
1568: addr += pmap_ptc_e_stride1;
1569: }
1570: intr_restore(psr);
1571: }
1572:
1573: static void
1574: pmap_invalidate_all(pmap_t pmap)
1575: {
1576:
1.28.2.2! tls 1577: KASSERT(pmap == pmap_kernel() || pmap == curcpu()->ci_pmap);
1.1 cherry 1578:
1579: #ifdef MULTIPROCESSOR
1580: smp_rendezvous(0, pmap_invalidate_all_1, 0, 0);
1581: #else
1582: pmap_invalidate_all_1(0);
1583: #endif
1584: }
1585:
1586: /***************************************************
1587: * Low level mapping routines.....
1588: ***************************************************/
1589:
1590: /*
1591: * Find the kernel lpte for mapping the given virtual address, which
1592: * must be in the part of region 5 which we can cover with our kernel
1593: * 'page tables'.
1594: */
1595: static struct ia64_lpte *
1596: pmap_find_kpte(vaddr_t va)
1597: {
1.28.2.2! tls 1598:
1.1 cherry 1599: KASSERT((va >> 61) == 5);
1600: KASSERT(IA64_RR_MASK(va) < (nkpt * PAGE_SIZE * NKPTEPG));
1.12 kochi 1601: return &ia64_kptdir[KPTE_DIR_INDEX(va)][KPTE_PTE_INDEX(va)];
1.1 cherry 1602: }
1603:
1604:
1605: /***************************************************
1606: * Low level helper routines.....
1607: ***************************************************/
1608:
1609: /*
1.28.2.2! tls 1610: * Find a pte suitable for mapping a user-space address. If one exists
1.1 cherry 1611: * in the VHPT, that one will be returned, otherwise a new pte is
1612: * allocated.
1613: */
1614: static struct ia64_lpte *
1615: pmap_find_pte(vaddr_t va)
1616: {
1617: struct ia64_lpte *pte;
1618:
1619: if (va >= VM_MAXUSER_ADDRESS)
1620: return pmap_find_kpte(va);
1621:
1622: pte = pmap_find_vhpt(va);
1623: if (pte == NULL) {
1624: pte = pool_get(&pmap_ia64_lpte_pool, PR_NOWAIT);
1625: pte->tag = 1UL << 63;
1626: }
1.28.2.2! tls 1627:
1.1 cherry 1628: return pte;
1629: }
1630:
1631: static __inline void
1632: pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot)
1633: {
1.28.2.2! tls 1634: static int prot2ar[4] = {
! 1635: PTE_AR_R, /* VM_PROT_NONE */
! 1636: PTE_AR_RW, /* VM_PROT_WRITE */
! 1637: PTE_AR_RX, /* VM_PROT_EXECUTE */
! 1638: PTE_AR_RWX /* VM_PROT_WRITE|VM_PROT_EXECUTE */
! 1639: };
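	/*
	 * (Editorial) The table is indexed by (prot & VM_PROT_ALL) >> 1,
	 * i.e. the VM_PROT_READ bit is shifted away and read access is
	 * implied: VM_PROT_READ|VM_PROT_WRITE == 3 >> 1 == 1 selects
	 * PTE_AR_RW, and VM_PROT_ALL == 7 >> 1 == 3 selects PTE_AR_RWX.
	 */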
! 1640:
! 1641: pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK);
! 1642: pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56;
! 1643: pte->pte |= (prot == VM_PROT_NONE || pm == pmap_kernel())
! 1644: ? PTE_PL_KERN : PTE_PL_USER;
! 1645: pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
1.1 cherry 1646: }
1647:
1648:
1649:
1650: /*
1651: * Set a pte to contain a valid mapping and enter it in the VHPT. If
 1652:  * the pte was originally valid, then it's assumed to already be in the
 1653:  * VHPT.
 1654:  * This function does not set the protection bits.  It's expected
1655: * that those have been set correctly prior to calling this function.
1656: */
1657: static void
1658: pmap_set_pte(struct ia64_lpte *pte, vaddr_t va, vaddr_t pa,
1.5 thorpej 1659: bool wired, bool managed)
1.1 cherry 1660: {
1661:
1.28.2.2! tls 1662: pte->pte &= PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK;
! 1663: pte->pte |= PTE_PRESENT | PTE_MA_WB;
! 1664: pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
! 1665: pte->pte |= (wired) ? PTE_WIRED : 0;
! 1666: pte->pte |= pa & PTE_PPN_MASK;
1.1 cherry 1667:
1.28.2.2! tls 1668: pte->itir = PAGE_SHIFT << 2;
1.1 cherry 1669:
1.28.2.2! tls 1670: pte->tag = ia64_ttag(va);
1.1 cherry 1671: }
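/*
 * (Editorial) Note the asymmetry above: unmanaged mappings are born
 * with PTE_DIRTY|PTE_ACCESSED already set, presumably so they never
 * take referenced/modified tracking faults; managed ones start clean
 * and collect those bits for the pv-tracking code to harvest.
 */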
1672:
1673: /*
1674: * Remove the (possibly managed) mapping represented by pte from the
1675: * given pmap.
1676: */
1677: static int
1678: pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vaddr_t va,
1679: pv_entry_t pv, int freepte)
1680: {
1681: int error;
1682: struct vm_page *pg;
1683:
1684: KASSERT(pmap == pmap_kernel() || pmap == curcpu()->ci_pmap);
1685:
1686: /*
1687: * First remove from the VHPT.
1688: */
1689: error = pmap_remove_vhpt(va);
1690: if (error)
1691: return error;
1692:
1693: pmap_invalidate_page(pmap, va);
1694:
1695: if (pmap_wired(pte))
1696: pmap->pm_stats.wired_count -= 1;
1697:
1698: pmap->pm_stats.resident_count -= 1;
1699: if (pmap_managed(pte)) {
1700: pg = PHYS_TO_VM_PAGE(pmap_ppn(pte));
1701: if (pmap_dirty(pte))
1.28.2.1 tls 1702: pg->flags &= ~(PG_CLEAN);
1.1 cherry 1703: if (pmap_accessed(pte))
1704: pg->flags &= ~PG_CLEAN; /* XXX: Do we need this ? */
 1705: 
 1709: 		error = pmap_remove_entry(pmap, pg, va, pv);
 1710: 
1711: }
1712: if (freepte)
1713: pmap_free_pte(pte, va);
 1714: 	return error;
1715: }
1716:
1717: /*
 1718:  * Free a pte which is now unused.  This should return it to the pool
 1719:  * allocator for a user mapping (XXX: not currently done).  For kernel
 1720:  * mappings, clear the valid bit to make it clear that the mapping is
 1720:  * not currently used.
1721: */
1722: static void
1723: pmap_free_pte(struct ia64_lpte *pte, vaddr_t va)
1724: {
1.28.2.2! tls 1725:
! 1726: if (va >= VM_MAXUSER_ADDRESS)
1.1 cherry 1727: pmap_clear_present(pte);
1728: }
1729:
1730:
1731: /***************************************************
1732: * page management routines.
1733: ***************************************************/
1734:
1735: /*
1736: * get a new pv_entry, allocating a block from the system
1737: * when needed.
1738: * the memory allocation is performed bypassing the malloc code
1739: * because of the possibility of allocations at interrupt time.
1740: */
1745: static pv_entry_t
1746: get_pv_entry(pmap_t locked_pmap)
1747: {
1748: pv_entry_t allocated_pv;
1749:
1.28.2.2! tls 1750: allocated_pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
1.12 kochi 1751: return allocated_pv;
1.1 cherry 1752:
1753: /* XXX: Nice to have all this stuff later:
1754: * Reclaim pv entries: At first, destroy mappings to inactive
1755: * pages. After that, if a pv entry is still needed, destroy
1756: * mappings to active pages.
1757: */
1758: }
1759:
1760: /*
1761: * free the pv_entry back to the free list
1762: */
1763: static __inline void
1764: free_pv_entry(pv_entry_t pv)
1765: {
1.28.2.2! tls 1766:
1.1 cherry 1767: pool_put(&pmap_pv_pool, pv);
1768: }
1769:
1770: /*
1771: * Add an ia64_lpte to the VHPT.
1772: */
1773: static void
1774: pmap_enter_vhpt(struct ia64_lpte *pte, vaddr_t va)
1775: {
1776: struct ia64_bucket *bckt;
1777: struct ia64_lpte *vhpte;
1778: uint64_t pte_pa;
1779:
1780: /* Can fault, so get it out of the way. */
1781: pte_pa = ia64_tpa((vaddr_t)pte);
1782:
1783: vhpte = (struct ia64_lpte *)ia64_thash(va);
1784: bckt = (struct ia64_bucket *)vhpte->chain;
1785: /* XXX: fixme */
1.12 kochi 1786: mutex_enter(&bckt->lock);
1.1 cherry 1787: pte->chain = bckt->chain;
1788: ia64_mf();
1789: bckt->chain = pte_pa;
1790:
1791: pmap_vhpt_inserts++;
1792: bckt->length++;
1793: /*XXX : fixme */
1.12 kochi 1794: mutex_exit(&bckt->lock);
1.1 cherry 1795: }
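/*
 * Editorial sketch of the collision chain built above:
 *
 *	ia64_thash(va) -> VHPT slot --chain--> bucket
 *	                     bucket->chain --> pte --chain--> pte --> 0
 *
 * New ptes are pushed on the front of bckt->chain; ia64_ttag(va) gives
 * the tag that pmap_find_vhpt()/pmap_remove_vhpt() compare on lookup.
 */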
1796:
1797: /*
1798: * Remove the ia64_lpte matching va from the VHPT. Return zero if it
1799: * worked or an appropriate error code otherwise.
1800: */
1801: static int
1802: pmap_remove_vhpt(vaddr_t va)
1803: {
1804: struct ia64_bucket *bckt;
1805: struct ia64_lpte *pte;
1806: struct ia64_lpte *lpte;
1807: struct ia64_lpte *vhpte;
1808: uint64_t chain, tag;
1809:
1810: tag = ia64_ttag(va);
1811: vhpte = (struct ia64_lpte *)ia64_thash(va);
1812: bckt = (struct ia64_bucket *)vhpte->chain;
1813:
1814: lpte = NULL;
1.12 kochi 1815: mutex_enter(&bckt->lock);
1.1 cherry 1816:
1817: chain = bckt->chain;
1818: pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
1819: while (chain != 0 && pte->tag != tag) {
1820: lpte = pte;
1821: chain = pte->chain;
1822: pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
1823: }
1824: if (chain == 0) {
1.12 kochi 1825: mutex_exit(&bckt->lock);
1826: return ENOENT;
1.1 cherry 1827: }
1828:
 1829: 	/* Snip this pte out of the collision chain. */
1830: if (lpte == NULL)
1831: bckt->chain = pte->chain;
1832: else
1833: lpte->chain = pte->chain;
1834: ia64_mf();
1835:
1836: bckt->length--;
1.12 kochi 1837: mutex_exit(&bckt->lock);
1838: return 0;
1.1 cherry 1839: }
1840:
1841: /*
1842: * Find the ia64_lpte for the given va, if any.
1843: */
1844: static struct ia64_lpte *
1845: pmap_find_vhpt(vaddr_t va)
1846: {
1847: struct ia64_bucket *bckt;
1848: struct ia64_lpte *pte;
1849: uint64_t chain, tag;
1850:
1851: tag = ia64_ttag(va);
1852: pte = (struct ia64_lpte *)ia64_thash(va);
1853: bckt = (struct ia64_bucket *)pte->chain;
1854:
1.12 kochi 1855: mutex_enter(&bckt->lock);
1.1 cherry 1856: chain = bckt->chain;
1857: pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
1858: while (chain != 0 && pte->tag != tag) {
1859: chain = pte->chain;
1860: pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain);
1861: }
1.12 kochi 1862: mutex_exit(&bckt->lock);
1863: return (chain != 0) ? pte : NULL;
1.1 cherry 1864: }
1865:
1866: /*
1867: * Remove an entry from the list of managed mappings.
1868: */
1869: static int
1.25 uebayasi 1870: pmap_remove_entry(pmap_t pmap, struct vm_page *pg, vaddr_t va, pv_entry_t pv)
1.1 cherry 1871: {
1.25 uebayasi 1872: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1873:
1.1 cherry 1874: if (!pv) {
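/*
 * No pv supplied: look it up on whichever list is likely
 * shorter, the page's pv list or the pmap's list of all
 * its mappings.
 */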
1.25 uebayasi 1875: if (md->pv_list_count < pmap->pm_stats.resident_count) {
1.28.2.2! tls 1876: TAILQ_FOREACH(pv, &md->pv_list, pv_list)
! 1877: if (pmap == pv->pv_pmap && va == pv->pv_va)
1.1 cherry 1878: break;
1879: } else {
1.28.2.2! tls 1880: TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist)
! 1881: if (va == pv->pv_va)
1.1 cherry 1882: break;
1883: }
1884: }
1885:
1886: if (pv) {
1.25 uebayasi 1887: TAILQ_REMOVE(&md->pv_list, pv, pv_list);
1888: md->pv_list_count--;
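/* If no mappings remain, the page can be marked read-only. */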
1.28.2.2! tls 1889: if (TAILQ_FIRST(&md->pv_list) == NULL)
1.1 cherry 1890: pg->flags |= PG_RDONLY;
1891:
1892: TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1893: free_pv_entry(pv);
1894: return 0;
1.28.2.2! tls 1895: } else
1.1 cherry 1896: return ENOENT;
1897: }
1898:
1899: /*
1900:  * Create a pv entry for the page pg mapped at (pmap, va).
1901:  */
1903: static void
1904: pmap_insert_entry(pmap_t pmap, vaddr_t va, struct vm_page *pg)
1905: {
1.25 uebayasi 1906: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.1 cherry 1907: pv_entry_t pv;
1908:
1909: pv = get_pv_entry(pmap);
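/* XXX: get_pv_entry() uses PR_NOWAIT and can return NULL;
 * that case is not handled here. */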
1910: pv->pv_pmap = pmap;
1911: pv->pv_va = va;
1912:
1913: TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1.25 uebayasi 1914: TAILQ_INSERT_TAIL(&md->pv_list, pv, pv_list);
1915: md->pv_list_count++;
1.1 cherry 1916: }
1917:
1918: /*
1919:  * Remove a single page from a process address space.
1920: */
1921: static void
1922: pmap_remove_page(pmap_t pmap, vaddr_t va)
1923: {
1924: struct ia64_lpte *pte;
1925:
1926: KASSERT(pmap == pmap_kernel() || pmap == curcpu()->ci_pmap);
1927:
1928: pte = pmap_find_vhpt(va);
1929: if (pte) {
1930: pmap_remove_pte(pmap, pte, va, 0, 1);
1931: pmap_invalidate_page(pmap, va);
1932: }
1933: return;
1934: }
1935:
1936: /*
1937: * pmap_pv_page_alloc:
1938: *
1939: * Allocate a page for the pv_entry pool.
1940: */
1941: void *
1942: pmap_pv_page_alloc(struct pool *pp, int flags)
1943: {
1944: paddr_t pg;
1945:
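/*
 * Hand the page back through the region 7 direct map, so no
 * kernel virtual address needs to be allocated for it.
 */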
1946: if (pmap_poolpage_alloc(&pg))
1.12 kochi 1947: return (void *)IA64_PHYS_TO_RR7(pg);
1948: return NULL;
1.1 cherry 1949: }
1950:
1951: /*
1952: * pmap_pv_page_free:
1953: *
1954: * Free a pv_entry pool page.
1955: */
1956: void
1957: pmap_pv_page_free(struct pool *pp, void *v)
1958: {
1959:
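/* Strip the region bits to recover the physical address. */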
1960: pmap_poolpage_free(IA64_RR_MASK((vaddr_t)v));
1961: }
1962:
1963: /******************** misc. functions ********************/
1964:
1965: /*
1966: * pmap_poolpage_alloc: based on alpha/pmap_physpage_alloc
1967: *
1968: * Allocate a single page from the VM system and return the
1969: * physical address for that page.
1970: */
1.5 thorpej 1971: bool
1.1 cherry 1972: pmap_poolpage_alloc(paddr_t *pap)
1973: {
1974: struct vm_page *pg;
1975: paddr_t pa;
1976:
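/*
 * Grab a zeroed page, dipping into the reserve if necessary so
 * that pool allocations are less likely to fail under pressure.
 */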
1977: pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO);
1978: if (pg != NULL) {
1979: pa = VM_PAGE_TO_PHYS(pg);
1980:
1981: #ifdef DEBUG
1.25 uebayasi 1982: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1983: mutex_enter(&md->pv_mutex);
1.1 cherry 1984: if (pg->wire_count != 0) {
1985: printf("pmap_poolpage_alloc: page 0x%lx has "
1986: "%d references\n", pa, pg->wire_count);
1987: panic("pmap_poolpage_alloc");
1988: }
1.25 uebayasi 1989: mutex_exit(&md->pv_mutex);
1.1 cherry 1990: #endif
1991: *pap = pa;
1.12 kochi 1992: return true;
1.1 cherry 1993: }
1.12 kochi 1994: return false;
1.1 cherry 1995: }
1996:
1997: /*
1998:  * pmap_poolpage_free: based on alpha/pmap_physpage_free
1999:  *
2000:  * Free the single pool page at the specified physical address.
2001: */
2002: void
2003: pmap_poolpage_free(paddr_t pa)
2004: {
2005: struct vm_page *pg;
2006:
2007: if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
2008: panic("pmap_poolpage_free: bogus physical page address");
2009:
2010: #ifdef DEBUG
1.25 uebayasi 2011: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2012: mutex_enter(&md->pv_mutex);
1.1 cherry 2013: if (pg->wire_count != 0)
2014: panic("pmap_poolpage_free: page still has references");
1.25 uebayasi 2015: mutex_exit(&md->pv_mutex);
1.1 cherry 2016: #endif
2017:
2018: uvm_pagefree(pg);
2019: }
2020:
2021: #ifdef DEBUG
2022:
2023: static void
2024: dump_vhpt(void)
2025: {
2026: vaddr_t base;
2027: vsize_t size, i;
2028: struct ia64_lpte *pte;
2029:
1.28.2.2! tls 2030: __asm __volatile("mov %0=cr.pta;; srlz.i;;" : "=r" (base));
1.1 cherry 2031:
2032: #define VHPTBASE(x) ( (x) & (~0x7fffUL) )
2033: #define VHPTSIZE(x) ( (vsize_t) (1 << (((x) & 0x7cUL) >> 2)))
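/*
 * cr.pta holds the VHPT base address in its upper bits and the
 * table size, as a power of two, in a small low field: VHPTBASE()
 * strips the low 15 bits and VHPTSIZE() expands the log2 size
 * field into a byte count.
 */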
2034:
2035: size = VHPTSIZE(base);
2036: base = VHPTBASE(base);
2037:
2038: pte = (void *) base;
2039:
2040: printf("vhpt base = %lx\n", base);
2041: printf("vhpt size = %lx\n", size);
2042:
1.28.2.2! tls 2043: for (i = 0; i < size / sizeof(struct ia64_lpte); i++)
1.1 cherry 2044: if (pte[i].pte & PTE_PRESENT) {
2045: printf("PTE_PRESENT ");
2046:
1.28.2.2! tls 2047: if (pte[i].pte & PTE_MA_MASK) printf("MA: ");
! 2048: if (pte[i].pte & PTE_MA_WB) printf("WB ");
! 2049: if (pte[i].pte & PTE_MA_UC) printf("UC ");
! 2050: if (pte[i].pte & PTE_MA_UCE) printf("UCE ");
! 2051: if (pte[i].pte & PTE_MA_WC) printf("WC ");
! 2052: if (pte[i].pte & PTE_MA_NATPAGE)printf("NATPAGE ");
! 2053:
! 2054: if (pte[i].pte & PTE_ACCESSED) printf("PTE_ACCESSED ");
! 2055: if (pte[i].pte & PTE_DIRTY) printf("PTE_DIRTY ");
! 2056:
! 2057: if (pte[i].pte & PTE_PL_MASK) printf("PL: ");
! 2058: if (pte[i].pte & PTE_PL_KERN) printf("KERN");
! 2059: if (pte[i].pte & PTE_PL_USER) printf("USER");
! 2060:
! 2061: if (pte[i].pte & PTE_AR_MASK) printf("AR: ");
! 2062: if (pte[i].pte & PTE_AR_R) printf("R ");
! 2063: if (pte[i].pte & PTE_AR_RX) printf("RX ");
! 2064: if (pte[i].pte & PTE_AR_RWX) printf("RWX ");
! 2065: if (pte[i].pte & PTE_AR_R_RW) printf("R RW ");
! 2066: if (pte[i].pte & PTE_AR_RX_RWX) printf("RX RWX ");
1.1 cherry 2067:
2068: printf("ppn = %lx", (pte[i].pte & PTE_PPN_MASK) >> 12);
2069:
1.28.2.2! tls 2070: if (pte[i].pte & PTE_ED) printf("ED ");
1.1 cherry 2071:
1.28.2.2! tls 2072: if (pte[i].pte & PTE_IG_MASK) printf("OS: ");
! 2073: if (pte[i].pte & PTE_WIRED) printf("WIRED ");
! 2074: if (pte[i].pte & PTE_MANAGED) printf("MANAGED ");
1.1 cherry 2075: printf("\n");
2076: }
2077: }
2078: #endif