
src/sys/arch/x86/include/pmap.h, Revision 1.52

/*	$NetBSD: pmap.h,v 1.51 2012/03/11 16:28:02 jym Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 *
 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
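
/*
 * Example (illustrative sketch, kept under #if 0): pl2_pi(va) is the
 * slot within the single ptp page holding the PDE for va, while
 * pl2_i(va) indexes the flat virtual array of all level-2 PDEs.  The
 * hypothetical helper below dumps the per-level indexes of a VA;
 * pl_i_roundup() names the index of the next level boundary at or
 * above va.
 */
#if 0
static void
dump_va_indexes(vaddr_t va)
{
	int lvl;

	for (lvl = 1; lvl <= PTP_LEVELS; lvl++) {
		printf("level %d: pl_i %lu, pl_i_roundup %lu\n", lvl,
		    (u_long)pl_i(va, lvl), (u_long)pl_i_roundup(va, lvl));
	}
}
#endif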

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
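
/*
 * Example (sketch, kept under #if 0): the level-1 PTP that holds va's
 * PTE lives at byte offset ptp_va2o(va, 1) inside the recursive PTE
 * area, so the PTE pointer for va must fall within that page.
 */
#if 0
static void
check_ptp_offset(vaddr_t va)
{
	pt_entry_t *ptp;

	ptp = (pt_entry_t *)((char *)PTE_BASE + ptp_va2o(va, 1));
	KASSERT(vtopte(va) >= ptp &&
	    vtopte(va) < ptp + PAGE_SIZE / sizeof(*ptp));
}
#endif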

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
#include <sys/kcpuset.h>

/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * linked list of all non-kernel pmaps
 */
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock;	/* protects pmaps */

/*
 * pool_cache(9) that PDPs are allocated from
 */
extern struct pool_cache pmap_pdp_cache;

/*
 * the pmap structure
 *
 * note that the pm_obj contains the lock pointer, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the lock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define pm_lock pm_obj[0].vmobjlock
	kmutex_t pm_obj_lock[PTP_LEVELS-1];	/* locks for pm_objs */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
					   ptp mapped */
	uint64_t pm_ncsw;		/* for assertions */
	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
};

/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif
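
/*
 * Example (sketch, kept under #if 0): the PD described by pm_pdirpa[]
 * is indexed with pl2_i() on i386 (PAE or not) and pl4_i() on amd64.
 * Under PAE, pmap_pdirpa() first picks one of the four PD pages with
 * l2tol3() and then the slot within it with l2tol2(); otherwise it is
 * a plain array lookup.
 */
#if 0
static paddr_t
pde_slot_pa(struct pmap *pmap, vaddr_t va)
{
	/* physical address of the PDE mapping va (i386 flavours) */
	return pmap_pdirpa(pmap, pl2_i(va));
}
#endif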

/*
 * flag to be used for kernel mappings: PG_u on Xen/amd64,
 * 0 otherwise.
 */
#if defined(XEN) && defined(__x86_64__)
#define PG_k PG_u
#else
#define PG_k 0
#endif

/*
 * MD flags that we use for pmap_enter and pmap_kenter_pa:
 */

/*
 * global kernel variables
 */

/*
 * PDPpaddr is the physical address of the kernel's PDP.
 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
 *   value associated with the kernel process, proc0.
 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
 *   the extra L3 PD, it is no longer the equivalent of a %cr3 value.
 * - Xen: it corresponds to the PFN of the kernel's PDP.
 */
extern u_long PDPpaddr;
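
/*
 * Example (sketch, kept under #if 0): only in the i386 non-PAE and
 * amd64 cases may PDPpaddr be loaded into %cr3 as-is; PAE interposes
 * the L3 PD and Xen stores a PFN here, so both need conversion first.
 */
#if 0
	lcr3(PDPpaddr);		/* i386 non-PAE and amd64 only */
#endif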

extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		(x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		x86_mmap_flags(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#if defined(__x86_64__) || defined(PAE)
#define X86_MMAP_FLAG_SHIFT	(64 - PGSHIFT)
#else
#define X86_MMAP_FLAG_SHIFT	(32 - PGSHIFT)
#endif

#define X86_MMAP_FLAG_MASK	0xf
#define X86_MMAP_FLAG_PREFETCH	0x1
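
/*
 * Example (sketch, kept under #if 0, assuming the usual x86_btop()
 * counterpart of x86_ptob()): a driver's mmap entry point can encode
 * X86_MMAP_FLAG_* bits in the topmost bits of the page number it
 * returns.  x86_mmap_flags() extracts them from that cookie, while
 * pmap_phys_address() turns the cookie back into a plain byte address.
 */
#if 0
static paddr_t
prefetchable_mmap_cookie(paddr_t pa)
{
	return x86_btop(pa) |
	    ((paddr_t)X86_MMAP_FLAG_PREFETCH << X86_MMAP_FLAG_SHIFT);
}
#endif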

/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_sync(struct pmap *);

void		pmap_emap_enter(vaddr_t, paddr_t, vm_prot_t);
void		pmap_emap_remove(vaddr_t, vsize_t);
void		pmap_emap_sync(bool);

void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
		    pd_entry_t * const **);
void		pmap_unmap_ptes(struct pmap *, struct pmap *);

int		pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);

u_int		x86_mmap_flags(paddr_t);

bool		pmap_is_curpmap(struct pmap *);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

typedef enum tlbwhy {
	TLBSHOOT_APTE,
	TLBSHOOT_KENTER,
	TLBSHOOT_KREMOVE,
	TLBSHOOT_FREE_PTP1,
	TLBSHOOT_FREE_PTP2,
	TLBSHOOT_REMOVE_PTE,
	TLBSHOOT_REMOVE_PTES,
	TLBSHOOT_SYNC_PV1,
	TLBSHOOT_SYNC_PV2,
	TLBSHOOT_WRITE_PROTECT,
	TLBSHOOT_ENTER,
	TLBSHOOT_UPDATE,
	TLBSHOOT_BUS_DMA,
	TLBSHOOT_BUS_SPACE,
	TLBSHOOT__MAX,
} tlbwhy_t;

void		pmap_tlb_init(void);
void		pmap_tlb_cpu_init(struct cpu_info *);
void		pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void		pmap_tlb_shootnow(void);
void		pmap_tlb_intr(void);
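
/*
 * Example (sketch, kept under #if 0): a typical unmap path queues one
 * shootdown per PTE it clears, tagged with the reason, and fires a
 * single IPI burst at the end.
 */
#if 0
static void
example_remove_range(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
{
	pt_entry_t opte;
	vaddr_t va;

	for (va = sva; va < eva; va += PAGE_SIZE) {
		opte = 0;	/* ... atomically swap the old PTE out ... */
		pmap_tlb_shootdown(pmap, va, opte, TLBSHOOT_REMOVE_PTES);
	}
	pmap_tlb_shootnow();	/* one IPI burst for everything queued */
}
#endif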

#define __HAVE_PMAP_EMAP

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define PMAP_FORK		/* turn on pmap_fork interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

__inline static bool __unused
pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
	return pmap_pdes_invalid(va, pdes, lastpde) == 0;
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __unused
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);

#ifdef XEN
#include <sys/bitops.h>

#define XPTE_MASK	L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))

/* PTE access inline functions */

/*
 * Get the machine address of the PTE slot that "pte" points to.
 * The lookup goes through the hardware MMU (the recursive PTE mapping),
 * so it works only for levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) +
	    (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif /* XEN */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);

/*
 * Hooks for the pool allocator.
 */
#define POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#ifdef __HAVE_DIRECT_MAP

#define L4_SLOT_DIRECT		509
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

#define PMAP_DIRECT_BASE	(VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
#define PMAP_DIRECT_END		(VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)

/*
 * Alternate mapping hooks for pool pages.
 */
#define PMAP_MAP_POOLPAGE(pa)	PMAP_DIRECT_MAP((pa))
#define PMAP_UNMAP_POOLPAGE(va)	PMAP_DIRECT_UNMAP((va))
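
/*
 * Example (sketch, kept under #if 0): with the direct map, any physical
 * page can be touched through its linear alias without setting up a
 * transient mapping first.
 */
#if 0
static uint8_t
peek_phys_byte(paddr_t pa)
{
	return *(volatile uint8_t *)PMAP_DIRECT_MAP(pa);
}
#endif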

void	pagezero(vaddr_t);

#endif /* __HAVE_DIRECT_MAP */

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */
