
src/sys/arch/x86/include/pmap.h, revision 1.71

/*	$NetBSD: pmap.h,v 1.70 2017/10/29 10:01:22 maxv Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define	_X86_PMAP_H_

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 *
 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define	pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
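
/*
 * Illustrative sketch (not part of the header): on amd64, where
 * L1_SHIFT == 12 and each level holds 512 entries, the macros above
 * decompose a (sign-stripped) VA as follows.  "va" is hypothetical.
 *
 *	pl1_pi(va);		// slot 0..511 inside the PTP mapping va
 *	pl1_i(va);		// index into the linear PTE array, i.e.
 *				// PTE_BASE[pl1_i(va)] is va's PTE
 *	pl_i(va, 2);		// same as pl2_i(va)
 *	pl_i_roundup(va, 2);	// L2 index, with va first rounded up to
 *				// the next 2MB (NBPD_L2) boundary
 */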

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
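
/*
 * For instance (illustrative only): for a level-1 PTP, the index is
 * pl2_i(va), and the offset is that index scaled to bytes, which is
 * exactly what ptp_va2o computes:
 *
 *	ptp_va2o(va, 1) == pl_i(va, 2) * PAGE_SIZE
 *
 * i.e. the byte offset of va's PTE page within the linear PTE space.
 */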

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
#include <sys/kcpuset.h>
#include <uvm/pmap/pmap_pvt.h>

#define BTSEG_NONE	0
#define BTSEG_TEXT	1
#define BTSEG_RODATA	2
#define BTSEG_DATA	3
#define BTSPACE_NSEGS	64

struct bootspace {
	struct {
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} head;

	/* Kernel segments. */
	struct {
		int type;
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} segs[BTSPACE_NSEGS];

	/*
	 * The area used by the early kernel bootstrap. It contains the kernel
	 * symbols, the preloaded modules, the bootstrap tables, and the ISA I/O
	 * mem.
	 */
	struct {
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} boot;

	/* A magic VA usable by the bootstrap code. */
	vaddr_t spareva;

	/* Virtual address of the page directory. */
	vaddr_t pdir;

	/* End of the area dedicated to kernel modules (amd64 only). */
	vaddr_t emodule;
};
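
/*
 * A minimal sketch (hypothetical, not part of the header) of how a
 * consumer might walk the segment array, e.g. to locate the kernel
 * text.  "bootspace" here stands for the global instance that the MD
 * bootstrap code fills in.
 *
 *	for (size_t i = 0; i < BTSPACE_NSEGS; i++) {
 *		if (bootspace.segs[i].type != BTSEG_TEXT)
 *			continue;
 *		// text is at [va, va + sz), backed by pa
 *	}
 */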

/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * linked list of all non-kernel pmaps
 */
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock;	/* protects pmaps */

/*
 * pool_cache(9) that PDPs are allocated from
 */
extern struct pool_cache pmap_pdp_cache;

/*
 * the pmap structure
 *
 * note that the pm_obj contains the lock pointer, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the lock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define	pm_lock pm_obj[0].vmobjlock
	kmutex_t pm_obj_lock[PTP_LEVELS-1];	/* locks for pm_objs */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
					 of pmap */
	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
					 ptp mapped */
	uint64_t pm_ncsw;		/* for assertions */
	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
};
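
/*
 * Hedged usage note: pm_lock is a pointer to the kmutex that also
 * guards vm object 0, so (assuming the standard mutex(9) interface)
 * a caller serializing against this pmap would do:
 *
 *	mutex_enter(pmap->pm_lock);
 *	// ... inspect or modify pmap state ...
 *	mutex_exit(pmap->pm_lock);
 */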

/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif
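
/*
 * For example (illustrative): the physical address of the top-level
 * PDE covering a VA could be obtained with
 *
 *	pmap_pdirpa(pmap, pl4_i(va))	// amd64
 *
 * On PAE the index is first split by l2tol3()/l2tol2() because the
 * page directory spans PDP_SIZE (4) pages.
 */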

/*
 * MD flags that we use for pmap_enter and pmap_kenter_pa:
 */

/*
 * global kernel variables
 */

/*
 * PDPpaddr is the physical address of the kernel's PDP.
 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
 * value associated with the kernel process, proc0.
 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
 * the L3 PD, it can no longer be considered the equivalent of a %cr3.
 * - Xen: it corresponds to the PFN of the kernel's PDP.
 */
extern u_long PDPpaddr;
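
/*
 * A hedged illustration: on native i386 non-PAE and amd64, switching
 * to the kernel's address space amounts to loading this value into
 * %cr3 (lcr3() being the usual x86 cpufunc wrapper):
 *
 *	lcr3(PDPpaddr);		// not valid as-is on PAE or Xen
 */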

extern pd_entry_t pmap_pg_g;			/* do we support PG_G? */
extern pd_entry_t pmap_pg_nx;			/* do we support PG_NX? */
extern int pmap_largepages;
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)		__USE(L)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		(x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		x86_mmap_flags(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#if defined(__x86_64__) || defined(PAE)
#define X86_MMAP_FLAG_SHIFT	(64 - PGSHIFT)
#else
#define X86_MMAP_FLAG_SHIFT	(32 - PGSHIFT)
#endif

#define X86_MMAP_FLAG_MASK	0xf
#define X86_MMAP_FLAG_PREFETCH	0x1
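
/*
 * Sketch of the intended encoding (our reading of the macros above,
 * not a verbatim driver excerpt): a device mmap handler can stash MD
 * flags in the top bits of the page number it returns, e.g.
 *
 *	return (x86_btop(pa) |
 *	    ((paddr_t)X86_MMAP_FLAG_PREFETCH << X86_MMAP_FLAG_SHIFT));
 *
 * pmap_mmap_flags() later extracts the flags, and pmap_phys_address()
 * recovers the physical address.
 */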

/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
bool		pmap_pv_clear_attrs(paddr_t, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_pv_remove(paddr_t);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_cleanup(struct lwp *);
void		pmap_ldt_sync(struct pmap *);
void		pmap_kremove_local(vaddr_t, vsize_t);

void		pmap_emap_enter(vaddr_t, paddr_t, vm_prot_t);
void		pmap_emap_remove(vaddr_t, vsize_t);
void		pmap_emap_sync(bool);

#define	__HAVE_PMAP_PV_TRACK	1
void		pmap_pv_init(void);
void		pmap_pv_track(paddr_t, psize_t);
void		pmap_pv_untrack(paddr_t, psize_t);

void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
		    pd_entry_t * const **);
void		pmap_unmap_ptes(struct pmap *, struct pmap *);

int		pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);

u_int		x86_mmap_flags(paddr_t);

bool		pmap_is_curpmap(struct pmap *);

#ifndef __HAVE_DIRECT_MAP
void		pmap_vpage_cpu_init(struct cpu_info *);
#endif

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

typedef enum tlbwhy {
	TLBSHOOT_APTE,
	TLBSHOOT_KENTER,
	TLBSHOOT_KREMOVE,
	TLBSHOOT_FREE_PTP1,
	TLBSHOOT_FREE_PTP2,
	TLBSHOOT_REMOVE_PTE,
	TLBSHOOT_REMOVE_PTES,
	TLBSHOOT_SYNC_PV1,
	TLBSHOOT_SYNC_PV2,
	TLBSHOOT_WRITE_PROTECT,
	TLBSHOOT_ENTER,
	TLBSHOOT_UPDATE,
	TLBSHOOT_BUS_DMA,
	TLBSHOOT_BUS_SPACE,
	TLBSHOOT__MAX,
} tlbwhy_t;

void		pmap_tlb_init(void);
void		pmap_tlb_cpu_init(struct cpu_info *);
void		pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void		pmap_tlb_shootnow(void);
void		pmap_tlb_intr(void);
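
/*
 * Hedged usage sketch: after modifying a PTE, a caller queues a
 * shootdown tagged with a tlbwhy_t reason (used for diagnostics) and
 * may force it out synchronously:
 *
 *	pmap_tlb_shootdown(pmap, va, opte, TLBSHOOT_REMOVE_PTE);
 *	pmap_tlb_shootnow();
 */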

#define	__HAVE_PMAP_EMAP

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define PMAP_FORK		/* turn on pmap_fork interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

__inline static bool __unused
pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
	return pmap_pdes_invalid(va, pdes, lastpde) == 0;
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __unused
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_pv_protect: change the protection of all recorded mappings
 *	of an unmanaged page
 */

__inline static void __unused
pmap_pv_protect(paddr_t pa, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_pv_clear_attrs(pa, PG_RW);
		} else {
			pmap_pv_remove(pa);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
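
/*
 * Illustrative use (hypothetical caller): to inspect the raw PTE of a
 * kernel VA, remembering that for a superpage (PG_PS) kvtopte()
 * actually returns the PDE recast as a PTE pointer:
 *
 *	pt_entry_t *ptep = kvtopte(va);
 *	pt_entry_t opte = *ptep;
 *	if (opte & PG_V)
 *		// va is currently mapped
 */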

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);

#ifdef XEN
#include <sys/bitops.h>

#define XPTE_MASK	L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))

/* PTE access inline functions */

/*
 * Get the machine address of the PTE pointed to by the argument.
 * We use the hardware MMU to look up the value, so this works only
 * for levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) + (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}
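
/*
 * Hedged example (based on the usual Xen pmap pattern, not quoted
 * verbatim): under Xen, page table updates go through the hypervisor,
 * so a PTE write typically becomes
 *
 *	xpq_queue_pte_update(xpmap_ptetomach(ptep), npte);
 *
 * with the machine address of the PTE slot supplied by the helper
 * above.
 */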

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif /* XEN */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
void	pmap_free_ptps(struct vm_page *);

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#ifdef __HAVE_DIRECT_MAP

#define L4_SLOT_DIRECT		456
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

#define NL4_SLOT_DIRECT		32

#define PMAP_DIRECT_BASE	(VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
#define PMAP_DIRECT_END		(PMAP_DIRECT_BASE + NL4_SLOT_DIRECT * NBPD_L4)

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)
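
/*
 * For example (illustrative): with the direct map available, zeroing
 * an arbitrary physical page needs no temporary mapping:
 *
 *	pagezero(PMAP_DIRECT_MAP(pa));
 */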

/*
 * Alternate mapping hooks for pool pages.
 */
#define PMAP_MAP_POOLPAGE(pa)	PMAP_DIRECT_MAP((pa))
#define PMAP_UNMAP_POOLPAGE(va)	PMAP_DIRECT_UNMAP((va))

void	pagezero(vaddr_t);

#endif /* __HAVE_DIRECT_MAP */

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */
