
Annotation of src/sys/arch/amd64/include/pmap.h, Revision 1.57

1.57    ! maxv        1: /*     $NetBSD: pmap.h,v 1.56 2018/08/29 06:28:50 maxv Exp $   */
1.1       fvdl        2:
                      3: /*
                      4:  * Copyright (c) 1997 Charles D. Cranor and Washington University.
                      5:  * All rights reserved.
                      6:  *
                      7:  * Redistribution and use in source and binary forms, with or without
                      8:  * modification, are permitted provided that the following conditions
                      9:  * are met:
                     10:  * 1. Redistributions of source code must retain the above copyright
                     11:  *    notice, this list of conditions and the following disclaimer.
                     12:  * 2. Redistributions in binary form must reproduce the above copyright
                     13:  *    notice, this list of conditions and the following disclaimer in the
                     14:  *    documentation and/or other materials provided with the distribution.
                     15:  *
                     16:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     17:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     18:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     19:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     20:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     21:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     22:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     23:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     24:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     25:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     26:  */
                     27:
                     28: /*
                     29:  * Copyright (c) 2001 Wasabi Systems, Inc.
                     30:  * All rights reserved.
                     31:  *
                     32:  * Written by Frank van der Linden for Wasabi Systems, Inc.
                     33:  *
                     34:  * Redistribution and use in source and binary forms, with or without
                     35:  * modification, are permitted provided that the following conditions
                     36:  * are met:
                     37:  * 1. Redistributions of source code must retain the above copyright
                     38:  *    notice, this list of conditions and the following disclaimer.
                     39:  * 2. Redistributions in binary form must reproduce the above copyright
                     40:  *    notice, this list of conditions and the following disclaimer in the
                     41:  *    documentation and/or other materials provided with the distribution.
                     42:  * 3. All advertising materials mentioning features or use of this software
                     43:  *    must display the following acknowledgement:
                     44:  *      This product includes software developed for the NetBSD Project by
                     45:  *      Wasabi Systems, Inc.
                     46:  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
                     47:  *    or promote products derived from this software without specific prior
                     48:  *    written permission.
                     49:  *
                     50:  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
                     51:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     52:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     53:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
                     54:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     55:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     56:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     57:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     58:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     59:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     60:  * POSSIBILITY OF SUCH DAMAGE.
                     61:  */
                     62:
                     63: #ifndef        _AMD64_PMAP_H_
                     64: #define        _AMD64_PMAP_H_
                     65:
1.22      mrg        66: #ifdef __x86_64__
                     67:
1.15      bouyer     68: #if defined(_KERNEL_OPT)
                     69: #include "opt_xen.h"
1.55      maxv       70: #include "opt_kasan.h"
1.15      bouyer     71: #endif
                     72:
1.16      ad         73: #include <sys/atomic.h>
                     74:
1.1       fvdl       75: #include <machine/pte.h>
                     76: #include <machine/segments.h>
1.12      ad         77: #ifdef _KERNEL
                     78: #include <machine/cpufunc.h>
                     79: #endif
1.11      ad         80:
1.1       fvdl       81: #include <uvm/uvm_object.h>
1.15      bouyer     82: #ifdef XEN
                     83: #include <xen/xenfunc.h>
                     84: #include <xen/xenpmap.h>
1.54      maxv       85: #endif
1.1       fvdl       86:
                     87: /*
                     88:  * Mask to get rid of the sign-extended part of addresses.
                     89:  */
                     90: #define VA_SIGN_MASK           0xffff000000000000
                     91: #define VA_SIGN_NEG(va)                ((va) | VA_SIGN_MASK)
1.54      maxv       92: /* XXXfvdl this one's not right. */
1.1       fvdl       93: #define VA_SIGN_POS(va)                ((va) & ~VA_SIGN_MASK)
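
/*
 * Worked example (added for illustration, not part of the original header):
 * on amd64 bit 47 of a virtual address is sign-extended into bits 48-63, so
 * a table-derived address in the upper half must be OR'ed with VA_SIGN_MASK
 * to become canonical.  Assuming NBPD_L4 == 0x8000000000 (512 GB per L4
 * slot, from the MI x86 headers):
 *
 *	256 * NBPD_L4                 == 0x0000800000000000  (non-canonical)
 *	VA_SIGN_NEG(256 * NBPD_L4)    == 0xffff800000000000  (canonical)
 *
 * VA_SIGN_POS() strips the extension again before the index macros shift
 * the address down.
 */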
                     94:
1.55      maxv       95: #ifdef KASAN
                     96: #define L4_SLOT_KASAN          256
                     97: #define NL4_SLOT_KASAN         32
                     98: #endif
                     99:
1.56      maxv      100: #define NL4_SLOT_DIRECT                32
                    101:
1.53      maxv      102: #ifndef XEN
                    103: #define L4_SLOT_PTE            slotspace.area[SLAREA_PTE].sslot
                    104: #else
1.52      maxv      105: #define L4_SLOT_PTE            509
1.53      maxv      106: #endif
1.49      maxv      107: #define L4_SLOT_KERN           slotspace.area[SLAREA_MAIN].sslot
1.37      maxv      108: #define L4_SLOT_KERNBASE       511 /* pl4_i(KERNBASE) */
1.1       fvdl      109:
1.51      maxv      110: #define PDIR_SLOT_USERLIM      255
1.1       fvdl      111: #define PDIR_SLOT_KERN L4_SLOT_KERN
                    112: #define PDIR_SLOT_PTE  L4_SLOT_PTE
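
/*
 * Worked example (added for illustration, not part of the original header):
 * each L4 slot covers NBPD_L4 == 512 GB of virtual space, and the base VA of
 * a kernel slot is VA_SIGN_NEG(slot * NBPD_L4).  Assuming KERNBASE ==
 * 0xffffffff80000000 as on amd64:
 *
 *	slot 511 base VA = VA_SIGN_NEG(511 * NBPD_L4) = 0xffffff8000000000
 *	KERNBASE falls in that range, hence L4_SLOT_KERNBASE == 511
 *	== pl4_i(KERNBASE).
 *
 * Slots below PDIR_SLOT_USERLIM (255) cover userland VA; the kernel slots
 * above it are either fixed here (Xen PTE slot, KERNBASE, KASAN shadow) or
 * chosen at boot through the slotspace structure.
 */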
                    113:
                    114: /*
1.36      maxv      115:  * The following defines give the virtual addresses of various MMU
1.1       fvdl      116:  * data structures:
1.32      cherry    117:  * PTE_BASE: the base VA of the linear PTE mappings
1.36      maxv      118:  * PDP_BASE: the base VA of the recursive mapping of the PTD
1.1       fvdl      119:  */
                    120:
1.53      maxv      121: #ifndef XEN
                    122: extern pt_entry_t *pte_base;
                    123: #define PTE_BASE       pte_base
                    124: #else
1.52      maxv      125: #define PTE_BASE       ((pt_entry_t *)VA_SIGN_NEG((L4_SLOT_PTE * NBPD_L4)))
1.53      maxv      126: #endif
1.1       fvdl      127:
1.36      maxv      128: #define L1_BASE        PTE_BASE
                    129: #define L2_BASE        ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
                    130: #define L3_BASE        ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
                    131: #define L4_BASE        ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))
1.1       fvdl      132:
                    133: #define PDP_BASE       L4_BASE
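
/*
 * A minimal sketch (added for illustration, not part of the original header)
 * of how the recursive mapping is consumed.  With the PTE slot pointing back
 * at the L4 page, L1_BASE..L4_BASE expose each paging level as one flat
 * array, so the entry for a VA at level N is simply BASE[plN_i(va)].  This
 * assumes the plN_i() index macros provided by the x86 pmap headers (pulled
 * in further down); "kvtopte_sketch" is a hypothetical name, not an existing
 * pmap interface.
 */
static __inline pt_entry_t *
kvtopte_sketch(vaddr_t va)
{
	/* Walk top-down purely for illustration; a level may only be
	 * dereferenced if the level above it is present. */
	if ((L4_BASE[pl4_i(va)] & PG_V) == 0)
		return NULL;
	if ((L3_BASE[pl3_i(va)] & PG_V) == 0)
		return NULL;
	if ((L2_BASE[pl2_i(va)] & PG_V) == 0)
		return NULL;
	return &PTE_BASE[pl1_i(va)];	/* the L1 (4 KB) PTE for va */
}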
                    134:
1.40      maxv      135: #define NKL4_MAX_ENTRIES       (unsigned long)64
1.1       fvdl      136: #define NKL3_MAX_ENTRIES       (unsigned long)(NKL4_MAX_ENTRIES * 512)
                    137: #define NKL2_MAX_ENTRIES       (unsigned long)(NKL3_MAX_ENTRIES * 512)
                    138: #define NKL1_MAX_ENTRIES       (unsigned long)(NKL2_MAX_ENTRIES * 512)
                    139:
                    140: #define NKL4_KIMG_ENTRIES      1
                    141: #define NKL3_KIMG_ENTRIES      1
1.42      maxv      142: #define NKL2_KIMG_ENTRIES      48
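
/*
 * Worked arithmetic (added for illustration): each entry one level up maps
 * 512 entries of the level below, hence the "* 512" chain above.  With
 * NBPD_L4 == 512 GB and NBPD_L2 == 2 MB this bounds kernel VA growth at
 *
 *	NKL4_MAX_ENTRIES * NBPD_L4  = 64 * 512 GB = 32 TB of kernel VA
 *	NKL2_KIMG_ENTRIES * NBPD_L2 = 48 * 2 MB   = 96 MB for the kernel image
 */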
1.1       fvdl      143:
                    144: /*
                    145:  * Since kva space is below the kernel in its entirety, we start off
                    146:  * with zero entries on each level.
                    147:  */
                    148: #define NKL4_START_ENTRIES     0
                    149: #define NKL3_START_ENTRIES     0
                    150: #define NKL2_START_ENTRIES     0
1.54      maxv      151: #define NKL1_START_ENTRIES     0
1.1       fvdl      152:
1.57    ! maxv      153: #define PTP_FRAME_INITIALIZER  { L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
1.1       fvdl      154: #define PTP_SHIFT_INITIALIZER  { L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
                    155: #define NKPTP_INITIALIZER      { NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
                    156:                                  NKL3_START_ENTRIES, NKL4_START_ENTRIES }
                    157: #define NKPTPMAX_INITIALIZER   { NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
                    158:                                  NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
                    159: #define NBPD_INITIALIZER       { NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
                    160: #define PDES_INITIALIZER       { L2_BASE, L3_BASE, L4_BASE }
                    161:
                    162: #define PTP_LEVELS     4
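
/*
 * A minimal sketch (added for illustration, not part of the original header)
 * of the pattern these initializers serve: they populate per-level arrays so
 * that level-independent pmap code can index by paging level.  The names
 * below are illustrative only, not the real consumers in the MI x86 pmap.
 */
static const unsigned long nbpd_sketch[PTP_LEVELS] = NBPD_INITIALIZER;

static __inline unsigned long
level_span_sketch(int level)	/* 1 <= level <= PTP_LEVELS */
{
	/* VA mapped by one entry at that level: 4 KB, 2 MB, 1 GB, 512 GB. */
	return nbpd_sketch[level - 1];
}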
                    163:
                    164: /*
                    165:  * PG_AVAIL usage: we make use of the ignored bits of the PTE
                    166:  */
                    167:
                    168: #define PG_W           PG_AVAIL1       /* "wired" mapping */
                    169: #define PG_PVLIST      PG_AVAIL2       /* mapping has entry on pvlist */
                    170: /* PG_AVAIL3 not used */
                    171:
1.56      maxv      172: #define        PG_X            0               /* dummy */
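
/*
 * Illustrative sketch (added, not part of the original header): these
 * software bits travel inside the hardware PTE, so they can be tested on a
 * plain pt_entry_t value.  "pte_is_wired_sketch" is a hypothetical helper,
 * not an existing pmap interface.
 */
static __inline bool
pte_is_wired_sketch(pt_entry_t pte)
{
	return (pte & PG_W) != 0;	/* PG_W marks a "wired" mapping */
}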
1.14      yamt      173:
1.41      maxv      174: void svs_pmap_sync(struct pmap *, int);
                    175: void svs_lwp_switch(struct lwp *, struct lwp *);
                    176: void svs_pdir_switch(struct pmap *);
1.45      maxv      177: void svs_init(void);
1.43      maxv      178: extern bool svs_enabled;
1.41      maxv      179:
1.15      bouyer    180: #include <x86/pmap.h>
                    181:
                    182: #ifndef XEN
                    183: #define pmap_pa2pte(a)                 (a)
                    184: #define pmap_pte2pa(a)                 ((a) & PG_FRAME)
                    185: #define pmap_pte_set(p, n)             do { *(p) = (n); } while (0)
1.19      yamt      186: #define pmap_pte_cas(p, o, n)          atomic_cas_64((p), (o), (n))
1.16      ad        187: #define pmap_pte_testset(p, n)         \
                    188:     atomic_swap_ulong((volatile unsigned long *)p, n)
                    189: #define pmap_pte_setbits(p, b)         \
                    190:     atomic_or_ulong((volatile unsigned long *)p, b)
                    191: #define pmap_pte_clearbits(p, b)       \
                    192:     atomic_and_ulong((volatile unsigned long *)p, ~(b))
1.15      bouyer    193: #define pmap_pte_flush()               /* nothing */
                    194: #else
1.29      cherry    195: extern kmutex_t pte_lock;
                    196:
1.15      bouyer    197: static __inline pt_entry_t
                    198: pmap_pa2pte(paddr_t pa)
                    199: {
                    200:        return (pt_entry_t)xpmap_ptom_masked(pa);
                    201: }
                    202:
                    203: static __inline paddr_t
                    204: pmap_pte2pa(pt_entry_t pte)
                    205: {
                    206:        return xpmap_mtop_masked(pte & PG_FRAME);
                    207: }
1.34      jym       208:
1.15      bouyer    209: static __inline void
                    210: pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
                    211: {
1.31      bouyer    212:        int s = splvm();
1.21      bouyer    213:        xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
1.31      bouyer    214:        splx(s);
1.15      bouyer    215: }
                    216:
                    217: static __inline pt_entry_t
1.20      bouyer    218: pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
1.19      yamt      219: {
1.29      cherry    220:        pt_entry_t opte;
1.19      yamt      221:
1.29      cherry    222:        mutex_enter(&pte_lock);
                    223:        opte = *ptep;
1.19      yamt      224:        if (opte == o) {
1.21      bouyer    225:                xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
1.19      yamt      226:                xpq_flush_queue();
                    227:        }
1.29      cherry    228:
                    229:        mutex_exit(&pte_lock);
1.19      yamt      230:        return opte;
                    231: }
                    232:
                    233: static __inline pt_entry_t
1.15      bouyer    234: pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
                    235: {
1.29      cherry    236:        pt_entry_t opte;
                    237:
                    238:        mutex_enter(&pte_lock);
                    239:        opte = *pte;
1.21      bouyer    240:        xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), npte);
1.15      bouyer    241:        xpq_flush_queue();
1.29      cherry    242:        mutex_exit(&pte_lock);
1.15      bouyer    243:        return opte;
                    244: }
                    245:
                    246: static __inline void
                    247: pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
                    248: {
1.29      cherry    249:        mutex_enter(&pte_lock);
1.21      bouyer    250:        xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
1.15      bouyer    251:        xpq_flush_queue();
1.29      cherry    252:        mutex_exit(&pte_lock);
1.15      bouyer    253: }
                    254:
                    255: static __inline void
                    256: pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
1.36      maxv      257: {
1.29      cherry    258:        mutex_enter(&pte_lock);
1.21      bouyer    259:        xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
1.15      bouyer    260:            (*pte) & ~bits);
                    261:        xpq_flush_queue();
1.29      cherry    262:        mutex_exit(&pte_lock);
1.15      bouyer    263: }
1.1       fvdl      264:
1.15      bouyer    265: static __inline void
                    266: pmap_pte_flush(void)
                    267: {
                    268:        int s = splvm();
                    269:        xpq_flush_queue();
                    270:        splx(s);
                    271: }
                    272: #endif
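
/*
 * A usage sketch (added for illustration, not part of the original header):
 * the accessors above give one API for both the native and the Xen case.
 * Natively they are plain atomics; under Xen they queue hypervisor updates,
 * which is why callers finish with pmap_pte_flush().  "pte_clear_bits_sketch"
 * is a hypothetical helper shown only to illustrate the calling convention.
 */
static __inline pt_entry_t
pte_clear_bits_sketch(volatile pt_entry_t *ptep, pt_entry_t bits)
{
	pt_entry_t opte, npte;

	do {
		opte = *ptep;
		npte = opte & ~bits;
	} while (pmap_pte_cas(ptep, opte, npte) != opte);
	pmap_pte_flush();	/* no-op natively, flushes the Xen queue */
	return opte;
}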
1.11      ad        273:
1.46      jdolecek  274: #ifdef __HAVE_DIRECT_MAP
                    275: #define PMAP_DIRECT
                    276:
                    277: static __inline int
                    278: pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
                    279:     int (*process)(void *, size_t, void *), void *arg)
                    280: {
                    281:        vaddr_t va = PMAP_DIRECT_MAP(pa);
                    282:
                    283:        return process((void *)(va + pgoff), len, arg);
                    284: }
                    285:
                    286: #endif /* __HAVE_DIRECT_MAP */
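
/*
 * A usage sketch (added for illustration, not part of the original header):
 * with the direct map, a physical page can be handed to a callback without
 * setting up a temporary KVA mapping.  Assumes __HAVE_DIRECT_MAP and a
 * visible PAGE_SIZE; "sum_page_sketch" and its callback are hypothetical.
 */
static int
sum_bytes_cb_sketch(void *buf, size_t len, void *arg)
{
	const uint8_t *p = buf;
	uint32_t *sum = arg;
	size_t i;

	for (i = 0; i < len; i++)
		*sum += p[i];
	return 0;
}

static __inline uint32_t
sum_page_sketch(paddr_t pa)
{
	uint32_t sum = 0;

	/* Process the whole page starting at offset 0. */
	(void)pmap_direct_process(pa, 0, PAGE_SIZE, sum_bytes_cb_sketch, &sum);
	return sum;
}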
                    287:
1.14      yamt      288: void pmap_changeprot_local(vaddr_t, vm_prot_t);
1.11      ad        289:
1.23      uebayasi  290: #include <x86/pmap_pv.h>
                    291:
                    292: #define        __HAVE_VM_PAGE_MD
                    293: #define        VM_MDPAGE_INIT(pg) \
                    294:        memset(&(pg)->mdpage, 0, sizeof((pg)->mdpage)); \
                    295:        PMAP_PAGE_INIT(&(pg)->mdpage.mp_pp)
                    296:
                    297: struct vm_page_md {
                    298:        struct pmap_page mp_pp;
                    299: };
                    300:
                    301: #else  /*      !__x86_64__     */
1.22      mrg       302:
                    303: #include <i386/pmap.h>
                    304:
                    305: #endif /*      __x86_64__      */
                    306:
1.1       fvdl      307: #endif /* _AMD64_PMAP_H_ */
