
Annotation of src/sys/arch/sparc64/sparc64/pmap.c, Revision 1.293

1.293   ! palle       1: /*     $NetBSD: pmap.c,v 1.292 2014/11/04 18:11:42 palle Exp $ */
1.1       eeh         2: /*
1.156     pk          3:  *
1.41      eeh         4:  * Copyright (C) 1996-1999 Eduardo Horvath.
1.1       eeh         5:  * All rights reserved.
                      6:  *
1.41      eeh         7:  *
1.1       eeh         8:  * Redistribution and use in source and binary forms, with or without
                      9:  * modification, are permitted provided that the following conditions
                     10:  * are met:
                     11:  * 1. Redistributions of source code must retain the above copyright
                     12:  *    notice, this list of conditions and the following disclaimer.
1.156     pk         13:  *
1.41      eeh        14:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
                     15:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     16:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     17:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
                     18:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     19:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     20:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     21:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     22:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     23:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     24:  * SUCH DAMAGE.
                     25:  *
1.1       eeh        26:  */
1.142     lukem      27:
                     28: #include <sys/cdefs.h>
1.293   ! palle      29: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.292 2014/11/04 18:11:42 palle Exp $");
1.142     lukem      30:
                     31: #undef NO_VCACHE /* Don't forget the locked TLB in dostart */
                     32: #define        HWREF
1.27      mrg        33:
1.8       eeh        34: #include "opt_ddb.h"
1.172     rjs        35: #include "opt_multiprocessor.h"
1.276     martin     36: #include "opt_modular.h"
1.1       eeh        37:
                     38: #include <sys/param.h>
                     39: #include <sys/malloc.h>
                     40: #include <sys/queue.h>
                     41: #include <sys/systm.h>
                     42: #include <sys/msgbuf.h>
1.54      eeh        43: #include <sys/pool.h>
1.2       eeh        44: #include <sys/exec.h>
                     45: #include <sys/core.h>
                     46: #include <sys/kcore.h>
1.113     chs        47: #include <sys/proc.h>
1.203     ad         48: #include <sys/atomic.h>
                     49: #include <sys/cpu.h>
1.1       eeh        50:
1.238     matt       51: #include <sys/exec_aout.h>     /* for MID_* */
                     52:
1.1       eeh        53: #include <uvm/uvm.h>
                     54:
                     55: #include <machine/pcb.h>
                     56: #include <machine/sparc64.h>
                     57: #include <machine/ctlreg.h>
1.156     pk         58: #include <machine/promlib.h>
1.2       eeh        59: #include <machine/kcore.h>
1.168     cdi        60: #include <machine/bootinfo.h>
1.1       eeh        61:
1.224     nakayama   62: #include <sparc64/sparc64/cache.h>
1.282     palle      63: #ifdef SUN4V
                     64: #include <sparc64/hypervisor.h>
                     65: #endif
1.1       eeh        66:
1.8       eeh        67: #ifdef DDB
                     68: #include <machine/db_machdep.h>
                     69: #include <ddb/db_command.h>
                     70: #include <ddb/db_sym.h>
                     71: #include <ddb/db_variables.h>
                     72: #include <ddb/db_extern.h>
                     73: #include <ddb/db_access.h>
                     74: #include <ddb/db_output.h>
                     75: #else
1.7       mrg        76: #define Debugger()
1.25      eeh        77: #define db_printf      printf
1.7       mrg        78: #endif
1.1       eeh        79:
1.98      eeh        80: #define        MEG             (1<<20) /* 1MB */
                     81: #define        KB              (1<<10) /* 1KB */
                     82:
1.152     petrov     83: paddr_t cpu0paddr;             /* contiguous phys memory preallocated for cpus */
1.54      eeh        84:
1.1       eeh        85: /* These routines are in assembly to allow access through physical mappings */
1.259     mrg        86: extern int64_t pseg_get_real(struct pmap *, vaddr_t);
                     87: extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t);
1.41      eeh        88:
                     89: /*
                     90:  * Diatribe on ref/mod counting:
                     91:  *
                     92:  * First of all, ref/mod info must be non-volatile.  Hence we need to keep it
1.156     pk         93:  * in the pv_entry structure for each page.  (We could bypass this for the
1.97      chs        94:  * vm_page, but that's a long story....)
1.156     pk         95:  *
1.41      eeh        96:  * This architecture has nice, fast traps with lots of space for software bits
1.104     wiz        97:  * in the TTE.  To accelerate ref/mod counts we make use of these features.
1.41      eeh        98:  *
1.156     pk         99:  * When we map a page initially, we place a TTE in the page table.  It's
1.41      eeh       100:  * inserted with the TLB_W and TLB_ACCESS bits cleared.  If a page is really
1.131     wiz       101:  * writable we set the TLB_REAL_W bit for the trap handler.
1.41      eeh       102:  *
                    103:  * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS
                     104:  * bit in the appropriate TTE in the page table.  Whenever we take a protection
                    105:  * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD
                    106:  * bits to enable writing and mark the page as modified.
                    107:  *
                    108:  * This means that we may have ref/mod information all over the place.  The
                    109:  * pmap routines must traverse the page tables of all pmaps with a given page
                    110:  * and collect/clear all the ref/mod information and copy it into the pv_entry.
                    111:  */
                    112:
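/*
 * Illustrative sketch only (the real work happens in the assembly trap
 * handlers, not here): roughly what a protection fault does to a TTE
 * that was entered with TLB_REAL_W set, using the TTE bit names from
 * this file.
 */
#if 0
static int64_t
tte_on_write_fault(int64_t data)
{
	if (data & TLB_REAL_W) {
		/* Really writable: enable writes and record modification. */
		data |= (TLB_W | TLB_MOD);
	}
	return (data);
}
#endif
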
1.56      eeh       113: #ifdef NO_VCACHE
                    114: #define        FORCE_ALIAS     1
                    115: #else
                    116: #define FORCE_ALIAS    0
                    117: #endif
                    118:
1.1       eeh       119: #define        PV_ALIAS        0x1LL
                    120: #define PV_REF         0x2LL
                    121: #define PV_MOD         0x4LL
1.2       eeh       122: #define PV_NVC         0x8LL
1.86      eeh       123: #define PV_NC          0x10LL
1.127     chs       124: #define PV_WE          0x20LL  /* Debug -- this page was writable sometime */
1.86      eeh       125: #define PV_MASK                (0x03fLL)
1.138     thorpej   126: #define PV_VAMASK      (~(PAGE_SIZE - 1))
1.127     chs       127: #define PV_MATCH(pv,va)        (!(((pv)->pv_va ^ (va)) & PV_VAMASK))
                    128: #define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \
                    129:                                        (((pv)->pv_va) & PV_MASK)))
                    130:
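/*
 * A minimal sketch (hypothetical usage, not code from this file) of how
 * the PV_* flags share the low bits of pv_va: mappings are PAGE_SIZE
 * aligned, so the bits below the page size are free for flags.
 */
#if 0
	pv->pv_va = (va & PV_VAMASK) | PV_REF;	/* store VA plus ref flag */
	if (PV_MATCH(pv, va))			/* VA compare ignores flags */
		pv->pv_va |= PV_MOD;		/* flags never disturb the VA */
#endif
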
1.203     ad        131: struct pool_cache pmap_cache;
                    132: struct pool_cache pmap_pv_cache;
1.127     chs       133:
1.170     cdi       134: pv_entry_t     pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *);
                    135: void   pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *,
                    136:                           pv_entry_t);
                    137: void   pmap_page_cache(struct pmap *, paddr_t, int);
1.1       eeh       138:
                    139: /*
1.127     chs       140:  * First and last managed physical addresses.
                    141:  * XXX only used for dumping the system.
1.1       eeh       142:  */
1.4       eeh       143: paddr_t        vm_first_phys, vm_num_phys;
1.1       eeh       144:
                    145: /*
                    146:  * Here's the CPU TSB stuff.  It's allocated in pmap_bootstrap.
                    147:  */
                     148: int tsbsize;           /* tsbents = 512 * 2^tsbsize */
                    149: #define TSBENTS (512<<tsbsize)
                    150: #define        TSBSIZE (TSBENTS * 16)
                    151:
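/*
 * Worked example, assuming the 16-byte TSB entries implied by TSBSIZE:
 * tsbsize 0 -> 512 entries (8KB), 1 -> 1024 entries (16KB),
 * 2 -> 2048 entries (32KB) per TSB.
 */
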
1.230     martin    152: static struct pmap kernel_pmap_;
1.231     pooka     153: struct pmap *const kernel_pmap_ptr = &kernel_pmap_;
1.1       eeh       154:
1.212     nakayama  155: static int ctx_alloc(struct pmap *);
1.226     spz       156: static bool pmap_is_referenced_locked(struct vm_page *);
                    157:
1.212     nakayama  158: static void ctx_free(struct pmap *, struct cpu_info *);
1.216     martin    159:
                    160: /*
                    161:  * Check if any MMU has a non-zero context
                    162:  */
                    163: static inline bool
                    164: pmap_has_ctx(struct pmap *p)
                    165: {
                    166:        int i;
                    167:
                    168:        /* any context on any cpu? */
                    169:        for (i = 0; i < sparc_ncpus; i++)
                    170:                if (p->pm_ctx[i] > 0)
                    171:                        return true;
                    172:
                    173:        return false;
                    174: }
                    175:
1.257     mrg       176: #ifdef MULTIPROCESSOR
                    177: #define pmap_ctx(PM)   ((PM)->pm_ctx[cpu_number()])
                    178: #else
                    179: #define pmap_ctx(PM)   ((PM)->pm_ctx[0])
                    180: #endif
                    181:
1.216     martin    182: /*
                    183:  * Check if this pmap has a live mapping on some MMU.
                    184:  */
                    185: static inline bool
                    186: pmap_is_on_mmu(struct pmap *p)
                    187: {
                    188:        /* The kernel pmap is always on all MMUs */
                    189:        if (p == pmap_kernel())
                    190:                return true;
                    191:
                    192:        return pmap_has_ctx(p);
                    193: }
1.210     martin    194:
1.56      eeh       195: /*
                    196:  * Virtual and physical addresses of the start and end of kernel text
                    197:  * and data segments.
                    198:  */
                    199: vaddr_t ktext;
                    200: paddr_t ktextp;
                    201: vaddr_t ektext;
                    202: paddr_t ektextp;
                    203: vaddr_t kdata;
                    204: paddr_t kdatap;
                    205: vaddr_t ekdata;
                    206: paddr_t ekdatap;
                    207:
1.168     cdi       208: /*
                    209:  * Kernel 4MB pages.
                    210:  */
                    211: extern struct tlb_entry *kernel_tlbs;
                    212: extern int kernel_tlb_slots;
                    213:
1.1       eeh       214: static int npgs;
                    215:
1.56      eeh       216: vaddr_t        vmmap;                  /* one reserved MI vpage for /dev/mem */
1.2       eeh       217:
1.168     cdi       218: int phys_installed_size;               /* Installed physical memory */
                    219: struct mem_region *phys_installed;
1.1       eeh       220:
1.123     eeh       221: paddr_t avail_start, avail_end;        /* These are used by ps & family */
1.1       eeh       222:
1.170     cdi       223: static int ptelookup_va(vaddr_t va);
1.143     chs       224:
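/* Clear a 64-bit word ("clear extended"); used to invalidate TSB entries. */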
1.166     perry     225: static inline void
1.143     chs       226: clrx(void *addr)
                    227: {
1.166     perry     228:        __asm volatile("clrx [%0]" : : "r" (addr) : "memory");
1.143     chs       229: }
                    230:
1.212     nakayama  231: static void
                    232: tsb_invalidate(vaddr_t va, pmap_t pm)
                    233: {
                    234:        struct cpu_info *ci;
                    235:        int ctx;
                    236:        bool kpm = (pm == pmap_kernel());
                    237:        int i;
                    238:        int64_t tag;
                    239:
                    240:        i = ptelookup_va(va);
1.257     mrg       241: #ifdef MULTIPROCESSOR
1.212     nakayama  242:        for (ci = cpus; ci != NULL; ci = ci->ci_next) {
                    243:                if (!CPUSET_HAS(cpus_active, ci->ci_index))
                    244:                        continue;
1.257     mrg       245: #else
                    246:                ci = curcpu();
                    247: #endif
1.212     nakayama  248:                ctx = pm->pm_ctx[ci->ci_index];
                    249:                if (kpm || ctx > 0) {
                    250:                        tag = TSB_TAG(0, ctx, va);
                    251:                        if (ci->ci_tsb_dmmu[i].tag == tag) {
                    252:                                clrx(&ci->ci_tsb_dmmu[i].data);
                    253:                        }
                    254:                        if (ci->ci_tsb_immu[i].tag == tag) {
                    255:                                clrx(&ci->ci_tsb_immu[i].data);
                    256:                        }
                    257:                }
1.257     mrg       258: #ifdef MULTIPROCESSOR
1.212     nakayama  259:        }
1.257     mrg       260: #endif
1.212     nakayama  261: }
1.1       eeh       262:
                    263: struct prom_map *prom_map;
                    264: int prom_map_size;
                    265:
1.278     mrg       266: #define        PDB_CREATE              0x000001
                    267: #define        PDB_DESTROY             0x000002
                    268: #define        PDB_REMOVE              0x000004
                    269: #define        PDB_CHANGEPROT          0x000008
                    270: #define        PDB_ENTER               0x000010
                    271: #define        PDB_DEMAP               0x000020        /* used in locore */
                    272: #define        PDB_REF                 0x000040
                    273: #define        PDB_COPY                0x000080
                    274: #define        PDB_MMU_ALLOC           0x000100
                    275: #define        PDB_MMU_STEAL           0x000200
                    276: #define        PDB_CTX_ALLOC           0x000400
                    277: #define        PDB_CTX_STEAL           0x000800
                    278: #define        PDB_MMUREG_ALLOC        0x001000
                    279: #define        PDB_MMUREG_STEAL        0x002000
                    280: #define        PDB_CACHESTUFF          0x004000
                    281: #define        PDB_ALIAS               0x008000
                    282: #define PDB_EXTRACT            0x010000
                    283: #define        PDB_BOOT                0x020000
                    284: #define        PDB_BOOT1               0x040000
                    285: #define        PDB_GROW                0x080000
                    286: #define        PDB_CTX_FLUSHALL        0x100000
                    287: #define        PDB_ACTIVATE            0x200000
                    288:
                    289: #if defined(DEBUG) && !defined(PMAP_DEBUG)
                    290: #define PMAP_DEBUG
                    291: #endif
                    292:
                    293: #ifdef PMAP_DEBUG
1.1       eeh       294: struct {
                    295:        int kernel;     /* entering kernel mapping */
                    296:        int user;       /* entering user mapping */
                    297:        int ptpneeded;  /* needed to allocate a PT page */
                    298:        int pwchange;   /* no mapping change, just wiring or protection */
                    299:        int wchange;    /* no mapping change, just wiring */
                    300:        int mchange;    /* was mapped but mapping to different page */
                    301:        int managed;    /* a managed page */
                    302:        int firstpv;    /* first mapping for this PA */
                    303:        int secondpv;   /* second mapping for this PA */
                    304:        int ci;         /* cache inhibited */
                    305:        int unmanaged;  /* not a managed page */
                    306:        int flushes;    /* cache flushes */
                    307:        int cachehit;   /* new entry forced valid entry out */
                    308: } enter_stats;
                    309: struct {
                    310:        int calls;
                    311:        int removes;
                    312:        int flushes;
1.4       eeh       313:        int tflushes;   /* TLB flushes */
1.1       eeh       314:        int pidflushes; /* HW pid stolen */
                    315:        int pvfirst;
                    316:        int pvsearch;
                    317: } remove_stats;
1.256     mrg       318: #define        ENTER_STAT(x)   do { enter_stats.x ++; } while (0)
                    319: #define        REMOVE_STAT(x)  do { remove_stats.x ++; } while (0)
1.127     chs       320:
1.75      eeh       321: int    pmapdebug = 0;
1.278     mrg       322: //int  pmapdebug = 0 | PDB_CTX_ALLOC | PDB_ACTIVATE;
1.1       eeh       323: /* Number of H/W pages stolen for page tables */
                    324: int    pmap_pages_stolen = 0;
1.66      eeh       325:
                    326: #define        BDPRINTF(n, f)  if (pmapdebug & (n)) prom_printf f
1.156     pk        327: #define        DPRINTF(n, f)   if (pmapdebug & (n)) printf f
1.69      mrg       328: #else
1.256     mrg       329: #define        ENTER_STAT(x)   do { /* nothing */ } while (0)
                    330: #define        REMOVE_STAT(x)  do { /* nothing */ } while (0)
1.69      mrg       331: #define        BDPRINTF(n, f)
1.77      eeh       332: #define        DPRINTF(n, f)
1.278     mrg       333: #define pmapdebug 0
1.1       eeh       334: #endif
                    335:
                    336: #define pv_check()
                    337:
1.262     mrg       338: static int pmap_get_page(paddr_t *);
                    339: static void pmap_free_page(paddr_t, sparc64_cpuset_t);
                    340: static void pmap_free_page_noflush(paddr_t);
1.56      eeh       341:
1.203     ad        342: /*
1.262     mrg       343:  * Global pmap locks.
1.203     ad        344:  */
                    345: static kmutex_t pmap_lock;
1.250     martin    346: static bool lock_available = false;
1.56      eeh       347:
                    348: /*
1.98      eeh       349:  * Support for big page sizes.  This maps the page size to the
                    350:  * page bits.  That is: these are the bits between 8K pages and
                    351:  * larger page sizes that cause aliasing.
                    352:  */
1.180     martin    353: #define PSMAP_ENTRY(MASK, CODE)        { .mask = MASK, .code = CODE }
1.98      eeh       354: struct page_size_map page_size_map[] = {
                    355: #ifdef DEBUG
1.180     martin    356:        PSMAP_ENTRY(0, PGSZ_8K & 0),    /* Disable large pages */
1.98      eeh       357: #endif
1.180     martin    358:        PSMAP_ENTRY((4 * 1024 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_4M),
                    359:        PSMAP_ENTRY((512 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_512K),
                    360:        PSMAP_ENTRY((64 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_64K),
                    361:        PSMAP_ENTRY((8 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_8K),
                    362:        PSMAP_ENTRY(0, 0),
1.98      eeh       363: };
                    364:
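/*
 * A sketch of the intended lookup (hypothetical, assuming the caller has
 * a VA/PA pair and a mapping size): pick the largest page size whose
 * alias bits are clear in both addresses and which fits the mapping; if
 * nothing larger fits, the terminating { 0, 0 } entry yields 8K pages.
 */
#if 0
	const struct page_size_map *psm;

	for (psm = page_size_map; psm->mask != 0; psm++)
		if (((va | pa) & psm->mask) == 0 && size > psm->mask)
			break;
	/* psm->code is now the TTE page size code to use */
#endif
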
                    365: /*
1.259     mrg       366:  * This probably shouldn't be necessary, but it stops USIII machines from
                    367:  * breaking in general, and not just for MULTIPROCESSOR.
                    368:  */
                    369: #define USE_LOCKSAFE_PSEG_GETSET
                    370: #if defined(USE_LOCKSAFE_PSEG_GETSET)
                    371:
                    372: static kmutex_t pseg_lock;
                    373:
                    374: static __inline__ int64_t
                    375: pseg_get_locksafe(struct pmap *pm, vaddr_t va)
                    376: {
                    377:        int64_t rv;
                    378:        bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
                    379:
                    380:        if (__predict_true(took_lock))
                    381:                mutex_enter(&pseg_lock);
                    382:        rv = pseg_get_real(pm, va);
                    383:        if (__predict_true(took_lock))
                    384:                mutex_exit(&pseg_lock);
                    385:        return rv;
                    386: }
                    387:
                    388: static __inline__ int
                    389: pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp)
                    390: {
                    391:        int rv;
                    392:        bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
                    393:
                    394:        if (__predict_true(took_lock))
                    395:                mutex_enter(&pseg_lock);
                    396:        rv = pseg_set_real(pm, va, data, ptp);
                    397:        if (__predict_true(took_lock))
                    398:                mutex_exit(&pseg_lock);
                    399:        return rv;
                    400: }
                    401:
                    402: #define pseg_get(pm, va)               pseg_get_locksafe(pm, va)
                    403: #define pseg_set(pm, va, data, ptp)    pseg_set_locksafe(pm, va, data, ptp)
                    404:
                    405: #else /* USE_LOCKSAFE_PSEG_GETSET */
                    406:
                    407: #define pseg_get(pm, va)               pseg_get_real(pm, va)
                    408: #define pseg_set(pm, va, data, ptp)    pseg_set_real(pm, va, data, ptp)
                    409:
                    410: #endif /* USE_LOCKSAFE_PSEG_GETSET */
                    411:
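/*
 * Hypothetical usage of the wrappers: fetch the TTE for a VA and test a
 * software bit.  Real callers appear throughout the rest of this file.
 */
#if 0
	int64_t data = pseg_get(pm, va);

	if ((data & TLB_V) != 0 && (data & TLB_MOD) != 0)
		printf("mapping at %lx has been written\n", va);
#endif
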
                    412: /*
1.56      eeh       413:  * Enter a TTE into the kernel pmap only.  Don't do anything else.
1.156     pk        414:  *
                    415:  * Use only during bootstrapping since it does no locking and
1.73      eeh       416:  * can lose ref/mod info!!!!
                    417:  *
1.56      eeh       418:  */
1.156     pk        419: static void pmap_enter_kpage(vaddr_t va, int64_t data)
1.56      eeh       420: {
                    421:        paddr_t newp;
                    422:
1.146     petrov    423:        newp = 0UL;
1.127     chs       424:        while (pseg_set(pmap_kernel(), va, data, newp) & 1) {
1.151     petrov    425:                if (!pmap_get_page(&newp)) {
1.73      eeh       426:                        prom_printf("pmap_enter_kpage: out of pages\n");
                    427:                        panic("pmap_enter_kpage");
                    428:                }
1.127     chs       429:
                    430:                ENTER_STAT(ptpneeded);
1.156     pk        431:                BDPRINTF(PDB_BOOT1,
                    432:                         ("pseg_set: pm=%p va=%p data=%lx newp %lx\n",
1.66      eeh       433:                          pmap_kernel(), va, (long)data, (long)newp));
                    434:                if (pmapdebug & PDB_BOOT1)
1.56      eeh       435:                {int i; for (i=0; i<140000000; i++) ;}
                    436:        }
                    437: }
                    438:
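/*
 * Hypothetical bootstrap usage, mirroring the TSB_DATA() calls made
 * elsewhere during startup: enter one writable, cacheable kernel page.
 */
#if 0
	pmap_enter_kpage(va, TSB_DATA(0 /* global */, PGSZ_8K, pa,
	    1 /* priv */, 1 /* write */, 1 /* cache */,
	    1 /* aliased */, 1 /* valid */, 0 /* ie */));
#endif
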
1.1       eeh       439: /*
1.111     eeh       440:  * Check the bootargs to see if we need to enable bootdebug.
1.75      eeh       441:  */
                    442: #ifdef DEBUG
1.156     pk        443: static void pmap_bootdebug(void)
1.75      eeh       444: {
1.162     christos  445:        const char *cp = prom_getbootargs();
1.75      eeh       446:
1.156     pk        447:        for (;;)
1.75      eeh       448:                switch (*++cp) {
                    449:                case '\0':
                    450:                        return;
                    451:                case 'V':
1.77      eeh       452:                        pmapdebug |= PDB_BOOT|PDB_BOOT1;
1.75      eeh       453:                        break;
                    454:                case 'D':
                    455:                        pmapdebug |= PDB_BOOT1;
                    456:                        break;
                    457:                }
                    458: }
1.278     mrg       459: #else
                    460: #define pmap_bootdebug()       /* nothing */
1.75      eeh       461: #endif
                    462:
1.103     eeh       463:
                    464: /*
                    465:  * Calculate the correct number of page colors to use.  This should be the
1.138     thorpej   466:  * size of the E$/PAGE_SIZE.  However, different CPUs can have different-sized
1.103     eeh       467:  * E$, so we take the largest color count required by any E$.
                    468:  */
1.156     pk        469: static int pmap_calculate_colors(void)
                    470: {
                    471:        int node;
1.103     eeh       472:        int size, assoc, color, maxcolor = 1;
                    473:
1.156     pk        474:        for (node = prom_firstchild(prom_findroot()); node != 0;
                    475:             node = prom_nextsibling(node)) {
                    476:                char *name = prom_getpropstring(node, "device_type");
                    477:                if (strcmp("cpu", name) != 0)
                    478:                        continue;
                    479:
                    480:                /* Found a CPU, get the E$ info. */
                    481:                size = prom_getpropint(node, "ecache-size", -1);
                    482:                if (size == -1) {
1.288     palle     483:                        /* XXX sun4v support missing */
1.156     pk        484:                        prom_printf("pmap_calculate_colors: node %x has "
                    485:                                "no ecache-size\n", node);
                    486:                        /* If we can't get the E$ size, skip the node */
                    487:                        continue;
1.103     eeh       488:                }
1.156     pk        489:
                    490:                assoc = prom_getpropint(node, "ecache-associativity", 1);
                    491:                color = size/assoc/PAGE_SIZE;
                    492:                if (color > maxcolor)
                    493:                        maxcolor = color;
1.103     eeh       494:        }
                    495:        return (maxcolor);
                    496: }
                    497:
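/*
 * Worked example: a 4MB direct-mapped E$ with 8K pages gives
 * 4MB/1/8KB = 512 colors; a 1MB 2-way E$ gives 1MB/2/8KB = 64.
 * The largest value found across all CPUs wins.
 */
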
1.156     pk        498: static void pmap_alloc_bootargs(void)
1.152     petrov    499: {
                    500:        char *v;
                    501:
                    502:        v = OF_claim(NULL, 2*PAGE_SIZE, PAGE_SIZE);
                    503:        if ((v == NULL) || (v == (void*)-1))
1.263     mrg       504:                panic("Can't claim two pages of memory.");
1.152     petrov    505:
                    506:        memset(v, 0, 2*PAGE_SIZE);
                    507:
                    508:        cpu_args = (struct cpu_bootargs*)v;
                    509: }
                    510:
1.168     cdi       511: #if defined(MULTIPROCESSOR)
                    512: static void pmap_mp_init(void);
                    513:
                    514: static void
                    515: pmap_mp_init(void)
                    516: {
                    517:        pte_t *tp;
                    518:        char *v;
                    519:        int i;
                    520:
                    521:        extern void cpu_mp_startup(void);
                    522:
1.173     mrg       523:        if ((v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE)) == NULL) {
1.168     cdi       524:                panic("pmap_mp_init: Cannot claim a page.");
                    525:        }
                    526:
1.241     cegger    527:        memcpy(v, mp_tramp_code, mp_tramp_code_len);
1.168     cdi       528:        *(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots;
                    529:        *(u_long *)(v + mp_tramp_func) = (u_long)cpu_mp_startup;
                    530:        *(u_long *)(v + mp_tramp_ci) = (u_long)cpu_args;
                    531:        tp = (pte_t *)(v + mp_tramp_code_len);
                    532:        for (i = 0; i < kernel_tlb_slots; i++) {
                    533:                tp[i].tag  = kernel_tlbs[i].te_va;
                    534:                tp[i].data = TSB_DATA(0,                /* g */
                    535:                                PGSZ_4M,                /* sz */
                    536:                                kernel_tlbs[i].te_pa,   /* pa */
                    537:                                1, /* priv */
1.293   ! palle     538:                                0, /* write */
1.168     cdi       539:                                1, /* cache */
                    540:                                1, /* aliased */
                    541:                                1, /* valid */
                    542:                                0 /* ie */);
                    543:                tp[i].data |= TLB_L | TLB_CV;
1.293   ! palle     544:
        !           545:                /*
        !           546:                 * Assume that the last TLB slot entry is the only data slot.
        !           547:                 *
        !           548:                 * If more than one data slot is required one day, perhaps
        !           549:                 * the bootinfo structure shared between ofwboot and the kernel
        !           550:                 * should be expanded to include the number of data slots.
        !           551:                 */
        !           552:                if (i == kernel_tlb_slots-1)
        !           553:                        tp[i].data |= TLB_W;
        !           554:                else
        !           555:                        if (CPU_ISSUN4V)
        !           556:                                tp[i].data |= SUN4V_TLB_X;
1.291     palle     557:
1.220     martin    558:                DPRINTF(PDB_BOOT1, ("xtlb[%d]: Tag: %" PRIx64 " Data: %"
                    559:                                PRIx64 "\n", i, tp[i].tag, tp[i].data));
1.168     cdi       560:        }
                    561:
                    562:        for (i = 0; i < PAGE_SIZE; i += sizeof(long))
                    563:                flush(v + i);
                    564:
                    565:        cpu_spinup_trampoline = (vaddr_t)v;
                    566: }
                    567: #else
                    568: #define pmap_mp_init() ((void)0)
                    569: #endif
                    570:
                    571: paddr_t pmap_kextract(vaddr_t va);
                    572:
                    573: paddr_t
                    574: pmap_kextract(vaddr_t va)
                    575: {
                    576:        int i;
                    577:        paddr_t paddr = (paddr_t)-1;
                    578:
                    579:        for (i = 0; i < kernel_tlb_slots; i++) {
                    580:                if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) {
                    581:                        paddr = kernel_tlbs[i].te_pa +
                    582:                                (paddr_t)(va & PAGE_MASK_4M);
                    583:                        break;
                    584:                }
                    585:        }
                    586:
                    587:        if (i == kernel_tlb_slots) {
                    588:                panic("pmap_kextract: Address %p is not from kernel space.\n"
                    589:                                "Data segment is too small?\n", (void*)va);
                    590:        }
                    591:
                    592:        return (paddr);
                    593: }
                    594:
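/*
 * Worked example with hypothetical slot values: for a 4MB slot with
 * te_va 0x1000000 and te_pa 0x40000000, va 0x1234567 matches the slot
 * (0x1234567 & ~PAGE_MASK_4M == 0x1000000), so the result is
 * 0x40000000 + 0x234567 = 0x40234567.
 */
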
                    595: /*
                    596:  * Bootstrap kernel allocator, allocates from unused space in 4MB kernel
                     597:  * data segment, meaning that
                    598:  *
                    599:  * - Access to allocated memory will never generate a trap
                    600:  * - Allocated chunks are never reclaimed or freed
                    601:  * - Allocation calls do not change PROM memlists
                    602:  */
                    603: static struct mem_region kdata_mem_pool;
                    604:
                    605: static void
                    606: kdata_alloc_init(vaddr_t va_start, vaddr_t va_end)
                    607: {
                    608:        vsize_t va_size = va_end - va_start;
                    609:
                    610:        kdata_mem_pool.start = va_start;
                    611:        kdata_mem_pool.size  = va_size;
                    612:
                    613:        BDPRINTF(PDB_BOOT, ("kdata_alloc_init(): %d bytes @%p.\n", va_size,
                    614:                                va_start));
                    615: }
                    616:
                    617: static vaddr_t
                    618: kdata_alloc(vsize_t size, vsize_t align)
                    619: {
                    620:        vaddr_t va;
                    621:        vsize_t asize;
                    622:
                    623:        asize = roundup(kdata_mem_pool.start, align) - kdata_mem_pool.start;
                    624:
                    625:        kdata_mem_pool.start += asize;
                    626:        kdata_mem_pool.size  -= asize;
                    627:
                    628:        if (kdata_mem_pool.size < size) {
                    629:                panic("kdata_alloc(): Data segment is too small.\n");
                    630:        }
                    631:
                    632:        va = kdata_mem_pool.start;
                    633:        kdata_mem_pool.start += size;
                    634:        kdata_mem_pool.size  -= size;
                    635:
                    636:        BDPRINTF(PDB_BOOT, ("kdata_alloc(): Allocated %d@%p, %d free.\n",
                    637:                                size, (void*)va, kdata_mem_pool.size));
                    638:
                    639:        return (va);
                    640: }
                    641:
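/*
 * Hypothetical usage: carve an 8-byte aligned buffer out of the spare
 * kernel data segment (never freed, never faults).
 */
#if 0
	struct mem_region *ml;

	ml = (struct mem_region *)kdata_alloc(sz, sizeof(uint64_t));
#endif
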
                    642: /*
                    643:  * Unified routine for reading PROM properties.
                    644:  */
                    645: static void
                    646: pmap_read_memlist(const char *device, const char *property, void **ml,
                    647:                  int *ml_size, vaddr_t (* ml_alloc)(vsize_t, vsize_t))
                    648: {
                    649:        void *va;
                    650:        int size, handle;
                    651:
                    652:        if ( (handle = prom_finddevice(device)) == 0) {
                    653:                prom_printf("pmap_read_memlist(): No %s device found.\n",
                    654:                                device);
                    655:                prom_halt();
                    656:        }
                    657:        if ( (size = OF_getproplen(handle, property)) < 0) {
                    658:                prom_printf("pmap_read_memlist(): %s/%s has no length.\n",
                    659:                                device, property);
                    660:                prom_halt();
                    661:        }
1.170     cdi       662:        if ( (va = (void*)(* ml_alloc)(size, sizeof(uint64_t))) == NULL) {
1.168     cdi       663:                prom_printf("pmap_read_memlist(): Cannot allocate memlist.\n");
                    664:                prom_halt();
                    665:        }
                    666:        if (OF_getprop(handle, property, va, size) <= 0) {
                    667:                prom_printf("pmap_read_memlist(): Cannot read %s/%s.\n",
                    668:                                device, property);
                    669:                prom_halt();
                    670:        }
                    671:
                    672:        *ml = va;
                    673:        *ml_size = size;
                    674: }
                    675:
1.75      eeh       676: /*
1.41      eeh       677:  * This is called during bootstrap, before the system is really initialized.
1.1       eeh       678:  *
1.56      eeh       679:  * It's called with the start and end virtual addresses of the kernel.  We
                    680:  * bootstrap the pmap allocator now.  We will allocate the basic structures we
                    681:  * need to bootstrap the VM system here: the page frame tables, the TSB, and
                    682:  * the free memory lists.
                    683:  *
                    684:  * Now all this is becoming a bit obsolete.  maxctx is still important, but by
                    685:  * separating the kernel text and data segments we really would need to
                    686:  * provide the start and end of each segment.  But we can't.  The rodata
                    687:  * segment is attached to the end of the kernel segment and has nothing to
                    688:  * delimit its end.  We could still pass in the beginning of the kernel and
                    689:  * the beginning and end of the data segment but we could also just as easily
                    690:  * calculate that all in here.
                    691:  *
                    692:  * To handle the kernel text, we need to do a reverse mapping of the start of
                    693:  * the kernel, then traverse the free memory lists to find out how big it is.
1.1       eeh       694:  */
1.56      eeh       695:
1.1       eeh       696: void
1.171     cdi       697: pmap_bootstrap(u_long kernelstart, u_long kernelend)
1.1       eeh       698: {
1.276     martin    699: #ifdef MODULAR
                    700:        extern vaddr_t module_start, module_end;
                    701: #endif
1.200     martin    702:        extern char etext[], data_start[];      /* start of data segment */
1.1       eeh       703:        extern int msgbufmapped;
1.168     cdi       704:        struct mem_region *mp, *mp1, *avail, *orig;
                    705:        int i, j, pcnt, msgbufsiz;
1.9       eeh       706:        size_t s, sz;
1.56      eeh       707:        int64_t data;
1.168     cdi       708:        vaddr_t va, intstk;
1.170     cdi       709:        uint64_t phys_msgbuf;
1.181     mrg       710:        paddr_t newp = 0;
1.168     cdi       711:
                    712:        void *prom_memlist;
                    713:        int prom_memlist_size;
                    714:
                    715:        BDPRINTF(PDB_BOOT, ("Entered pmap_bootstrap.\n"));
                    716:
1.272     mrg       717:        cache_setup_funcs();
                    718:
1.168     cdi       719:        /*
                    720:         * Calculate kernel size.
                    721:         */
                    722:        ktext   = kernelstart;
                    723:        ktextp  = pmap_kextract(ktext);
1.200     martin    724:        ektext  = roundup((vaddr_t)etext, PAGE_SIZE_4M);
                    725:        ektextp = roundup(pmap_kextract((vaddr_t)etext), PAGE_SIZE_4M);
1.168     cdi       726:
                    727:        kdata   = (vaddr_t)data_start;
                    728:        kdatap  = pmap_kextract(kdata);
                    729:        ekdata  = roundup(kernelend, PAGE_SIZE_4M);
                    730:        ekdatap = roundup(pmap_kextract(kernelend), PAGE_SIZE_4M);
                    731:
                    732:        BDPRINTF(PDB_BOOT, ("Virtual layout: text %lx-%lx, data %lx-%lx.\n",
                    733:                                ktext, ektext, kdata, ekdata));
                    734:        BDPRINTF(PDB_BOOT, ("Physical layout: text %lx-%lx, data %lx-%lx.\n",
                    735:                                ktextp, ektextp, kdatap, ekdatap));
                    736:
                    737:        /* Initialize bootstrap allocator. */
                    738:        kdata_alloc_init(kernelend + 1 * 1024 * 1024, ekdata);
1.75      eeh       739:
                    740:        pmap_bootdebug();
1.152     petrov    741:        pmap_alloc_bootargs();
1.168     cdi       742:        pmap_mp_init();
1.152     petrov    743:
1.1       eeh       744:        /*
                    745:         * set machine page size
                    746:         */
                    747:        uvmexp.pagesize = NBPG;
1.103     eeh       748:        uvmexp.ncolors = pmap_calculate_colors();
1.1       eeh       749:        uvm_setpagesize();
1.103     eeh       750:
1.1       eeh       751:        /*
                     752:         * Get hold of the message buffer.
                    753:         */
1.71      eeh       754:        msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA;
1.289     jdc       755:        msgbufsiz = MSGBUFSIZE;
1.156     pk        756:        BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\n",
1.66      eeh       757:                            (long)msgbufp, (long)msgbufsiz));
1.22      mrg       758:        if ((long)msgbufp !=
                    759:            (long)(phys_msgbuf = prom_claim_virt((vaddr_t)msgbufp, msgbufsiz)))
                    760:                prom_printf(
1.156     pk        761:                    "cannot get msgbuf VA, msgbufp=%p, phys_msgbuf=%lx\n",
1.22      mrg       762:                    (void *)msgbufp, (long)phys_msgbuf);
1.4       eeh       763:        phys_msgbuf = prom_get_msgbuf(msgbufsiz, MMU_PAGE_ALIGN);
1.156     pk        764:        BDPRINTF(PDB_BOOT,
                    765:                ("We should have the memory at %lx, let's map it in\n",
1.98      eeh       766:                        phys_msgbuf));
1.156     pk        767:        if (prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp,
1.177     martin    768:                          -1/* sunos does this */) == -1) {
1.156     pk        769:                prom_printf("Failed to map msgbuf\n");
1.177     martin    770:        } else {
1.156     pk        771:                BDPRINTF(PDB_BOOT, ("msgbuf mapped at %p\n",
1.98      eeh       772:                        (void *)msgbufp));
1.177     martin    773:        }
1.1       eeh       774:        msgbufmapped = 1;       /* enable message buffer */
1.186     christos  775:        initmsgbuf((void *)msgbufp, msgbufsiz);
1.1       eeh       776:
1.156     pk        777:        /*
1.1       eeh       778:         * Find out how much RAM we have installed.
                    779:         */
1.156     pk        780:        BDPRINTF(PDB_BOOT, ("pmap_bootstrap: getting phys installed\n"));
1.168     cdi       781:        pmap_read_memlist("/memory", "reg", &prom_memlist, &prom_memlist_size,
                    782:                        kdata_alloc);
                    783:        phys_installed = prom_memlist;
                    784:        phys_installed_size = prom_memlist_size / sizeof(*phys_installed);
1.1       eeh       785:
1.66      eeh       786:        if (pmapdebug & PDB_BOOT1) {
                    787:                /* print out mem list */
1.156     pk        788:                prom_printf("Installed physical memory:\n");
1.168     cdi       789:                for (i = 0; i < phys_installed_size; i++) {
1.156     pk        790:                        prom_printf("memlist start %lx size %lx\n",
1.168     cdi       791:                                        (u_long)phys_installed[i].start,
                    792:                                        (u_long)phys_installed[i].size);
1.66      eeh       793:                }
1.1       eeh       794:        }
1.168     cdi       795:
1.66      eeh       796:        BDPRINTF(PDB_BOOT1, ("Calculating physmem:"));
1.168     cdi       797:        for (i = 0; i < phys_installed_size; i++)
                    798:                physmem += btoc(phys_installed[i].size);
1.156     pk        799:        BDPRINTF(PDB_BOOT1, (" result %x or %d pages\n",
1.66      eeh       800:                             (int)physmem, (int)physmem));
1.168     cdi       801:
1.156     pk        802:        /*
1.1       eeh       803:         * Calculate approx TSB size.  This probably needs tweaking.
                    804:         */
1.127     chs       805:        if (physmem < btoc(64 * 1024 * 1024))
1.22      mrg       806:                tsbsize = 0;
1.127     chs       807:        else if (physmem < btoc(512 * 1024 * 1024))
1.22      mrg       808:                tsbsize = 1;
                    809:        else
                    810:                tsbsize = 2;
1.1       eeh       811:
                    812:        /*
                    813:         * Save the prom translations
                    814:         */
1.168     cdi       815:        pmap_read_memlist("/virtual-memory", "translations", &prom_memlist,
                    816:                        &prom_memlist_size, kdata_alloc);
                    817:        prom_map = prom_memlist;
                    818:        prom_map_size = prom_memlist_size / sizeof(struct prom_map);
                    819:
1.66      eeh       820:        if (pmapdebug & PDB_BOOT) {
                    821:                /* print out mem list */
1.156     pk        822:                prom_printf("Prom xlations:\n");
1.66      eeh       823:                for (i = 0; i < prom_map_size; i++) {
1.156     pk        824:                        prom_printf("start %016lx size %016lx tte %016lx\n",
                    825:                                    (u_long)prom_map[i].vstart,
1.66      eeh       826:                                    (u_long)prom_map[i].vsize,
                    827:                                    (u_long)prom_map[i].tte);
                    828:                }
1.156     pk        829:                prom_printf("End of prom xlations\n");
1.1       eeh       830:        }
1.56      eeh       831:
                    832:        /*
1.22      mrg       833:         * Here's a quick in-lined reverse bubble sort.  It gets rid of
1.56      eeh       834:         * any translations inside the kernel data VA range.
1.1       eeh       835:         */
1.175     mrg       836:        for (i = 0; i < prom_map_size; i++) {
                    837:                for (j = i; j < prom_map_size; j++) {
1.22      mrg       838:                        if (prom_map[j].vstart > prom_map[i].vstart) {
1.1       eeh       839:                                struct prom_map tmp;
1.175     mrg       840:
1.1       eeh       841:                                tmp = prom_map[i];
                    842:                                prom_map[i] = prom_map[j];
                    843:                                prom_map[j] = tmp;
                    844:                        }
                    845:                }
                    846:        }
1.66      eeh       847:        if (pmapdebug & PDB_BOOT) {
                    848:                /* print out mem list */
1.156     pk        849:                prom_printf("Prom xlations:\n");
1.66      eeh       850:                for (i = 0; i < prom_map_size; i++) {
1.156     pk        851:                        prom_printf("start %016lx size %016lx tte %016lx\n",
                    852:                                    (u_long)prom_map[i].vstart,
1.66      eeh       853:                                    (u_long)prom_map[i].vsize,
                    854:                                    (u_long)prom_map[i].tte);
                    855:                }
1.156     pk        856:                prom_printf("End of prom xlations\n");
1.1       eeh       857:        }
                    858:
                    859:        /*
1.195     martin    860:         * Allocate ncpu * 64KB of pages for the cpu_info & stack structures now.
1.56      eeh       861:         */
1.195     martin    862:        cpu0paddr = prom_alloc_phys(8 * PAGE_SIZE * sparc_ncpus, 8 * PAGE_SIZE);
1.175     mrg       863:        if (cpu0paddr == 0) {
                    864:                prom_printf("Cannot allocate cpu_infos\n");
1.156     pk        865:                prom_halt();
1.56      eeh       866:        }
                    867:
                    868:        /*
                    869:         * Now the kernel text segment is in its final location we can try to
1.156     pk        870:         * find out how much memory really is free.
1.56      eeh       871:         */
1.168     cdi       872:        pmap_read_memlist("/memory", "available", &prom_memlist,
                    873:                        &prom_memlist_size, kdata_alloc);
                    874:        orig = prom_memlist;
                    875:        sz  = prom_memlist_size;
                    876:        pcnt = prom_memlist_size / sizeof(*orig);
                    877:
                    878:        BDPRINTF(PDB_BOOT1, ("Available physical memory:\n"));
1.170     cdi       879:        avail = (struct mem_region*)kdata_alloc(sz, sizeof(uint64_t));
1.168     cdi       880:        for (i = 0; i < pcnt; i++) {
                    881:                avail[i] = orig[i];
                    882:                BDPRINTF(PDB_BOOT1, ("memlist start %lx size %lx\n",
                    883:                                        (u_long)orig[i].start,
                    884:                                        (u_long)orig[i].size));
1.56      eeh       885:        }
1.168     cdi       886:        BDPRINTF(PDB_BOOT1, ("End of available physical memory\n"));
1.56      eeh       887:
1.168     cdi       888:        BDPRINTF(PDB_BOOT, ("ktext %08lx[%08lx] - %08lx[%08lx] : "
                    889:                                "kdata %08lx[%08lx] - %08lx[%08lx]\n",
                    890:                                (u_long)ktext, (u_long)ktextp,
                    891:                                (u_long)ektext, (u_long)ektextp,
                    892:                                (u_long)kdata, (u_long)kdatap,
                    893:                                (u_long)ekdata, (u_long)ekdatap));
1.66      eeh       894:        if (pmapdebug & PDB_BOOT1) {
                    895:                /* print out mem list */
1.156     pk        896:                prom_printf("Available %lx physical memory before cleanup:\n",
1.66      eeh       897:                            (u_long)avail);
1.168     cdi       898:                for (i = 0; i < pcnt; i++) {
1.156     pk        899:                        prom_printf("memlist start %lx size %lx\n",
1.168     cdi       900:                                    (u_long)avail[i].start,
                    901:                                    (u_long)avail[i].size);
1.66      eeh       902:                }
1.156     pk        903:                prom_printf("End of available physical memory before cleanup\n");
                    904:                prom_printf("kernel physical text size %08lx - %08lx\n",
1.66      eeh       905:                            (u_long)ktextp, (u_long)ektextp);
1.156     pk        906:                prom_printf("kernel physical data size %08lx - %08lx\n",
1.66      eeh       907:                            (u_long)kdatap, (u_long)ekdatap);
                    908:        }
1.278     mrg       909:
1.1       eeh       910:        /*
                     911:         * Here's another quick in-lined bubble sort.
                    912:         */
1.22      mrg       913:        for (i = 0; i < pcnt; i++) {
                    914:                for (j = i; j < pcnt; j++) {
                    915:                        if (avail[j].start < avail[i].start) {
1.1       eeh       916:                                struct mem_region tmp;
                    917:                                tmp = avail[i];
                    918:                                avail[i] = avail[j];
                    919:                                avail[j] = tmp;
                    920:                        }
                    921:                }
                    922:        }
                    923:
1.56      eeh       924:        /* Throw away page zero if we have it. */
                    925:        if (avail->start == 0) {
1.138     thorpej   926:                avail->start += PAGE_SIZE;
                    927:                avail->size -= PAGE_SIZE;
1.56      eeh       928:        }
1.155     chs       929:
1.56      eeh       930:        /*
                    931:         * Now we need to remove the area we valloc'ed from the available
                    932:         * memory lists.  (NB: we may have already alloc'ed the entire space).
                    933:         */
1.1       eeh       934:        npgs = 0;
1.168     cdi       935:        for (mp = avail, i = 0; i < pcnt; i++, mp = &avail[i]) {
1.1       eeh       936:                /*
                    937:                 * Now page align the start of the region.
                    938:                 */
1.138     thorpej   939:                s = mp->start % PAGE_SIZE;
1.1       eeh       940:                if (mp->size >= s) {
                    941:                        mp->size -= s;
                    942:                        mp->start += s;
                    943:                }
                    944:                /*
                    945:                 * And now align the size of the region.
                    946:                 */
1.138     thorpej   947:                mp->size -= mp->size % PAGE_SIZE;
1.1       eeh       948:                /*
                    949:                 * Check whether some memory is left here.
                    950:                 */
                    951:                if (mp->size == 0) {
1.149     martin    952:                        memcpy(mp, mp + 1,
1.1       eeh       953:                              (pcnt - (mp - avail)) * sizeof *mp);
                    954:                        pcnt--;
                    955:                        mp--;
                    956:                        continue;
                    957:                }
                    958:                s = mp->start;
                    959:                sz = mp->size;
                    960:                npgs += btoc(sz);
                    961:                for (mp1 = avail; mp1 < mp; mp1++)
                    962:                        if (s < mp1->start)
                    963:                                break;
                    964:                if (mp1 < mp) {
1.149     martin    965:                        memcpy(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1.1       eeh       966:                        mp1->start = s;
                    967:                        mp1->size = sz;
                    968:                }
1.73      eeh       969: #ifdef DEBUG
                    970: /* Clear all memory we give to the VM system.  I want to make sure
                    971:  * the PROM isn't using it for something, so this should break the PROM.
                    972:  */
1.116     pk        973:
                    974: /* Calling pmap_zero_page() at this point also hangs some machines
                    975:  * so don't do it at all. -- pk 26/02/2002
                    976:  */
                    977: #if 0
1.73      eeh       978:                {
                    979:                        paddr_t p;
1.138     thorpej   980:                        for (p = mp->start; p < mp->start+mp->size;
                    981:                             p += PAGE_SIZE)
1.73      eeh       982:                                pmap_zero_page(p);
                    983:                }
                    984: #endif
1.116     pk        985: #endif /* DEBUG */
1.156     pk        986:                /*
1.1       eeh       987:                 * In future we should be able to specify both allocated
                    988:                 * and free.
                    989:                 */
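                         /*
                          * Editor's note: uvm_page_physload() takes page frame numbers,
                          * hence the atop() conversions below: the segment's first and
                          * last frames, the subrange actually available for allocation
                          * (here the whole segment), and the freelist to put it on.
                          */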
1.168     cdi       990:                BDPRINTF(PDB_BOOT1, ("uvm_page_physload(%lx, %lx)\n",
                    991:                                        (long)mp->start,
                    992:                                        (long)(mp->start + mp->size)));
1.1       eeh       993:                uvm_page_physload(
                    994:                        atop(mp->start),
1.10      eeh       995:                        atop(mp->start+mp->size),
1.1       eeh       996:                        atop(mp->start),
1.10      eeh       997:                        atop(mp->start+mp->size),
1.3       thorpej   998:                        VM_FREELIST_DEFAULT);
1.1       eeh       999:        }
                   1000:
1.66      eeh      1001:        if (pmapdebug & PDB_BOOT) {
                   1002:                /* print out mem list */
1.156     pk       1003:                prom_printf("Available physical memory after cleanup:\n");
1.168     cdi      1004:                for (i = 0; i < pcnt; i++) {
1.156     pk       1005:                        prom_printf("avail start %lx size %lx\n",
1.168     cdi      1006:                                    (long)avail[i].start, (long)avail[i].size);
1.66      eeh      1007:                }
1.156     pk       1008:                prom_printf("End of available physical memory after cleanup\n");
1.1       eeh      1009:        }
1.278     mrg      1010:
1.1       eeh      1011:        /*
1.8       eeh      1012:         * Allocate and clear out pmap_kernel()->pm_segs[]
1.1       eeh      1013:         */
1.127     chs      1014:        pmap_kernel()->pm_refs = 1;
1.210     martin   1015:        memset(&pmap_kernel()->pm_ctx, 0, sizeof(pmap_kernel()->pm_ctx));
1.127     chs      1016:
                   1017:        /* Get a segment-table page, rejecting physical page zero. */
                   1018:        do {
                   1019:                pmap_get_page(&newp);
                   1020:        } while (!newp);
                   1021:        pmap_kernel()->pm_segs = (paddr_t *)(u_long)newp;
                   1022:        pmap_kernel()->pm_physaddr = newp;
1.1       eeh      1023:
                   1024:        /*
                   1025:         * finish filling out kernel pmap.
                   1026:         */
                   1027:
1.156     pk       1028:        BDPRINTF(PDB_BOOT, ("pmap_kernel()->pm_physaddr = %lx\n",
1.66      eeh      1029:            (long)pmap_kernel()->pm_physaddr));
1.1       eeh      1030:        /*
                   1031:         * Tell pmap about our msgbuf -- hope this works already
                   1032:         */
1.156     pk       1033:        BDPRINTF(PDB_BOOT1, ("Calling consinit()\n"));
1.127     chs      1034:        if (pmapdebug & PDB_BOOT1)
                   1035:                consinit();
1.156     pk       1036:        BDPRINTF(PDB_BOOT1, ("Inserting msgbuf into pmap_kernel()\n"));
1.1       eeh      1037:        /* it's not safe to call pmap_enter so we need to do this ourselves */
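                         /*
                          * Editor's note: each 8 KB page of the msgbuf gets a TTE built
                          * by hand via TSB_DATA() -- privileged, writable, cacheable,
                          * but with D$ aliasing disabled -- and is wired straight into
                          * the kernel pmap with pmap_enter_kpage().
                          */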
1.56      eeh      1038:        va = (vaddr_t)msgbufp;
                   1039:        while (msgbufsiz) {
1.156     pk       1040:                data = TSB_DATA(0 /* global */,
1.160     martin   1041:                        PGSZ_8K,
1.56      eeh      1042:                        phys_msgbuf,
                   1043:                        1 /* priv */,
                   1044:                        1 /* Write */,
                   1045:                        1 /* Cacheable */,
                   1046:                        FORCE_ALIAS /* ALIAS -- Disable D$ */,
                   1047:                        1 /* valid */,
                   1048:                        0 /* IE */);
1.160     martin   1049:                pmap_enter_kpage(va, data);
                   1050:                va += PAGE_SIZE;
                   1051:                msgbufsiz -= PAGE_SIZE;
                   1052:                phys_msgbuf += PAGE_SIZE;
1.56      eeh      1053:        }
1.156     pk       1054:        BDPRINTF(PDB_BOOT1, ("Done inserting msgbuf into pmap_kernel()\n"));
                   1055:
                   1056:        BDPRINTF(PDB_BOOT1, ("Inserting PROM mappings into pmap_kernel()\n"));
1.22      mrg      1057:        for (i = 0; i < prom_map_size; i++)
1.143     chs      1058:                if (prom_map[i].vstart && ((prom_map[i].vstart >> 32) == 0))
1.138     thorpej  1059:                        for (j = 0; j < prom_map[i].vsize; j += PAGE_SIZE) {
1.1       eeh      1060:                                int k;
1.156     pk       1061:
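                                                /*
                                                 * Editor's note: pick the largest page
                                                 * size whose mask leaves both the VA and
                                                 * the PA of this PROM mapping aligned and
                                                 * which still fits inside the mapping;
                                                 * page_size_map[] is assumed to be sorted
                                                 * from largest to smallest mask.
                                                 */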
1.22      mrg      1062:                                for (k = 0; page_size_map[k].mask; k++) {
                   1063:                                        if (((prom_map[i].vstart |
                   1064:                                              prom_map[i].tte) &
                   1065:                                              page_size_map[k].mask) == 0 &&
                   1066:                                              page_size_map[k].mask <
                   1067:                                              prom_map[i].vsize)
1.1       eeh      1068:                                                break;
                   1069:                                }
                   1070:                                page_size_map[k].use++;
1.37      eeh      1071:                                /* Enter PROM map into pmap_kernel() */
1.56      eeh      1072:                                pmap_enter_kpage(prom_map[i].vstart + j,
1.143     chs      1073:                                        (prom_map[i].tte + j) | TLB_EXEC |
1.56      eeh      1074:                                        page_size_map[k].code);
1.1       eeh      1075:                        }
1.156     pk       1076:        BDPRINTF(PDB_BOOT1, ("Done inserting PROM mappings into pmap_kernel()\n"));
1.1       eeh      1077:
1.54      eeh      1078:        /*
                   1079:         * Fix up start of kernel heap.
                   1080:         */
1.98      eeh      1081:        vmmap = (vaddr_t)roundup(ekdata, 4*MEG);
1.54      eeh      1082:        /* Let's keep 1 page of redzone after the kernel */
1.138     thorpej  1083:        vmmap += PAGE_SIZE;
1.156     pk       1084:        {
1.170     cdi      1085:                extern void main(void);
1.178     mrg      1086:                vaddr_t u0va;
1.54      eeh      1087:                paddr_t pa;
                   1088:
1.178     mrg      1089:                u0va = vmmap;
1.54      eeh      1090:
1.156     pk       1091:                BDPRINTF(PDB_BOOT1,
1.243     matt     1092:                        ("Inserting lwp0 USPACE into pmap_kernel() at %p\n",
1.98      eeh      1093:                                vmmap));
1.56      eeh      1094:
1.178     mrg      1095:                while (vmmap < u0va + 2*USPACE) {
1.162     christos 1096:                        int64_t data1;
1.1       eeh      1097:
1.151     petrov   1098:                        if (!pmap_get_page(&pa))
                   1099:                                panic("pmap_bootstrap: no pages");
1.138     thorpej  1100:                        prom_map_phys(pa, PAGE_SIZE, vmmap, -1);
1.162     christos 1101:                        data1 = TSB_DATA(0 /* global */,
1.98      eeh      1102:                                PGSZ_8K,
1.55      mrg      1103:                                pa,
1.54      eeh      1104:                                1 /* priv */,
                   1105:                                1 /* Write */,
                   1106:                                1 /* Cacheable */,
1.56      eeh      1107:                                FORCE_ALIAS /* ALIAS -- Disable D$ */,
1.54      eeh      1108:                                1 /* valid */,
                   1109:                                0 /* IE */);
1.162     christos 1110:                        pmap_enter_kpage(vmmap, data1);
1.138     thorpej  1111:                        vmmap += PAGE_SIZE;
1.56      eeh      1112:                }
1.156     pk       1113:                BDPRINTF(PDB_BOOT1,
                   1114:                         ("Done inserting stack 0 into pmap_kernel()\n"));
1.56      eeh      1115:
                   1116:                /* Now map in and initialize our cpu_info structure */
                   1117: #ifdef DIAGNOSTIC
1.138     thorpej  1118:                vmmap += PAGE_SIZE; /* redzone -- XXXX do we need one? */
1.56      eeh      1119: #endif
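                                /*
                                 * Editor's note: the same physical pages are mapped
                                 * again at INTSTACK below, so nudge vmmap until both
                                 * VAs share the same D$ virtual color and the alias
                                 * stays legal.
                                 */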
1.155     chs      1120:                if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK)
1.138     thorpej  1121:                        vmmap += PAGE_SIZE; /* Matchup virtual color for D$ */
1.57      eeh      1122:                intstk = vmmap;
1.155     chs      1123:                cpus = (struct cpu_info *)(intstk + CPUINFO_VA - INTSTACK);
1.56      eeh      1124:
1.98      eeh      1125:                BDPRINTF(PDB_BOOT1,
1.156     pk       1126:                        ("Inserting cpu_info into pmap_kernel() at %p\n",
1.98      eeh      1127:                                 cpus));
1.195     martin   1128:                /* Now map in all 8 pages of interrupt stack/cpu_info */
1.56      eeh      1129:                pa = cpu0paddr;
1.195     martin   1130:                prom_map_phys(pa, 64*KB, vmmap, -1);
1.155     chs      1131:
1.156     pk       1132:                /*
1.106     eeh      1133:                 * Also map it in as the interrupt stack.
                   1134:                 * This lets the PROM see this if needed.
                   1135:                 *
                   1136:                 * XXXX locore.s does not flush these mappings
                   1137:                 * before installing the locked TTE.
                   1138:                 */
1.178     mrg      1139:                prom_map_phys(pa, 64*KB, INTSTACK, -1);
1.195     martin   1140:                for (i = 0; i < 8; i++) {
1.162     christos 1141:                        int64_t data1;
1.56      eeh      1142:
1.162     christos 1143:                        data1 = TSB_DATA(0 /* global */,
1.98      eeh      1144:                                PGSZ_8K,
1.55      mrg      1145:                                pa,
1.54      eeh      1146:                                1 /* priv */,
                   1147:                                1 /* Write */,
                   1148:                                1 /* Cacheable */,
1.56      eeh      1149:                                FORCE_ALIAS /* ALIAS -- Disable D$ */,
1.54      eeh      1150:                                1 /* valid */,
                   1151:                                0 /* IE */);
1.162     christos 1152:                        pmap_enter_kpage(vmmap, data1);
1.138     thorpej  1153:                        vmmap += PAGE_SIZE;
                   1154:                        pa += PAGE_SIZE;
1.56      eeh      1155:                }
1.156     pk       1156:                BDPRINTF(PDB_BOOT1, ("Initializing cpu_info\n"));
1.56      eeh      1157:
                   1158:                /* Initialize our cpu_info structure */
1.195     martin   1159:                memset((void *)intstk, 0, 64 * KB);
1.155     chs      1160:                cpus->ci_self = cpus;
1.152     petrov   1161:                cpus->ci_next = NULL;
1.133     thorpej  1162:                cpus->ci_curlwp = &lwp0;
1.152     petrov   1163:                cpus->ci_flags = CPUF_PRIMARY;
1.282     palle    1164:                cpus->ci_cpuid = cpu_myid();
1.133     thorpej  1165:                cpus->ci_fplwp = NULL;
1.274     nakayama 1166:                cpus->ci_eintstack = NULL;
1.56      eeh      1167:                cpus->ci_spinup = main; /* Call main when we're running. */
                   1168:                cpus->ci_paddr = cpu0paddr;
1.290     palle    1169:                if (CPU_ISSUN4V) {
1.285     palle    1170:                        cpus->ci_mmfsa = cpu0paddr;
1.290     palle    1171:                        cpus->ci_tsb_desc = NULL;
                   1172:                }
1.178     mrg      1173:                cpus->ci_cpcb = (struct pcb *)u0va;
1.202     martin   1174:                cpus->ci_idepth = -1;
1.211     nakayama 1175:                memset(cpus->ci_intrpending, -1, sizeof(cpus->ci_intrpending));
1.152     petrov   1176:
1.244     rmind    1177:                uvm_lwp_setuarea(&lwp0, u0va);
1.192     rjs      1178:                lwp0.l_md.md_tf = (struct trapframe64*)(u0va + USPACE
                   1179:                    - sizeof(struct trapframe64));
1.191     martin   1180:
1.195     martin   1181:                cpu0paddr += 64 * KB;
1.155     chs      1182:
                   1183:                CPUSET_CLEAR(cpus_active);
                   1184:                CPUSET_ADD(cpus_active, 0);
1.150     cdi      1185:
1.210     martin   1186:                cpu_pmap_prepare(cpus, true);
                   1187:                cpu_pmap_init(cpus);
                   1188:
1.56      eeh      1189:                /* The rest will be done at CPU attach time. */
1.156     pk       1190:                BDPRINTF(PDB_BOOT1,
                   1191:                         ("Done inserting cpu_info into pmap_kernel()\n"));
1.54      eeh      1192:        }
1.63      eeh      1193:
1.186     christos 1194:        vmmap = (vaddr_t)reserve_dumppages((void *)(u_long)vmmap);
1.155     chs      1195:
1.276     martin   1196: #ifdef MODULAR
                   1197:        /*
1.279     martin   1198:         * For 32bit kernels:
                   1199:         *   Reserve 16 MB of VA for module loading. Right now our full
                   1200:         *   GENERIC kernel is about 13 MB, so this looks good enough.
                   1201:         * For 64bit kernels:
                   1202:         *   We can use all the space left before the special addresses,
                   1203:         *   but leave 2 pages at vmmap alone (see pmap_virtual_space)
                   1204:         *   and another red zone page.
                   1205:         */
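                        /*
                         * Editor's note: SPARC call/branch displacements reach +-2 GB,
                         * so keeping modules at low VAs near the kernel text lets them
                         * call into the kernel (and vice versa) without trampolines.
                         */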
                   1206: #ifdef __arch64__
                   1207:        module_start = vmmap + 3*PAGE_SIZE;
                   1208:        module_end = 0x08000000;        /* keep all modules within 2GB */
                   1209:        KASSERT(module_end < KERNEND);  /* of kernel text */
                   1210: #else
1.276     martin   1211:        module_start = vmmap;
                   1212:        vmmap += 16 * 1024*1024;
                   1213:        module_end = vmmap;
                   1214: #endif
1.279     martin   1215: #endif
1.276     martin   1216:
1.1       eeh      1217:        /*
                   1218:         * Set up bounds of allocatable memory for vmstat et al.
                   1219:         */
1.245     mrg      1220:        avail_start = avail->start;
1.1       eeh      1221:        for (mp = avail; mp->size; mp++)
                   1222:                avail_end = mp->start+mp->size;
1.205     martin   1223:
1.156     pk       1224:        BDPRINTF(PDB_BOOT1, ("Finished pmap_bootstrap()\n"));
1.139     thorpej  1225:
1.168     cdi      1226:        BDPRINTF(PDB_BOOT, ("left kdata: %" PRId64 " @%" PRIx64 ".\n",
                   1227:                                kdata_mem_pool.size, kdata_mem_pool.start));
1.1       eeh      1228: }
                   1229:
                   1230: /*
1.210     martin   1231:  * Allocate TSBs for both mmus from the locked kernel data segment page.
                   1232:  * This is run before the cpu itself is activated (or by the first cpu
                   1233:  * itself).
                   1234:  */
                   1235: void
                   1236: cpu_pmap_prepare(struct cpu_info *ci, bool initial)
                   1237: {
                   1238:        /* allocate our TSBs */
                   1239:        ci->ci_tsb_dmmu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE);
                   1240:        ci->ci_tsb_immu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE);
                   1241:        memset(ci->ci_tsb_dmmu, 0, TSBSIZE);
                   1242:        memset(ci->ci_tsb_immu, 0, TSBSIZE);
                   1243:        if (!initial) {
                   1244:                KASSERT(ci != curcpu());
                   1245:                /*
                   1246:                 * Initially share ctxbusy with the boot cpu; the
                   1247:                 * cpu will replace it as soon as it runs (and can
                   1248:                 * probe the number of available contexts itself).
                   1249:                 * Until then only context 0 (aka kernel) will be
                   1250:                 * referenced anyway.
                   1251:                 */
                   1252:                ci->ci_numctx = curcpu()->ci_numctx;
                   1253:                ci->ci_ctxbusy = curcpu()->ci_ctxbusy;
                   1254:        }
                   1255:
1.290     palle    1256:        if (CPU_ISSUN4V) {
                   1257:                ci->ci_tsb_desc = (struct tsb_desc *)kdata_alloc(
1.282     palle    1258:                        sizeof(struct tsb_desc), 16);
1.290     palle    1259:                memset(ci->ci_tsb_desc, 0, sizeof(struct tsb_desc));
1.282     palle    1260:                /* 8K page size used for TSB index computation */
1.290     palle    1261:                ci->ci_tsb_desc->td_idxpgsz = 0;
                   1262:                ci->ci_tsb_desc->td_assoc = 1;
                   1263:                ci->ci_tsb_desc->td_size = TSBENTS;
                   1264:                ci->ci_tsb_desc->td_ctxidx = -1;
                   1265:                ci->ci_tsb_desc->td_pgsz = 0xf;
                   1266:                ci->ci_tsb_desc->td_pa = pmap_kextract((vaddr_t)ci->ci_tsb_dmmu);
1.282     palle    1267:                BDPRINTF(PDB_BOOT1, ("cpu %d: TSB descriptor allocated at %p "
                   1268:                    "size %08x - td_pa at %p\n",
1.290     palle    1269:                    ci->ci_index, ci->ci_tsb_desc, sizeof(struct tsb_desc),
                   1270:                    ci->ci_tsb_desc->td_pa));
1.282     palle    1271:        }
                   1272:
1.210     martin   1273:        BDPRINTF(PDB_BOOT1, ("cpu %d: TSB allocated at %p/%p size %08x\n",
                   1274:            ci->ci_index, ci->ci_tsb_dmmu, ci->ci_tsb_immu, TSBSIZE));
                   1275: }
                   1276:
                   1277: /*
1.264     mrg      1278:  * Initialize the per-CPU parts for the cpu running this code.
1.210     martin   1279:  */
                   1280: void
                   1281: cpu_pmap_init(struct cpu_info *ci)
                   1282: {
                   1283:        size_t ctxsize;
                   1284:
1.264     mrg      1285:        /*
                   1286:         * We delay initialising ci_ctx_lock here as LOCKDEBUG isn't
                   1287:         * running for cpu0 yet.
                   1288:         */
1.210     martin   1289:        ci->ci_pmap_next_ctx = 1;
1.282     palle    1290:        /* All SUN4U CPUs use 13-bit contexts; SUN4V provides at least as many. */
                   1291:        ci->ci_numctx = 0x2000;
1.210     martin   1292:        ctxsize = sizeof(paddr_t)*ci->ci_numctx;
                   1293:        ci->ci_ctxbusy = (paddr_t *)kdata_alloc(ctxsize, sizeof(uint64_t));
                   1294:        memset(ci->ci_ctxbusy, 0, ctxsize);
                   1295:        LIST_INIT(&ci->ci_pmap_ctxlist);
                   1296:
                   1297:        /* mark kernel context as busy */
                   1298:        ci->ci_ctxbusy[0] = pmap_kernel()->pm_physaddr;
                   1299: }
                   1300:
                   1301: /*
1.1       eeh      1302:  * Initialize anything else for pmap handling.
                   1303:  * Called during vm_init().
                   1304:  */
                   1305: void
1.234     cegger   1306: pmap_init(void)
1.1       eeh      1307: {
1.127     chs      1308:        struct vm_page *pg;
                   1309:        struct pglist pglist;
1.170     cdi      1310:        uint64_t data;
1.54      eeh      1311:        paddr_t pa;
                   1312:        psize_t size;
                   1313:        vaddr_t va;
                   1314:
1.156     pk       1315:        BDPRINTF(PDB_BOOT1, ("pmap_init()\n"));
1.54      eeh      1316:
                   1317:        size = sizeof(struct pv_entry) * physmem;
                   1318:        if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
1.138     thorpej  1319:                (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
1.152     petrov   1320:                panic("pmap_init: no memory");
1.54      eeh      1321:
1.161     yamt     1322:        va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
1.54      eeh      1323:        if (va == 0)
1.152     petrov   1324:                panic("pmap_init: no memory");
1.54      eeh      1325:
                   1326:        /* Map the pages */
1.218     ad       1327:        TAILQ_FOREACH(pg, &pglist, pageq.queue) {
1.127     chs      1328:                pa = VM_PAGE_TO_PHYS(pg);
1.54      eeh      1329:                pmap_zero_page(pa);
1.156     pk       1330:                data = TSB_DATA(0 /* global */,
1.98      eeh      1331:                        PGSZ_8K,
1.54      eeh      1332:                        pa,
                   1333:                        1 /* priv */,
                   1334:                        1 /* Write */,
                   1335:                        1 /* Cacheable */,
1.56      eeh      1336:                        FORCE_ALIAS /* ALIAS -- Disable D$ */,
1.54      eeh      1337:                        1 /* valid */,
                   1338:                        0 /* IE */);
1.56      eeh      1339:                pmap_enter_kpage(va, data);
1.138     thorpej  1340:                va += PAGE_SIZE;
1.54      eeh      1341:        }
1.73      eeh      1342:
                   1343:        /*
1.127     chs      1344:         * initialize the pmap pools.
1.73      eeh      1345:         */
1.275     mrg      1346:        pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap),
                   1347:            SPARC64_BLOCK_SIZE, 0, 0, "pmappl", NULL, IPL_NONE, NULL, NULL,
                   1348:            NULL);
1.222     nakayama 1349:        pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
1.223     nakayama 1350:            PR_LARGECACHE, "pv_entry", NULL, IPL_NONE, NULL, NULL, NULL);
1.54      eeh      1351:
1.1       eeh      1352:        vm_first_phys = avail_start;
                   1353:        vm_num_phys = avail_end - avail_start;
1.206     martin   1354:
1.261     mrg      1355:        mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
1.259     mrg      1356: #if defined(USE_LOCKSAFE_PSEG_GETSET)
                   1357:        mutex_init(&pseg_lock, MUTEX_SPIN, IPL_VM);
                   1358: #endif
1.250     martin   1359:        lock_available = true;
1.140     martin   1360: }
                   1361:
                   1362: /*
                   1363:  * How much virtual space is available to the kernel?
                   1364:  */
                   1365: static vaddr_t kbreak; /* End of kernel VA */
                   1366: void
1.233     dsl      1367: pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1.140     martin   1368: {
                   1369:
                   1370:        /*
1.279     martin   1371:         * Reserve one segment for kernel virtual memory.
                   1372:         */
                   1373: #ifdef __arch64__
                   1374:        /*
                   1375:         * On 64 bit kernels, start it beyond the firmware, so
                   1376:         * we are basically unrestricted.
                   1377:         */
                   1378:        *start = kbreak = VM_KERNEL_MEM_VA_START;
                   1379:        *end = VM_MAX_KERNEL_ADDRESS;
                   1380: #else
                   1381:        /*
                   1382:         * Reserve two pages for pmap_copy_page && /dev/mem, but otherwise
                   1383:         * end it beyond the iospace and other special fixed addresses.
1.140     martin   1384:         */
                   1385:        *start = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE);
                   1386:        *end = VM_MAX_KERNEL_ADDRESS;
1.279     martin   1387: #endif
1.156     pk       1388:        BDPRINTF(PDB_BOOT1, ("pmap_virtual_space: %x-%x\n", *start, *end));
1.1       eeh      1389: }
                   1390:
1.77      eeh      1391: /*
                   1392:  * Preallocate kernel page tables to a specified VA.
                   1393:  * This simply loops through the first TTE for each
1.156     pk       1394:  * page table from the beginning of the kernel pmap,
1.77      eeh      1395:  * reads the entry, and if the result is
                   1396:  * zero (either invalid entry or no page table) it stores
                   1397:  * a zero there, populating page tables in the process.
                   1398:  * This is not the most efficient technique but I don't
                   1399:  * expect it to be called that often.
                   1400:  */
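                        /*
                         * Editor's note -- a sketch of the expected call pattern (names
                         * per UVM, shown for illustration only):
                         *
                         *      if (uvm_maxkaddr < (addr + size))
                         *              uvm_maxkaddr = pmap_growkernel(addr + size);
                         *
                         * i.e. UVM hands in the new high-water mark and gets back the
                         * end of the kernel VA now backed by page tables.
                         */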
1.156     pk       1401: vaddr_t
1.232     dsl      1402: pmap_growkernel(vaddr_t maxkvaddr)
1.77      eeh      1403: {
                   1404:        struct pmap *pm = pmap_kernel();
1.127     chs      1405:        paddr_t pa;
1.156     pk       1406:
1.279     martin   1407:        if (maxkvaddr >= VM_MAX_KERNEL_ADDRESS) {
1.98      eeh      1408:                printf("WARNING: cannot extend kernel pmap beyond %p to %p\n",
1.279     martin   1409:                       (void *)VM_MAX_KERNEL_ADDRESS, (void *)maxkvaddr);
1.98      eeh      1410:                return (kbreak);
                   1411:        }
1.127     chs      1412:        DPRINTF(PDB_GROW, ("pmap_growkernel(%lx...%lx)\n", kbreak, maxkvaddr));
1.77      eeh      1413:        /* Align with the start of a page table */
1.127     chs      1414:        for (kbreak &= (-1 << PDSHIFT); kbreak < maxkvaddr;
                   1415:             kbreak += (1 << PDSHIFT)) {
1.254     mrg      1416:                if (pseg_get(pm, kbreak) & TLB_V)
1.127     chs      1417:                        continue;
1.77      eeh      1418:
1.127     chs      1419:                pa = 0;
                   1420:                while (pseg_set(pm, kbreak, 0, pa) & 1) {
1.156     pk       1421:                        DPRINTF(PDB_GROW,
1.127     chs      1422:                            ("pmap_growkernel: extending %lx\n", kbreak));
                   1423:                        pa = 0;
                   1424:                        if (!pmap_get_page(&pa))
1.175     mrg      1425:                                panic("pmap_growkernel: no pages");
1.127     chs      1426:                        ENTER_STAT(ptpneeded);
1.77      eeh      1427:                }
                   1428:        }
1.78      eeh      1429:        return (kbreak);
1.77      eeh      1430: }
                   1431:
1.1       eeh      1432: /*
                   1433:  * Create and return a physical map.
                   1434:  */
1.4       eeh      1435: struct pmap *
1.234     cegger   1436: pmap_create(void)
1.1       eeh      1437: {
                   1438:        struct pmap *pm;
1.41      eeh      1439:
1.77      eeh      1440:        DPRINTF(PDB_CREATE, ("pmap_create()\n"));
1.44      chs      1441:
1.203     ad       1442:        pm = pool_cache_get(&pmap_cache, PR_WAITOK);
1.127     chs      1443:        memset(pm, 0, sizeof *pm);
                   1444:        DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm));
1.1       eeh      1445:
1.273     rmind    1446:        mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
                   1447:        uvm_obj_init(&pm->pm_obj, NULL, false, 1);
                   1448:        uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
                   1449:
1.127     chs      1450:        if (pm != pmap_kernel()) {
1.151     petrov   1451:                while (!pmap_get_page(&pm->pm_physaddr)) {
1.127     chs      1452:                        uvm_wait("pmap_create");
                   1453:                }
1.49      mrg      1454:                pm->pm_segs = (paddr_t *)(u_long)pm->pm_physaddr;
1.1       eeh      1455:        }
1.210     martin   1456:        DPRINTF(PDB_CREATE, ("pmap_create(%p): ctx %d\n", pm, pmap_ctx(pm)));
1.127     chs      1457:        return pm;
1.1       eeh      1458: }
                   1459:
                   1460: /*
                   1461:  * Add a reference to the given pmap.
                   1462:  */
                   1463: void
1.232     dsl      1464: pmap_reference(struct pmap *pm)
1.1       eeh      1465: {
1.73      eeh      1466:
1.203     ad       1467:        atomic_inc_uint(&pm->pm_refs);
1.1       eeh      1468: }
                   1469:
                   1470: /*
                   1471:  * Retire the given pmap from service.
                   1472:  * Should only be called if the map contains no valid mappings.
                   1473:  */
                   1474: void
1.232     dsl      1475: pmap_destroy(struct pmap *pm)
1.1       eeh      1476: {
1.212     nakayama 1477: #ifdef MULTIPROCESSOR
                   1478:        struct cpu_info *ci;
1.262     mrg      1479:        sparc64_cpuset_t pmap_cpus_active;
                   1480: #else
                   1481: #define pmap_cpus_active 0
1.212     nakayama 1482: #endif
1.127     chs      1483:        struct vm_page *pg, *nextpg;
1.1       eeh      1484:
1.208     ad       1485:        if ((int)atomic_dec_uint_nv(&pm->pm_refs) > 0) {
1.127     chs      1486:                return;
1.73      eeh      1487:        }
1.127     chs      1488:        DPRINTF(PDB_DESTROY, ("pmap_destroy: freeing pmap %p\n", pm));
1.212     nakayama 1489: #ifdef MULTIPROCESSOR
1.262     mrg      1490:        CPUSET_CLEAR(pmap_cpus_active);
1.212     nakayama 1491:        for (ci = cpus; ci != NULL; ci = ci->ci_next) {
1.262     mrg      1492:                /* XXXMRG: Move the lock inside one or both tests? */
                   1493:                mutex_enter(&ci->ci_ctx_lock);
                   1494:                if (CPUSET_HAS(cpus_active, ci->ci_index)) {
                   1495:                        if (pm->pm_ctx[ci->ci_index] > 0) {
                   1496:                                CPUSET_ADD(pmap_cpus_active, ci->ci_index);
                   1497:                                ctx_free(pm, ci);
                   1498:                        }
                   1499:                }
                   1500:                mutex_exit(&ci->ci_ctx_lock);
1.212     nakayama 1501:        }
                   1502: #else
1.262     mrg      1503:        if (pmap_ctx(pm)) {
                   1504:                mutex_enter(&curcpu()->ci_ctx_lock);
                   1505:                ctx_free(pm, curcpu());
                   1506:                mutex_exit(&curcpu()->ci_ctx_lock);
                   1507:        }
1.212     nakayama 1508: #endif
1.127     chs      1509:
                   1510:        /* we could be a little smarter and leave pages zeroed */
                   1511:        for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) {
1.268     uebayasi 1512: #ifdef DIAGNOSTIC
1.267     uebayasi 1513:                struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1.268     uebayasi 1514: #endif
1.267     uebayasi 1515:
1.265     hannken  1516:                KASSERT((pg->flags & PG_MARKER) == 0);
1.218     ad       1517:                nextpg = TAILQ_NEXT(pg, listq.queue);
                   1518:                TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
1.266     uebayasi 1519:                KASSERT(md->mdpg_pvh.pv_pmap == NULL);
1.262     mrg      1520:                dcache_flush_page_cpuset(VM_PAGE_TO_PHYS(pg), pmap_cpus_active);
1.127     chs      1521:                uvm_pagefree(pg);
1.1       eeh      1522:        }
1.262     mrg      1523:        pmap_free_page((paddr_t)(u_long)pm->pm_segs, pmap_cpus_active);
1.273     rmind    1524:
                   1525:        uvm_obj_destroy(&pm->pm_obj, false);
                   1526:        mutex_destroy(&pm->pm_obj_lock);
1.203     ad       1527:        pool_cache_put(&pmap_cache, pm);
1.1       eeh      1528: }
                   1529:
                   1530: /*
                   1531:  * Copy the range specified by src_addr/len
                   1532:  * from the source map to the range dst_addr/len
                   1533:  * in the destination map.
                   1534:  *
                   1535:  * This routine is only advisory and need not do anything.
                   1536:  */
                   1537: void
1.233     dsl      1538: pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr, vsize_t len, vaddr_t src_addr)
1.1       eeh      1539: {
1.127     chs      1540:
                   1541:        DPRINTF(PDB_CREATE, ("pmap_copy(%p, %p, %p, %lx, %p)\n",
                   1542:                             dst_pmap, src_pmap, (void *)(u_long)dst_addr,
                   1543:                             (u_long)len, (void *)(u_long)src_addr));
1.1       eeh      1544: }
                   1545:
                   1546: /*
                   1547:  * Activate the address space for the specified process.  If the
                   1548:  * process is the current process, load the new MMU context.
                   1549:  */
                   1550: void
1.232     dsl      1551: pmap_activate(struct lwp *l)
1.1       eeh      1552: {
1.133     thorpej  1553:        struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.1       eeh      1554:
1.154     chs      1555:        if (pmap == pmap_kernel()) {
                   1556:                return;
                   1557:        }
                   1558:
1.1       eeh      1559:        /*
1.246     skrll    1560:         * This is essentially the same thing that happens in cpu_switchto()
1.1       eeh      1561:         * when the newly selected process is about to run, except that we
                   1562:         * have to make sure to clean the register windows before we set
                   1563:         * the new context.
                   1564:         */
                   1565:
1.133     thorpej  1566:        if (l != curlwp) {
1.127     chs      1567:                return;
                   1568:        }
                   1569:        write_user_windows();
                   1570:        pmap_activate_pmap(pmap);
                   1571: }
                   1572:
                   1573: void
                   1574: pmap_activate_pmap(struct pmap *pmap)
                   1575: {
                   1576:
1.210     martin   1577:        if (pmap_ctx(pmap) == 0) {
1.127     chs      1578:                (void) ctx_alloc(pmap);
1.1       eeh      1579:        }
1.278     mrg      1580:        DPRINTF(PDB_ACTIVATE,
                   1581:                ("%s: cpu%d activating ctx %d\n", __func__,
                   1582:                 cpu_number(), pmap_ctx(pmap)));
1.210     martin   1583:        dmmu_set_secondary_context(pmap_ctx(pmap));
1.1       eeh      1584: }
                   1585:
                   1586: /*
                   1587:  * Deactivate the address space of the specified process.
                   1588:  */
                   1589: void
1.232     dsl      1590: pmap_deactivate(struct lwp *l)
1.1       eeh      1591: {
1.278     mrg      1592:
                   1593:        DPRINTF(PDB_ACTIVATE,
                   1594:                ("%s: cpu%d deactivating ctx %d\n", __func__,
                   1595:                 cpu_number(), pmap_ctx(l->l_proc->p_vmspace->vm_map.pmap)));
1.1       eeh      1596: }
                   1597:
1.4       eeh      1598: /*
                   1599:  * pmap_kenter_pa:             [ INTERFACE ]
                   1600:  *
                   1601:  *     Enter a va -> pa mapping into the kernel pmap without any
                   1602:  *     physical->virtual tracking.
                   1603:  *
                   1604:  *     Note: no locking is necessary in this function.
                   1605:  */
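                        /*
                         * Editor's note -- illustrative use, with hypothetical va/pa:
                         *
                         *      pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
                         *
                         * The entry is wired (TLB_TSB_LOCK below) and must be removed
                         * with pmap_kremove(), never pmap_remove().
                         */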
                   1606: void
1.242     cegger   1607: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.4       eeh      1608: {
                   1609:        pte_t tte;
1.127     chs      1610:        paddr_t ptp;
1.4       eeh      1611:        struct pmap *pm = pmap_kernel();
1.127     chs      1612:        int i;
1.4       eeh      1613:
1.127     chs      1614:        KASSERT(va < INTSTACK || va > EINTSTACK);
                   1615:        KASSERT(va < kdata || va > ekdata);
1.102     eeh      1616:
1.4       eeh      1617:        /*
                   1618:         * Construct the TTE.
                   1619:         */
1.127     chs      1620:
                   1621:        ENTER_STAT(unmanaged);
1.177     martin   1622:        if (pa & (PMAP_NVC|PMAP_NC)) {
1.127     chs      1623:                ENTER_STAT(ci);
1.177     martin   1624:        }
1.127     chs      1625:
1.106     eeh      1626:        tte.data = TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */,
1.127     chs      1627:                            (VM_PROT_WRITE & prot),
                   1628:                            !(pa & PMAP_NC), pa & (PMAP_NVC), 1, 0);
                   1629:        /* We don't track mod/ref here. */
                   1630:        if (prot & VM_PROT_WRITE)
                   1631:                tte.data |= TLB_REAL_W|TLB_W;
1.190     martin   1632:        if (prot & VM_PROT_EXECUTE)
                   1633:                tte.data |= TLB_EXEC;
1.105     eeh      1634:        tte.data |= TLB_TSB_LOCK;       /* wired */
1.127     chs      1635:        ptp = 0;
                   1636:
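                        /*
                         * Editor's note, inferred from the checks below: pseg_set()
                         * returns 1 in its result when a page-table page was missing,
                         * so we allocate one and retry; a result of 0 with ptp still
                         * set means the spare page was not consumed.
                         */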
                   1637:  retry:
                   1638:        i = pseg_set(pm, va, tte.data, ptp);
                   1639:        if (i & 1) {
                   1640:                KASSERT((i & 4) == 0);
                   1641:                ptp = 0;
                   1642:                if (!pmap_get_page(&ptp))
1.111     eeh      1643:                        panic("pmap_kenter_pa: no pages");
1.127     chs      1644:                ENTER_STAT(ptpneeded);
                   1645:                goto retry;
1.4       eeh      1646:        }
1.127     chs      1647:        if (ptp && i == 0) {
1.86      eeh      1648:                /* We allocated a spare page but didn't use it.  Free it. */
1.156     pk       1649:                printf("pmap_kenter_pa: freeing unused page %llx\n",
1.127     chs      1650:                       (long long)ptp);
1.262     mrg      1651:                pmap_free_page_noflush(ptp);
1.86      eeh      1652:        }
1.278     mrg      1653: #ifdef PMAP_DEBUG
1.4       eeh      1654:        i = ptelookup_va(va);
1.125     mrg      1655:        if (pmapdebug & PDB_ENTER)
                   1656:                prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x "
1.156     pk       1657:                        "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1.210     martin   1658:                        (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
                   1659:        if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1.125     mrg      1660:                prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x "
1.156     pk       1661:                        "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1.210     martin   1662:                        (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
                   1663:                        (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data,
                   1664:                        i, &curcpu()->ci_tsb_dmmu[i]);
1.156     pk       1665:                prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1.143     chs      1666:                        va, (int)(tte.data>>32), (int)tte.data, i,
1.210     martin   1667:                        &curcpu()->ci_tsb_dmmu[i]);
1.4       eeh      1668:        }
                   1669: #endif
                   1670: }
1.91      eeh      1671:
1.4       eeh      1672: /*
                   1673:  * pmap_kremove:               [ INTERFACE ]
                   1674:  *
1.94      thorpej  1675:  *     Remove a mapping entered with pmap_kenter_pa() starting at va,
                   1676:  *     for size bytes (assumed to be page rounded).
1.4       eeh      1677:  */
                   1678: void
1.232     dsl      1679: pmap_kremove(vaddr_t va, vsize_t size)
1.4       eeh      1680: {
                   1681:        struct pmap *pm = pmap_kernel();
                   1682:        int64_t data;
1.127     chs      1683:        paddr_t pa;
1.158     martin   1684:        int rv;
1.185     thorpej  1685:        bool flush = false;
1.4       eeh      1686:
1.127     chs      1687:        KASSERT(va < INTSTACK || va > EINTSTACK);
                   1688:        KASSERT(va < kdata || va > ekdata);
1.102     eeh      1689:
1.127     chs      1690:        DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size));
1.138     thorpej  1691:        for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) {
1.127     chs      1692:
                   1693: #ifdef DIAGNOSTIC
1.4       eeh      1694:                /*
                   1695:                 * Is this part of the permanent 4MB mapping?
                   1696:                 */
1.127     chs      1697:                if (va >= ktext && va < roundup(ekdata, 4*MEG))
1.156     pk       1698:                        panic("pmap_kremove: va=%08x in locked TLB", (u_int)va);
1.4       eeh      1699: #endif
1.127     chs      1700:
                   1701:                data = pseg_get(pm, va);
1.254     mrg      1702:                if ((data & TLB_V) == 0) {
1.127     chs      1703:                        continue;
                   1704:                }
                   1705:
                   1706:                flush = true;
                   1707:                pa = data & TLB_PA_MASK;
                   1708:
                   1709:                /*
                   1710:                 * We need to flip the valid bit and
                   1711:                 * clear the access statistics.
                   1712:                 */
                   1713:
1.158     martin   1714:                rv = pseg_set(pm, va, 0, 0);
                   1715:                if (rv & 1)
                   1716:                        panic("pmap_kremove: pseg_set needs spare, rv=%d",
                   1717:                            rv);
1.127     chs      1718:                DPRINTF(PDB_DEMAP, ("pmap_kremove: seg %x pdir %x pte %x\n",
                   1719:                    (int)va_to_seg(va), (int)va_to_dir(va),
                   1720:                    (int)va_to_pte(va)));
                   1721:                REMOVE_STAT(removes);
                   1722:
1.212     nakayama 1723:                tsb_invalidate(va, pm);
1.127     chs      1724:                REMOVE_STAT(tflushes);
                   1725:
                   1726:                /*
                   1727:                 * Here we assume nothing can get into the TLB
                   1728:                 * unless it has a PTE.
                   1729:                 */
                   1730:
1.212     nakayama 1731:                tlb_flush_pte(va, pm);
1.255     mrg      1732:                dcache_flush_page_all(pa);
1.4       eeh      1733:        }
1.255     mrg      1734:        if (flush)
1.127     chs      1735:                REMOVE_STAT(flushes);
1.4       eeh      1736: }
                   1737:
1.1       eeh      1738: /*
                   1739:  * Insert physical page at pa into the given pmap at virtual address va.
                   1740:  * Supports 64-bit pa so we can map I/O space.
                   1741:  */
1.143     chs      1742:
1.45      thorpej  1743: int
1.236     cegger   1744: pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       eeh      1745: {
                   1746:        pte_t tte;
1.127     chs      1747:        int64_t data;
1.148     christos 1748:        paddr_t opa = 0, ptp; /* XXX: gcc */
1.217     ad       1749:        pv_entry_t pvh, npv = NULL, freepv;
1.127     chs      1750:        struct vm_page *pg, *opg, *ptpg;
1.217     ad       1751:        int s, i, uncached = 0, error = 0;
1.160     martin   1752:        int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */
1.185     thorpej  1753:        bool wired = (flags & PMAP_WIRED) != 0;
                   1754:        bool wasmapped = false;
                   1755:        bool dopv = true;
1.1       eeh      1756:
                   1757:        /*
1.102     eeh      1758:         * Is this part of the permanent mappings?
1.1       eeh      1759:         */
1.127     chs      1760:        KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK);
                   1761:        KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata);
                   1762:
1.217     ad       1763:        /* Grab a spare PV. */
                   1764:        freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
                   1765:        if (__predict_false(freepv == NULL)) {
                   1766:                if (flags & PMAP_CANFAIL)
                   1767:                        return (ENOMEM);
                   1768:                panic("pmap_enter: no pv entries available");
                   1769:        }
                   1770:        freepv->pv_next = NULL;
                   1771:
1.1       eeh      1772:        /*
1.127     chs      1773:         * If a mapping at this address already exists, check if we're
                   1774:         * entering the same PA again.  If it's different, remove it.
1.38      eeh      1775:         */
1.127     chs      1776:
1.203     ad       1777:        mutex_enter(&pmap_lock);
1.127     chs      1778:        data = pseg_get(pm, va);
                   1779:        if (data & TLB_V) {
                   1780:                wasmapped = true;
                   1781:                opa = data & TLB_PA_MASK;
                   1782:                if (opa != pa) {
                   1783:                        opg = PHYS_TO_VM_PAGE(opa);
                   1784:                        if (opg != NULL) {
                   1785:                                npv = pmap_remove_pv(pm, va, opg);
                   1786:                        }
                   1787:                }
1.41      eeh      1788:        }
                   1789:
1.38      eeh      1790:        /*
1.1       eeh      1791:         * Construct the TTE.
                   1792:         */
1.127     chs      1793:        pg = PHYS_TO_VM_PAGE(pa);
                   1794:        if (pg) {
1.266     uebayasi 1795:                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                   1796:
                   1797:                pvh = &md->mdpg_pvh;
1.127     chs      1798:                uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC));
1.38      eeh      1799: #ifdef DIAGNOSTIC
1.45      thorpej  1800:                if ((flags & VM_PROT_ALL) & ~prot)
1.38      eeh      1801:                        panic("pmap_enter: access_type exceeds prot");
                   1802: #endif
1.127     chs      1803:                /*
                   1804:                 * If we don't have the traphandler do it,
                   1805:                 * set the ref/mod bits now.
                   1806:                 */
1.119     eeh      1807:                if (flags & VM_PROT_ALL)
1.127     chs      1808:                        pvh->pv_va |= PV_REF;
1.119     eeh      1809:                if (flags & VM_PROT_WRITE)
1.127     chs      1810:                        pvh->pv_va |= PV_MOD;
                   1811:
                   1812:                /*
                   1813:                 * make sure we have a pv entry ready if we need one.
                   1814:                 */
                   1815:                if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) {
                   1816:                        if (npv != NULL) {
1.217     ad       1817:                                /* free it */
                   1818:                                npv->pv_next = freepv;
                   1819:                                freepv = npv;
1.127     chs      1820:                                npv = NULL;
                   1821:                        }
                   1822:                        if (wasmapped && opa == pa) {
                   1823:                                dopv = false;
                   1824:                        }
                   1825:                } else if (npv == NULL) {
1.217     ad       1826:                        /* use the pre-allocated pv */
                   1827:                        npv = freepv;
                   1828:                        freepv = freepv->pv_next;
1.127     chs      1829:                }
                   1830:                ENTER_STAT(managed);
1.1       eeh      1831:        } else {
1.127     chs      1832:                ENTER_STAT(unmanaged);
                   1833:                dopv = false;
                   1834:                if (npv != NULL) {
1.217     ad       1835:                        /* free it */
                   1836:                        npv->pv_next = freepv;
                   1837:                        freepv = npv;
1.127     chs      1838:                        npv = NULL;
                   1839:                }
1.1       eeh      1840:        }
1.127     chs      1841:
                   1842: #ifndef NO_VCACHE
                   1843:        if (pa & PMAP_NVC)
1.4       eeh      1844: #endif
1.127     chs      1845:                uncached = 1;
1.177     martin   1846:        if (uncached) {
1.127     chs      1847:                ENTER_STAT(ci);
1.177     martin   1848:        }
1.105     eeh      1849:        tte.data = TSB_DATA(0, size, pa, pm == pmap_kernel(),
1.156     pk       1850:                flags & VM_PROT_WRITE, !(pa & PMAP_NC),
1.127     chs      1851:                uncached, 1, pa & PMAP_LITTLE);
1.41      eeh      1852: #ifdef HWREF
1.127     chs      1853:        if (prot & VM_PROT_WRITE)
                   1854:                tte.data |= TLB_REAL_W;
1.143     chs      1855:        if (prot & VM_PROT_EXECUTE)
                   1856:                tte.data |= TLB_EXEC;
1.73      eeh      1857: #else
                   1858:        /* If it needs ref accounting do nothing. */
1.119     eeh      1859:        if (!(flags & VM_PROT_READ)) {
1.203     ad       1860:                mutex_exit(&pmap_lock);
1.217     ad       1861:                goto out;
1.73      eeh      1862:        }
1.41      eeh      1863: #endif
1.119     eeh      1864:        if (flags & VM_PROT_EXECUTE) {
                   1865:                if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) == 0)
                   1866:                        tte.data |= TLB_EXEC_ONLY|TLB_EXEC;
                   1867:                else
                   1868:                        tte.data |= TLB_EXEC;
                   1869:        }
1.127     chs      1870:        if (wired)
                   1871:                tte.data |= TLB_TSB_LOCK;
                   1872:        ptp = 0;
                   1873:
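                        /*
                         * Editor's note, inferred from the handling below: pseg_set()
                         * reports how the spare page ptp was used: -2 = the VA lies in
                         * an unmappable hole; bit 4/2 = ptp was consumed as an L3/L2
                         * page-table page and must be tracked in pm_obj; bit 1 = yet
                         * another table page is needed, so allocate one and retry;
                         * 0 with ptp nonzero = the spare went unused and is freed.
                         */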
                   1874:  retry:
                   1875:        i = pseg_set(pm, va, tte.data, ptp);
1.281     martin   1876:        if (i == -2) {
                   1877:                if (flags & PMAP_CANFAIL) {
                                                /* Editor's fix: unwind instead of leaking
                                                 * pmap_lock and the spare pv. */
                                                mutex_exit(&pmap_lock);
                                                if (npv != NULL) {
                                                        npv->pv_next = freepv;
                                                        freepv = npv;
                                                }
                   1878:                        error = ENOMEM;
                                                goto out;
                                        }
                   1879:                panic("pmap_enter: invalid VA (inside hole)");
                   1880:        }
1.127     chs      1881:        if (i & 4) {
                   1882:                /* ptp used as L3 */
                   1883:                KASSERT(ptp != 0);
                   1884:                KASSERT((i & 3) == 0);
                   1885:                ptpg = PHYS_TO_VM_PAGE(ptp);
                   1886:                if (ptpg) {
                   1887:                        ptpg->offset = (uint64_t)va & (0xfffffLL << 23);
1.218     ad       1888:                        TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1.127     chs      1889:                } else {
                   1890:                        KASSERT(pm == pmap_kernel());
                   1891:                }
                   1892:        }
                   1893:        if (i & 2) {
                   1894:                /* ptp used as L2 */
                   1895:                KASSERT(ptp != 0);
                   1896:                KASSERT((i & 4) == 0);
                   1897:                ptpg = PHYS_TO_VM_PAGE(ptp);
                   1898:                if (ptpg) {
                   1899:                        ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13;
1.218     ad       1900:                        TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1.127     chs      1901:                } else {
                   1902:                        KASSERT(pm == pmap_kernel());
                   1903:                }
                   1904:        }
                   1905:        if (i & 1) {
                   1906:                KASSERT((i & 4) == 0);
                   1907:                ptp = 0;
                   1908:                if (!pmap_get_page(&ptp)) {
1.217     ad       1909:                        mutex_exit(&pmap_lock);
1.127     chs      1910:                        if (flags & PMAP_CANFAIL) {
                   1911:                                if (npv != NULL) {
1.217     ad       1912:                                        /* free it */
                   1913:                                        npv->pv_next = freepv;
                   1914:                                        freepv = npv;
1.127     chs      1915:                                }
1.217     ad       1916:                                error = ENOMEM;
                   1917:                                goto out;
1.127     chs      1918:                        } else {
1.111     eeh      1919:                                panic("pmap_enter: no pages");
1.127     chs      1920:                        }
1.111     eeh      1921:                }
1.127     chs      1922:                ENTER_STAT(ptpneeded);
                   1923:                goto retry;
                   1924:        }
                   1925:        if (ptp && i == 0) {
                   1926:                /* We allocated a spare page but didn't use it.  Free it. */
                   1927:                printf("pmap_enter: freeing unused page %llx\n",
                   1928:                       (long long)ptp);
1.262     mrg      1929:                pmap_free_page_noflush(ptp);
1.127     chs      1930:        }
                   1931:        if (dopv) {
                   1932:                pmap_enter_pv(pm, va, pa, pg, npv);
1.1       eeh      1933:        }
                   1934:
1.203     ad       1935:        mutex_exit(&pmap_lock);
1.278     mrg      1936: #ifdef PMAP_DEBUG
1.1       eeh      1937:        i = ptelookup_va(va);
1.125     mrg      1938:        if (pmapdebug & PDB_ENTER)
                   1939:                prom_printf("pmap_enter: va=%08x data=%08x:%08x "
1.156     pk       1940:                        "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1.210     martin   1941:                        (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
                   1942:        if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1.125     mrg      1943:                prom_printf("pmap_enter: evicting entry tag=%x:%08x "
1.156     pk       1944:                        "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1.210     martin   1945:                        (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
                   1946:                        (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, i,
                   1947:                        &curcpu()->ci_tsb_dmmu[i]);
1.156     pk       1948:                prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1.143     chs      1949:                        va, (int)(tte.data>>32), (int)tte.data, i,
1.210     martin   1950:                        &curcpu()->ci_tsb_dmmu[i]);
1.1       eeh      1951:        }
                   1952: #endif
1.143     chs      1953:
                   1954:        if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) {
1.127     chs      1955:
                   1956:                /*
                   1957:                 * preload the TSB with the new entry,
                   1958:                 * since we're going to need it immediately anyway.
                   1959:                 */
                   1960:
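                                /*
                                 * Note: the empty __asm volatile statements
                                 * below are compiler-level barriers only; they
                                 * keep the compiler from reordering the data
                                 * store ahead of the matching tag store for
                                 * the same TSB slot.
                                 */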
1.210     martin   1961:                KASSERT(pmap_ctx(pm)>=0);
1.127     chs      1962:                i = ptelookup_va(va);
1.210     martin   1963:                tte.tag = TSB_TAG(0, pmap_ctx(pm), va);
1.127     chs      1964:                s = splhigh();
1.216     martin   1965:                if (wasmapped && pmap_is_on_mmu(pm)) {
1.212     nakayama 1966:                        tsb_invalidate(va, pm);
1.144     chs      1967:                }
1.143     chs      1968:                if (flags & (VM_PROT_READ | VM_PROT_WRITE)) {
1.210     martin   1969:                        curcpu()->ci_tsb_dmmu[i].tag = tte.tag;
1.166     perry    1970:                        __asm volatile("" : : : "memory");
1.210     martin   1971:                        curcpu()->ci_tsb_dmmu[i].data = tte.data;
1.143     chs      1972:                }
                   1973:                if (flags & VM_PROT_EXECUTE) {
1.210     martin   1974:                        curcpu()->ci_tsb_immu[i].tag = tte.tag;
1.166     perry    1975:                        __asm volatile("" : : : "memory");
1.210     martin   1976:                        curcpu()->ci_tsb_immu[i].data = tte.data;
1.143     chs      1977:                }
1.127     chs      1978:
                   1979:                /*
                   1980:                 * it's only necessary to flush the TLB if this page was
                   1981:                 * previously mapped, but for some reason it's a lot faster
                   1982:                 * for the fork+exit microbenchmark if we always do it.
                   1983:                 */
                   1984:
1.210     martin   1985:                KASSERT(pmap_ctx(pm)>=0);
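                                /*
                                 * If the old mapping was never live on any
                                 * MMU, no other CPU can have the translation
                                 * cached, so the local sp_tlb_flush_pte()
                                 * suffices; presumably that is why the
                                 * cross-call variant is avoided below.
                                 */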
1.212     nakayama 1986: #ifdef MULTIPROCESSOR
1.216     martin   1987:                if (wasmapped && pmap_is_on_mmu(pm))
1.212     nakayama 1988:                        tlb_flush_pte(va, pm);
1.252     mrg      1989:                else
                   1990:                        sp_tlb_flush_pte(va, pmap_ctx(pm));
1.212     nakayama 1991: #else
                   1992:                tlb_flush_pte(va, pm);
                   1993: #endif
1.127     chs      1994:                splx(s);
1.216     martin   1995:        } else if (wasmapped && pmap_is_on_mmu(pm)) {
1.41      eeh      1996:                /* Force reload -- protections may be changed */
1.210     martin   1997:                KASSERT(pmap_ctx(pm)>=0);
1.212     nakayama 1998:                tsb_invalidate(va, pm);
                   1999:                tlb_flush_pte(va, pm);
1.30      eeh      2000:        }
1.70      eeh      2001:
1.1       eeh      2002:        /* We will let the fast mmu miss interrupt load the new translation */
                   2003:        pv_check();
1.217     ad       2004:  out:
                   2005:        /* Catch up on deferred frees. */
                   2006:        for (; freepv != NULL; freepv = npv) {
                   2007:                npv = freepv->pv_next;
                   2008:                pool_cache_put(&pmap_pv_cache, freepv);
                   2009:        }
                   2010:        return error;
1.1       eeh      2011: }
                   2012:
1.127     chs      2013: void
1.232     dsl      2014: pmap_remove_all(struct pmap *pm)
1.127     chs      2015: {
1.212     nakayama 2016: #ifdef MULTIPROCESSOR
                   2017:        struct cpu_info *ci;
1.258     mrg      2018:        sparc64_cpuset_t pmap_cpus_active;
1.212     nakayama 2019: #endif
1.127     chs      2020:
                   2021:        if (pm == pmap_kernel()) {
                   2022:                return;
                   2023:        }
1.188     martin   2024:        write_user_windows();
1.127     chs      2025:        pm->pm_refs = 0;
1.258     mrg      2026:
1.262     mrg      2027:        /*
                   2028:         * XXXMRG: pmap_destroy() does exactly the same dance here.
                   2029:         * surely one of them isn't necessary?
                   2030:         */
1.212     nakayama 2031: #ifdef MULTIPROCESSOR
1.258     mrg      2032:        CPUSET_CLEAR(pmap_cpus_active);
1.212     nakayama 2033:        for (ci = cpus; ci != NULL; ci = ci->ci_next) {
1.262     mrg      2034:                /* XXXMRG: Move the lock inside one or both tests? */
                   2035:                mutex_enter(&ci->ci_ctx_lock);
1.258     mrg      2036:                if (CPUSET_HAS(cpus_active, ci->ci_index)) {
1.262     mrg      2037:                        if (pm->pm_ctx[ci->ci_index] > 0) {
1.258     mrg      2038:                                CPUSET_ADD(pmap_cpus_active, ci->ci_index);
1.262     mrg      2039:                                ctx_free(pm, ci);
                   2040:                        }
1.258     mrg      2041:                }
1.262     mrg      2042:                mutex_exit(&ci->ci_ctx_lock);
1.212     nakayama 2043:        }
                   2044: #else
1.262     mrg      2045:        if (pmap_ctx(pm)) {
                   2046:                mutex_enter(&curcpu()->ci_ctx_lock);
                   2047:                ctx_free(pm, curcpu());
                   2048:                mutex_exit(&curcpu()->ci_ctx_lock);
                   2049:        }
1.212     nakayama 2050: #endif
1.258     mrg      2051:
1.188     martin   2052:        REMOVE_STAT(flushes);
1.262     mrg      2053:        /*
                   2054:         * XXXMRG: couldn't we do something less severe here, and
                   2055:         * only flush the right context on each CPU?
                   2056:         */
1.270     mrg      2057:        blast_dcache();
1.127     chs      2058: }
                   2059:
1.1       eeh      2060: /*
                   2061:  * Remove the given range of mapping entries.
                   2062:  */
                   2063: void
1.233     dsl      2064: pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
1.1       eeh      2065: {
                   2066:        int64_t data;
1.127     chs      2067:        paddr_t pa;
                   2068:        struct vm_page *pg;
1.217     ad       2069:        pv_entry_t pv, freepv = NULL;
1.158     martin   2070:        int rv;
1.185     thorpej  2071:        bool flush = FALSE;
1.1       eeh      2072:
1.156     pk       2073:        /*
1.1       eeh      2074:         * In here we should check each pseg and if there are no more entries,
                   2075:         * free it.  It's just that linear scans of 8K pages gets expensive.
                   2076:         */
                   2077:
1.127     chs      2078:        KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK);
                   2079:        KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata);
1.102     eeh      2080:
1.203     ad       2081:        mutex_enter(&pmap_lock);
1.127     chs      2082:        DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm,
                   2083:                             (void *)(u_long)va, (void *)(u_long)endva));
                   2084:        REMOVE_STAT(calls);
1.1       eeh      2085:
                   2086:        /* Now do the real work */
1.138     thorpej  2087:        for (; va < endva; va += PAGE_SIZE) {
1.127     chs      2088: #ifdef DIAGNOSTIC
1.1       eeh      2089:                /*
                   2090:                 * Is this part of the permanent 4MB mapping?
                   2091:                 */
1.156     pk       2092:                if (pm == pmap_kernel() && va >= ktext &&
1.98      eeh      2093:                        va < roundup(ekdata, 4*MEG))
1.156     pk       2094:                        panic("pmap_remove: va=%08llx in locked TLB",
1.127     chs      2095:                              (long long)va);
1.4       eeh      2096: #endif
1.73      eeh      2097:
1.127     chs      2098:                data = pseg_get(pm, va);
1.254     mrg      2099:                if ((data & TLB_V) == 0) {
1.127     chs      2100:                        continue;
                   2101:                }
                   2102:
                   2103:                flush = TRUE;
                   2104:                /* First remove the pv entry, if there is one */
                   2105:                pa = data & TLB_PA_MASK;
                   2106:                pg = PHYS_TO_VM_PAGE(pa);
                   2107:                if (pg) {
                   2108:                        pv = pmap_remove_pv(pm, va, pg);
                   2109:                        if (pv != NULL) {
1.217     ad       2110:                                /* free it */
                   2111:                                pv->pv_next = freepv;
                   2112:                                freepv = pv;
1.1       eeh      2113:                        }
1.127     chs      2114:                }
                   2115:
                   2116:                /*
                   2117:                 * We need to flip the valid bit and
                   2118:                 * clear the access statistics.
                   2119:                 */
                   2120:
1.158     martin   2121:                rv = pseg_set(pm, va, 0, 0);
                   2122:                if (rv & 1)
                   2123:                        panic("pmap_remove: pseg_set needed spare, rv=%d!\n",
                   2124:                            rv);
                   2125:
1.127     chs      2126:                DPRINTF(PDB_REMOVE, (" clearing seg %x pte %x\n",
                   2127:                                     (int)va_to_seg(va), (int)va_to_pte(va)));
                   2128:                REMOVE_STAT(removes);
                   2129:
1.216     martin   2130:                if (pm != pmap_kernel() && !pmap_has_ctx(pm))
1.127     chs      2131:                        continue;
1.143     chs      2132:
                   2133:                /*
1.262     mrg      2134:                 * if the pmap is being torn down, don't bother flushing,
                   2135:                 * we already have done so.
1.143     chs      2136:                 */
                   2137:
                   2138:                if (!pm->pm_refs)
                   2139:                        continue;
1.127     chs      2140:
                   2141:                /*
                   2142:                 * Here we assume nothing can get into the TLB
                   2143:                 * unless it has a PTE.
                   2144:                 */
                   2145:
1.210     martin   2146:                KASSERT(pmap_ctx(pm)>=0);
1.212     nakayama 2147:                tsb_invalidate(va, pm);
1.143     chs      2148:                REMOVE_STAT(tflushes);
1.212     nakayama 2149:                tlb_flush_pte(va, pm);
1.255     mrg      2150:                dcache_flush_page_all(pa);
1.4       eeh      2151:        }
1.255     mrg      2152:        if (flush && pm->pm_refs)
1.127     chs      2153:                REMOVE_STAT(flushes);
                   2154:        DPRINTF(PDB_REMOVE, ("\n"));
1.1       eeh      2155:        pv_check();
1.203     ad       2156:        mutex_exit(&pmap_lock);
1.217     ad       2157:
                   2158:        /* Catch up on deferred frees. */
                   2159:        for (; freepv != NULL; freepv = pv) {
                   2160:                pv = freepv->pv_next;
                   2161:                pool_cache_put(&pmap_pv_cache, freepv);
                   2162:        }
1.1       eeh      2163: }
                   2164:
                   2165: /*
                   2166:  * Change the protection on the specified range of this pmap.
                   2167:  */
                   2168: void
1.233     dsl      2169: pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       eeh      2170: {
1.4       eeh      2171:        paddr_t pa;
1.1       eeh      2172:        int64_t data;
1.127     chs      2173:        struct vm_page *pg;
                   2174:        pv_entry_t pv;
1.158     martin   2175:        int rv;
1.143     chs      2176:
1.127     chs      2177:        KASSERT(pm != pmap_kernel() || eva < INTSTACK || sva > EINTSTACK);
                   2178:        KASSERT(pm != pmap_kernel() || eva < kdata || sva > ekdata);
1.102     eeh      2179:
1.1       eeh      2180:        if (prot == VM_PROT_NONE) {
                   2181:                pmap_remove(pm, sva, eva);
                   2182:                return;
                   2183:        }
1.156     pk       2184:
1.240     nakayama 2185:        sva = trunc_page(sva);
1.277     mrg      2186:        mutex_enter(&pmap_lock);
1.138     thorpej  2187:        for (; sva < eva; sva += PAGE_SIZE) {
1.278     mrg      2188: #ifdef PMAP_DEBUG
1.1       eeh      2189:                /*
                   2190:                 * Is this part of the permanent 4MB mapping?
                   2191:                 */
1.156     pk       2192:                if (pm == pmap_kernel() && sva >= ktext &&
1.127     chs      2193:                    sva < roundup(ekdata, 4 * MEG)) {
1.277     mrg      2194:                        mutex_exit(&pmap_lock);
1.156     pk       2195:                        prom_printf("pmap_protect: va=%08x in locked TLB\n",
1.127     chs      2196:                            sva);
1.156     pk       2197:                        prom_abort();
1.1       eeh      2198:                        return;
                   2199:                }
1.127     chs      2200: #endif
                   2201:                DPRINTF(PDB_CHANGEPROT, ("pmap_protect: va %p\n",
                   2202:                    (void *)(u_long)sva));
                   2203:                data = pseg_get(pm, sva);
                   2204:                if ((data & TLB_V) == 0) {
                   2205:                        continue;
                   2206:                }
                   2207:
                   2208:                pa = data & TLB_PA_MASK;
                   2209:                DPRINTF(PDB_CHANGEPROT|PDB_REF,
                   2210:                        ("pmap_protect: va=%08x data=%08llx "
1.156     pk       2211:                         "seg=%08x pte=%08x\n",
1.127     chs      2212:                         (u_int)sva, (long long)pa, (int)va_to_seg(sva),
                   2213:                         (int)va_to_pte(sva)));
1.1       eeh      2214:
1.127     chs      2215:                pg = PHYS_TO_VM_PAGE(pa);
                   2216:                if (pg) {
1.266     uebayasi 2217:                        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                   2218:
1.127     chs      2219:                        /* Save REF/MOD info */
1.266     uebayasi 2220:                        pv = &md->mdpg_pvh;
1.127     chs      2221:                        if (data & TLB_ACCESS)
                   2222:                                pv->pv_va |= PV_REF;
1.156     pk       2223:                        if (data & TLB_MODIFY)
1.127     chs      2224:                                pv->pv_va |= PV_MOD;
                   2225:                }
                   2226:
                   2227:                /* Just do the pmap and TSB, not the pv_list */
1.143     chs      2228:                if ((prot & VM_PROT_WRITE) == 0)
                   2229:                        data &= ~(TLB_W|TLB_REAL_W);
                   2230:                if ((prot & VM_PROT_EXECUTE) == 0)
                   2231:                        data &= ~(TLB_EXEC);
                   2232:
1.158     martin   2233:                rv = pseg_set(pm, sva, data, 0);
                   2234:                if (rv & 1)
                   2235:                        panic("pmap_protect: pseg_set needs spare! rv=%d\n",
                   2236:                            rv);
1.73      eeh      2237:
1.216     martin   2238:                if (pm != pmap_kernel() && !pmap_has_ctx(pm))
1.127     chs      2239:                        continue;
1.143     chs      2240:
1.210     martin   2241:                KASSERT(pmap_ctx(pm)>=0);
1.212     nakayama 2242:                tsb_invalidate(sva, pm);
                   2243:                tlb_flush_pte(sva, pm);
1.1       eeh      2244:        }
                   2245:        pv_check();
1.277     mrg      2246:        mutex_exit(&pmap_lock);
1.1       eeh      2247: }
                   2248:
                   2249: /*
                   2250:  * Extract the physical page address associated
                   2251:  * with the given map/virtual_address pair.
                   2252:  */
1.185     thorpej  2253: bool
1.232     dsl      2254: pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
1.1       eeh      2255: {
1.4       eeh      2256:        paddr_t pa;
1.197     martin   2257:        int64_t data = 0;
1.1       eeh      2258:
1.127     chs      2259:        if (pm == pmap_kernel() && va >= kdata && va < roundup(ekdata, 4*MEG)) {
1.56      eeh      2260:                /* Need to deal w/locked TLB entry specially. */
1.168     cdi      2261:                pa = pmap_kextract(va);
1.127     chs      2262:                DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n",
                   2263:                                      (u_long)va, (unsigned long long)pa));
1.225     nakayama 2264:                if (pap != NULL)
                   2265:                        *pap = pa;
                   2266:                return TRUE;
1.127     chs      2267:        } else if (pm == pmap_kernel() && va >= ktext && va < ektext) {
1.1       eeh      2268:                /* Need to deal w/locked TLB entry specially. */
1.168     cdi      2269:                pa = pmap_kextract(va);
1.127     chs      2270:                DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n",
                   2271:                    (u_long)va, (unsigned long long)pa));
1.225     nakayama 2272:                if (pap != NULL)
                   2273:                        *pap = pa;
                   2274:                return TRUE;
1.178     mrg      2275:        } else if (pm == pmap_kernel() && va >= INTSTACK && va < (INTSTACK + 64*KB)) {
1.174     mrg      2276:                pa = (paddr_t)(curcpu()->ci_paddr - INTSTACK + va);
1.178     mrg      2277:                DPRINTF(PDB_EXTRACT, ("pmap_extract (intstack): va=%lx pa=%llx\n",
                   2278:                    (u_long)va, (unsigned long long)pa));
1.183     martin   2279:                if (pap != NULL)
                   2280:                        *pap = pa;
                   2281:                return TRUE;
1.127     chs      2282:        } else {
1.197     martin   2283:                data = pseg_get(pm, va);
                   2284:                pa = data & TLB_PA_MASK;
1.1       eeh      2285:                if (pmapdebug & PDB_EXTRACT) {
1.127     chs      2286:                        paddr_t npa = ldxa((vaddr_t)&pm->pm_segs[va_to_seg(va)],
                   2287:                                           ASI_PHYS_CACHED);
                   2288:                        printf("pmap_extract: va=%p segs[%ld]=%llx",
                   2289:                               (void *)(u_long)va, (long)va_to_seg(va),
                   2290:                               (unsigned long long)npa);
1.123     eeh      2291:                        if (npa) {
1.127     chs      2292:                                npa = (paddr_t)
                   2293:                                        ldxa((vaddr_t)&((paddr_t *)(u_long)npa)
                   2294:                                             [va_to_dir(va)],
                   2295:                                             ASI_PHYS_CACHED);
                   2296:                                printf(" segs[%ld][%ld]=%lx",
                   2297:                                       (long)va_to_seg(va),
                   2298:                                       (long)va_to_dir(va), (long)npa);
1.8       eeh      2299:                        }
1.123     eeh      2300:                        if (npa)        {
1.127     chs      2301:                                npa = (paddr_t)
                   2302:                                        ldxa((vaddr_t)&((paddr_t *)(u_long)npa)
                   2303:                                             [va_to_pte(va)],
                   2304:                                             ASI_PHYS_CACHED);
                   2305:                                printf(" segs[%ld][%ld][%ld]=%lx",
1.156     pk       2306:                                       (long)va_to_seg(va),
1.127     chs      2307:                                       (long)va_to_dir(va),
                   2308:                                       (long)va_to_pte(va), (long)npa);
1.8       eeh      2309:                        }
1.25      eeh      2310:                        printf(" pseg_get: %lx\n", (long)pa);
1.1       eeh      2311:                }
                   2312:        }
1.198     martin   2313:        if ((data & TLB_V) == 0)
                   2314:                return (FALSE);
1.42      thorpej  2315:        if (pap != NULL)
1.127     chs      2316:                *pap = pa + (va & PGOFSET);
1.42      thorpej  2317:        return (TRUE);
1.1       eeh      2318: }
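                         /*
                          * Typical use, as a sketch: MD code that needs the
                          * physical address backing a kernel virtual address
                          * might do
                          *
                          *      paddr_t pa;
                          *
                          *      if (!pmap_extract(pmap_kernel(), va, &pa))
                          *              panic("va %p has no mapping", (void *)va);
                          *
                          * Passing a NULL pap is allowed when only the
                          * existence of a mapping matters.
                          */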
                   2319:
                   2320: /*
1.143     chs      2321:  * Change protection on a kernel address.
                   2322:  * This should only be called from MD code.
                   2323:  */
                   2324: void
1.232     dsl      2325: pmap_kprotect(vaddr_t va, vm_prot_t prot)
1.143     chs      2326: {
                   2327:        struct pmap *pm = pmap_kernel();
                   2328:        int64_t data;
1.158     martin   2329:        int rv;
1.143     chs      2330:
                   2331:        data = pseg_get(pm, va);
1.158     martin   2332:        KASSERT(data & TLB_V);
1.143     chs      2333:        if (prot & VM_PROT_WRITE) {
                   2334:                data |= (TLB_W|TLB_REAL_W);
                   2335:        } else {
                   2336:                data &= ~(TLB_W|TLB_REAL_W);
                   2337:        }
1.158     martin   2338:        rv = pseg_set(pm, va, data, 0);
                   2339:        if (rv & 1)
                   2340:                panic("pmap_kprotect: pseg_set needs spare! rv=%d", rv);
1.210     martin   2341:        KASSERT(pmap_ctx(pm)>=0);
1.212     nakayama 2342:        tsb_invalidate(va, pm);
                   2343:        tlb_flush_pte(va, pm);
1.143     chs      2344: }
                   2345:
                   2346: /*
1.1       eeh      2347:  * Return the number bytes that pmap_dumpmmu() will dump.
                   2348:  */
                   2349: int
1.234     cegger   2350: pmap_dumpsize(void)
1.1       eeh      2351: {
1.2       eeh      2352:        int     sz;
                   2353:
                   2354:        sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
1.201     martin   2355:        sz += kernel_tlb_slots * sizeof(struct cpu_kcore_4mbseg);
1.168     cdi      2356:        sz += phys_installed_size * sizeof(phys_ram_seg_t);
1.2       eeh      2357:
                   2358:        return btodb(sz + DEV_BSIZE - 1);
1.1       eeh      2359: }
                   2360:
                   2361: /*
                   2362:  * Write the mmu contents to the dump device.
                   2363:  * This gets appended to the end of a crash dump since
                   2364:  * there is no in-core copy of kernel memory mappings.
1.2       eeh      2365:  *
                   2366:  * Write the core dump headers and MD data to the dump device.
                   2367:  * We dump the following items:
1.156     pk       2368:  *
1.2       eeh      2369:  *     kcore_seg_t              MI header (defined in <sys/kcore.h>)
                   2370:  *     cpu_kcore_hdr_t          MD header (defined in <machine/kcore.h>)
1.168     cdi      2371:  *     phys_ram_seg_t[phys_installed_size]  physical memory segments
1.1       eeh      2372:  */
                   2373: int
1.186     christos 2374: pmap_dumpmmu(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t blkno)
1.1       eeh      2375: {
1.28      eeh      2376:        kcore_seg_t     *kseg;
                   2377:        cpu_kcore_hdr_t *kcpu;
1.2       eeh      2378:        phys_ram_seg_t  memseg;
1.201     martin   2379:        struct cpu_kcore_4mbseg ktlb;
1.127     chs      2380:        int     error = 0;
1.201     martin   2381:        int     i;
1.127     chs      2382:        int     buffer[dbtob(1) / sizeof(int)];
                   2383:        int     *bp, *ep;
1.2       eeh      2384:
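                         /*
                          * EXPEDITE copies n bytes (rounded up to a multiple
                          * of sizeof(int)) from p into the one-disk-block
                          * bounce buffer above, writing the buffer out via
                          * (*dump)() each time it fills; a final partially
                          * filled buffer is flushed by the caller at the end.
                          */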
                   2385: #define EXPEDITE(p,n) do {                                             \
1.141     nakayama 2386:        int *sp = (void *)(p);                                          \
1.2       eeh      2387:        int sz = (n);                                                   \
                   2388:        while (sz > 0) {                                                \
                   2389:                *bp++ = *sp++;                                          \
                   2390:                if (bp >= ep) {                                         \
                   2391:                        error = (*dump)(dumpdev, blkno,                 \
1.186     christos 2392:                                        (void *)buffer, dbtob(1));      \
1.2       eeh      2393:                        if (error != 0)                                 \
                   2394:                                return (error);                         \
                   2395:                        ++blkno;                                        \
                   2396:                        bp = buffer;                                    \
                   2397:                }                                                       \
                   2398:                sz -= 4;                                                \
                   2399:        }                                                               \
                   2400: } while (0)
                   2401:
                   2402:        /* Setup bookkeeping pointers */
                   2403:        bp = buffer;
                   2404:        ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
                   2405:
                   2406:        /* Fill in MI segment header */
1.28      eeh      2407:        kseg = (kcore_seg_t *)bp;
                   2408:        CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
                   2409:        kseg->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
1.2       eeh      2410:
                   2411:        /* Fill in MD segment header (interpreted by MD part of libkvm) */
1.28      eeh      2412:        kcpu = (cpu_kcore_hdr_t *)((long)bp + ALIGN(sizeof(kcore_seg_t)));
1.271     mrg      2413:        kcpu->cputype = cputyp;
1.170     cdi      2414:        kcpu->kernbase = (uint64_t)KERNBASE;
                   2415:        kcpu->cpubase = (uint64_t)CPUINFO_VA;
1.70      eeh      2416:
                   2417:        /* Describe the locked text segment */
1.170     cdi      2418:        kcpu->ktextbase = (uint64_t)ktext;
                   2419:        kcpu->ktextp = (uint64_t)ktextp;
1.201     martin   2420:        kcpu->ktextsz = (uint64_t)ektext - ktext;
                   2421:        if (kcpu->ktextsz > 4*MEG)
                   2422:                kcpu->ktextsz = 0;      /* old version can not work */
1.70      eeh      2423:
                   2424:        /* Describe locked data segment */
1.170     cdi      2425:        kcpu->kdatabase = (uint64_t)kdata;
                   2426:        kcpu->kdatap = (uint64_t)kdatap;
                   2427:        kcpu->kdatasz = (uint64_t)ekdatap - kdatap;
1.70      eeh      2428:
1.201     martin   2429:        /* new version of locked segments description */
                   2430:        kcpu->newmagic = SPARC64_KCORE_NEWMAGIC;
                   2431:        kcpu->num4mbsegs = kernel_tlb_slots;
                   2432:        kcpu->off4mbsegs = ALIGN(sizeof(cpu_kcore_hdr_t));
                   2433:
1.209     martin   2434:        /* description of per-cpu mappings */
                   2435:        kcpu->numcpuinfos = sparc_ncpus;
                   2436:        kcpu->percpusz = 64 * 1024;     /* used to be 128k for some time */
                   2437:        kcpu->thiscpu = cpu_number();   /* which cpu is doing this dump */
                   2438:        kcpu->cpusp = cpu0paddr - 64 * 1024 * sparc_ncpus;
                   2439:
1.70      eeh      2440:        /* Now the memsegs */
1.168     cdi      2441:        kcpu->nmemseg = phys_installed_size;
1.201     martin   2442:        kcpu->memsegoffset = kcpu->off4mbsegs
                   2443:                + kernel_tlb_slots * sizeof(struct cpu_kcore_4mbseg);
1.70      eeh      2444:
                   2445:        /* Now we need to point this at our kernel pmap. */
1.28      eeh      2446:        kcpu->nsegmap = STSZ;
1.170     cdi      2447:        kcpu->segmapoffset = (uint64_t)pmap_kernel()->pm_physaddr;
1.2       eeh      2448:
                   2449:        /* Note: we have assumed everything fits in buffer[] so far... */
1.28      eeh      2450:        bp = (int *)((long)kcpu + ALIGN(sizeof(cpu_kcore_hdr_t)));
1.2       eeh      2451:
1.201     martin   2452:        /* write locked kernel 4MB TLBs */
                   2453:        for (i = 0; i < kernel_tlb_slots; i++) {
                   2454:                ktlb.va = kernel_tlbs[i].te_va;
                   2455:                ktlb.pa = kernel_tlbs[i].te_pa;
                   2456:                EXPEDITE(&ktlb, sizeof(ktlb));
                   2457:        }
                   2458:
                   2459:        /* write memsegs */
1.168     cdi      2460:        for (i = 0; i < phys_installed_size; i++) {
                   2461:                memseg.start = phys_installed[i].start;
                   2462:                memseg.size = phys_installed[i].size;
1.2       eeh      2463:                EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
                   2464:        }
                   2465:
                   2466:        if (bp != buffer)
1.186     christos 2467:                error = (*dump)(dumpdev, blkno++, (void *)buffer, dbtob(1));
1.2       eeh      2468:
                   2469:        return (error);
1.1       eeh      2470: }
                   2471:
                   2472: /*
1.184     wiz      2473:  * Determine (non)existence of physical page
1.1       eeh      2474:  */
1.127     chs      2475: int
1.171     cdi      2476: pmap_pa_exists(paddr_t pa)
1.1       eeh      2477: {
1.168     cdi      2478:        int i;
1.1       eeh      2479:
                   2480:        /* Just go through the physical memory list & see if we're there */
1.168     cdi      2481:        for (i = 0; i < phys_installed_size; i++) {
                   2482:                if ((phys_installed[i].start <= pa) &&
                   2483:                                (phys_installed[i].start +
                   2484:                                 phys_installed[i].size > pa))
1.1       eeh      2485:                        return 1;
1.168     cdi      2486:        }
1.1       eeh      2487:        return 0;
                   2488: }
                   2489:
                   2490: /*
                   2491:  * Look up the appropriate TSB entry.
                   2492:  *
                   2493:  * Here is the full official pseudo code:
                   2494:  *
                   2495:  */
                   2496:
                   2497: #ifdef NOTYET
                   2498: int64 GenerateTSBPointer(
                   2499:        int64 va,               /* Missing VA                   */
                   2500:        PointerType type,       /* 8K_POINTER or 16K_POINTER    */
                   2501:        int64 TSBBase,          /* TSB Register[63:13] << 13    */
                   2502:        Boolean split,          /* TSB Register[12]             */
                   2503:        int TSBsize)            /* TSB Register[2:0]            */
                   2504: {
                   2505:        int64 vaPortion;
                   2506:        int64 TSBBaseMask;
                   2507:        int64 splitMask;
1.156     pk       2508:
1.1       eeh      2509:        /* TSBBaseMask marks the bits from TSB Base Reg         */
                   2510:        TSBBaseMask = 0xffffffffffffe000 <<
                   2511:                (split? (TSBsize + 1) : TSBsize);
                   2512:
                   2513:        /* Shift va towards lsb appropriately and               */
                   2514:        /* zero out the original va page offset                 */
                   2515:        vaPortion = (va >> ((type == 8K_POINTER)? 9: 12)) &
                   2516:                0xfffffffffffffff0;
1.156     pk       2517:
1.1       eeh      2518:        if (split) {
                   2519:                /* There's only one bit in question for split   */
                   2520:                splitMask = 1 << (13 + TSBsize);
                   2521:                if (type == 8K_POINTER)
                   2522:                        /* Make sure we're in the lower half    */
                   2523:                        vaPortion &= ~splitMask;
                   2524:                else
                   2525:                        /* Make sure we're in the upper half    */
                   2526:                        vaPortion |= splitMask;
                   2527:        }
                   2528:        return (TSBBase & TSBBaseMask) | (vaPortion & ~TSBBaseMask);
                   2529: }
                   2530: #endif
                   2531: /*
                   2532:  * Of course, since we are not using a split TSB or variable page sizes,
1.156     pk       2533:  * we can optimize this a bit.
1.1       eeh      2534:  *
                   2535:  * The following only works for a unified 8K TSB.  It will find the slot
                   2536:  * for that particular va and return it.  IT MAY BE FOR ANOTHER MAPPING!
                   2537:  */
                   2538: int
1.170     cdi      2539: ptelookup_va(vaddr_t va)
1.1       eeh      2540: {
1.9       eeh      2541:        long tsbptr;
1.127     chs      2542: #define TSBBASEMASK    (0xffffffffffffe000LL << tsbsize)
1.1       eeh      2543:
1.127     chs      2544:        tsbptr = (((va >> 9) & 0xfffffffffffffff0LL) & ~TSBBASEMASK);
                   2545:        return (tsbptr / sizeof(pte_t));
1.1       eeh      2546: }
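                         /*
                          * Worked example (a sketch, assuming tsbsize == 0,
                          * i.e. a 512-entry TSB, and sizeof(pte_t) == 16):
                          * for va 0x2000, tsbptr = ((0x2000 >> 9) & ~0xfLL)
                          * & 0x1fff = 0x10, so the slot returned is
                          * 0x10 / 16 = 1.  Consecutive 8K pages thus map to
                          * consecutive slots, wrapping modulo the TSB size.
                          */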
                   2547:
1.5       eeh      2548: /*
                   2549:  * Sync the MOD/REF flags between the PTEs and the head of the pv list.
                   2550:  */
                   2551:
1.185     thorpej  2552: bool
1.232     dsl      2553: pmap_clear_modify(struct vm_page *pg)
1.1       eeh      2554: {
1.266     uebayasi 2555:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127     chs      2556:        pv_entry_t pv;
1.158     martin   2557:        int rv;
1.143     chs      2558:        int changed = 0;
1.51      mrg      2559: #ifdef DEBUG
1.43      eeh      2560:        int modified = 0;
1.127     chs      2561:
                   2562:        DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify(%p)\n", pg));
1.1       eeh      2563:
1.43      eeh      2564:        modified = pmap_is_modified(pg);
                   2565: #endif
1.203     ad       2566:        mutex_enter(&pmap_lock);
1.1       eeh      2567:        /* Clear the modify bit in all mappings */
1.266     uebayasi 2568:        pv = &md->mdpg_pvh;
1.4       eeh      2569: #ifdef DEBUG
                   2570:        if (pv->pv_va & PV_MOD)
                   2571:                pv->pv_va |= PV_WE;     /* Remember this was modified */
                   2572: #endif
1.225     nakayama 2573:        if (pv->pv_va & PV_MOD) {
1.4       eeh      2574:                changed |= 1;
1.225     nakayama 2575:                pv->pv_va &= ~PV_MOD;
                   2576:        }
1.127     chs      2577: #ifdef DEBUG
1.17      eeh      2578:        if (pv->pv_next && !pv->pv_pmap) {
                   2579:                printf("pmap_clear_modify: npv but no pmap for pv %p\n", pv);
                   2580:                Debugger();
                   2581:        }
                   2582: #endif
1.137     thorpej  2583:        if (pv->pv_pmap != NULL) {
1.1       eeh      2584:                for (; pv; pv = pv->pv_next) {
                   2585:                        int64_t data;
1.127     chs      2586:                        struct pmap *pmap = pv->pv_pmap;
                   2587:                        vaddr_t va = pv->pv_va & PV_VAMASK;
1.4       eeh      2588:
                   2589:                        /* First clear the mod bit in the PTE and make it R/O */
1.127     chs      2590:                        data = pseg_get(pmap, va);
1.158     martin   2591:                        KASSERT(data & TLB_V);
1.1       eeh      2592:                        /* Need to both clear the modify and write bits */
1.127     chs      2593:                        if (data & TLB_MODIFY)
1.4       eeh      2594:                                changed |= 1;
1.41      eeh      2595: #ifdef HWREF
1.110     chs      2596:                        data &= ~(TLB_MODIFY|TLB_W);
1.41      eeh      2597: #else
                   2598:                        data &= ~(TLB_MODIFY|TLB_W|TLB_REAL_W);
                   2599: #endif
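                                                /*
                                                 * Inferred note: clearing
                                                 * TLB_W while leaving
                                                 * TLB_REAL_W set (the HWREF
                                                 * case) forces the next write
                                                 * to fault, letting the fault
                                                 * handler set TLB_MODIFY
                                                 * again.
                                                 */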
1.158     martin   2600:                        rv = pseg_set(pmap, va, data, 0);
                   2601:                        if (rv & 1)
                   2602:                                printf("pmap_clear_modify: pseg_set needs"
                   2603:                                    " spare! rv=%d\n", rv);
1.216     martin   2604:                        if (pmap_is_on_mmu(pmap)) {
1.210     martin   2605:                                KASSERT(pmap_ctx(pmap)>=0);
1.212     nakayama 2606:                                tsb_invalidate(va, pmap);
                   2607:                                tlb_flush_pte(va, pmap);
1.41      eeh      2608:                        }
1.4       eeh      2609:                        /* Then clear the mod bit in the pv */
1.225     nakayama 2610:                        if (pv->pv_va & PV_MOD) {
1.4       eeh      2611:                                changed |= 1;
1.225     nakayama 2612:                                pv->pv_va &= ~PV_MOD;
                   2613:                        }
1.4       eeh      2614:                }
1.137     thorpej  2615:        }
1.1       eeh      2616:        pv_check();
1.203     ad       2617:        mutex_exit(&pmap_lock);
1.5       eeh      2618: #ifdef DEBUG
1.127     chs      2619:        DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify: pg %p %s\n", pg,
                   2620:            (changed ? "was modified" : "was not modified")));
1.280     hannken  2621:        if (modified && modified != changed) {
1.127     chs      2622:                printf("pmap_clear_modify: modified %d changed %d\n",
                   2623:                       modified, changed);
1.43      eeh      2624:                Debugger();
1.280     hannken  2625:        }
1.25      eeh      2626: #endif
1.4       eeh      2627:        return (changed);
1.1       eeh      2628: }
                   2629:
1.185     thorpej  2630: bool
1.232     dsl      2631: pmap_clear_reference(struct vm_page *pg)
1.1       eeh      2632: {
1.266     uebayasi 2633:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127     chs      2634:        pv_entry_t pv;
1.158     martin   2635:        int rv;
1.143     chs      2636:        int changed = 0;
1.51      mrg      2637: #ifdef DEBUG
1.43      eeh      2638:        int referenced = 0;
1.51      mrg      2639: #endif
1.1       eeh      2640:
1.203     ad       2641:        mutex_enter(&pmap_lock);
1.1       eeh      2642: #ifdef DEBUG
1.127     chs      2643:        DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_reference(%p)\n", pg));
1.221     martin   2644:        referenced = pmap_is_referenced_locked(pg);
1.43      eeh      2645: #endif
1.1       eeh      2646:        /* Clear all references */
1.266     uebayasi 2647:        pv = &md->mdpg_pvh;
1.225     nakayama 2648:        if (pv->pv_va & PV_REF) {
1.4       eeh      2649:                changed |= 1;
1.225     nakayama 2650:                pv->pv_va &= ~PV_REF;
                   2651:        }
1.156     pk       2652: #ifdef DEBUG
1.17      eeh      2653:        if (pv->pv_next && !pv->pv_pmap) {
                   2654:                printf("pmap_clear_reference: npv but no pmap for pv %p\n", pv);
                   2655:                Debugger();
                   2656:        }
                   2657: #endif
1.25      eeh      2658:        if (pv->pv_pmap != NULL) {
1.1       eeh      2659:                for (; pv; pv = pv->pv_next) {
                   2660:                        int64_t data;
1.127     chs      2661:                        struct pmap *pmap = pv->pv_pmap;
                   2662:                        vaddr_t va = pv->pv_va & PV_VAMASK;
1.73      eeh      2663:
1.127     chs      2664:                        data = pseg_get(pmap, va);
1.158     martin   2665:                        KASSERT(data & TLB_V);
1.127     chs      2666:                        DPRINTF(PDB_CHANGEPROT,
                   2667:                            ("clearing ref pm:%p va:%p ctx:%lx data:%llx\n",
1.210     martin   2668:                             pmap, (void *)(u_long)va,
                   2669:                             (u_long)pmap_ctx(pmap),
1.127     chs      2670:                             (long long)data));
1.25      eeh      2671: #ifdef HWREF
1.225     nakayama 2672:                        if (data & TLB_ACCESS) {
1.4       eeh      2673:                                changed |= 1;
1.225     nakayama 2674:                                data &= ~TLB_ACCESS;
                   2675:                        }
1.25      eeh      2676: #else
                   2677:                        if (data < 0)
                   2678:                                changed |= 1;
                   2679:                        data = 0;
1.1       eeh      2680: #endif
1.158     martin   2681:                        rv = pseg_set(pmap, va, data, 0);
                   2682:                        if (rv & 1)
                   2683:                                panic("pmap_clear_reference: pseg_set needs"
                   2684:                                    " spare! rv=%d\n", rv);
1.216     martin   2685:                        if (pmap_is_on_mmu(pmap)) {
1.210     martin   2686:                                KASSERT(pmap_ctx(pmap)>=0);
1.212     nakayama 2687:                                tsb_invalidate(va, pmap);
                   2688:                                tlb_flush_pte(va, pmap);
1.41      eeh      2689:                        }
1.225     nakayama 2690:                        if (pv->pv_va & PV_REF) {
1.4       eeh      2691:                                changed |= 1;
1.225     nakayama 2692:                                pv->pv_va &= ~PV_REF;
                   2693:                        }
1.25      eeh      2694:                }
                   2695:        }
1.255     mrg      2696:        dcache_flush_page_all(VM_PAGE_TO_PHYS(pg));
1.1       eeh      2697:        pv_check();
1.5       eeh      2698: #ifdef DEBUG
1.221     martin   2699:        if (pmap_is_referenced_locked(pg)) {
1.266     uebayasi 2700:                pv = &md->mdpg_pvh;
1.215     martin   2701:                printf("pmap_clear_reference(): %p still referenced "
                   2702:                        "(pmap = %p, ctx = %d)\n", pg, pv->pv_pmap,
                   2703:                        pv->pv_pmap ? pmap_ctx(pv->pv_pmap) : 0);
1.5       eeh      2704:                Debugger();
                   2705:        }
1.127     chs      2706:        DPRINTF(PDB_CHANGEPROT|PDB_REF,
                   2707:            ("pmap_clear_reference: pg %p %s\n", pg,
                   2708:             (changed ? "was referenced" : "was not referenced")));
1.43      eeh      2709:        if (referenced != changed) {
1.127     chs      2710:                printf("pmap_clear_reference: referenced %d changed %d\n",
                   2711:                       referenced, changed);
1.43      eeh      2712:                Debugger();
1.203     ad       2713:        } else {
                   2714:                mutex_exit(&pmap_lock);
                   2715:                return (referenced);
                   2716:        }
1.25      eeh      2717: #endif
1.203     ad       2718:        mutex_exit(&pmap_lock);
1.4       eeh      2719:        return (changed);
1.1       eeh      2720: }
                   2721:
1.185     thorpej  2722: bool
1.232     dsl      2723: pmap_is_modified(struct vm_page *pg)
1.1       eeh      2724: {
1.266     uebayasi 2725:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127     chs      2726:        pv_entry_t pv, npv;
1.221     martin   2727:        bool res = false;
                   2728:
1.1       eeh      2729:        /* Check if any mapping has been modified */
1.266     uebayasi 2730:        pv = &md->mdpg_pvh;
1.127     chs      2731:        if (pv->pv_va & PV_MOD)
1.221     martin   2732:                res = true;
1.1       eeh      2733: #ifdef HWREF
1.127     chs      2734: #ifdef DEBUG
1.17      eeh      2735:        if (pv->pv_next && !pv->pv_pmap) {
                   2736:                printf("pmap_is_modified: npv but no pmap for pv %p\n", pv);
                   2737:                Debugger();
                   2738:        }
                   2739: #endif
1.221     martin   2740:        if (!res && pv->pv_pmap != NULL) {
                   2741:                mutex_enter(&pmap_lock);
                   2742:                for (npv = pv; !res && npv && npv->pv_pmap;
1.127     chs      2743:                     npv = npv->pv_next) {
1.1       eeh      2744:                        int64_t data;
1.127     chs      2745:
                   2746:                        data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
1.158     martin   2747:                        KASSERT(data & TLB_V);
1.127     chs      2748:                        if (data & TLB_MODIFY)
1.221     martin   2749:                                res = true;
1.127     chs      2750:
1.38      eeh      2751:                        /* Migrate modify info to head pv */
1.225     nakayama 2752:                        if (npv->pv_va & PV_MOD) {
1.221     martin   2753:                                res = true;
1.225     nakayama 2754:                                npv->pv_va &= ~PV_MOD;
                   2755:                        }
1.1       eeh      2756:                }
1.221     martin   2757:                /* Save modify info */
                   2758:                if (res)
                   2759:                        pv->pv_va |= PV_MOD;
1.4       eeh      2760: #ifdef DEBUG
1.221     martin   2761:                if (res)
                   2762:                        pv->pv_va |= PV_WE;
1.4       eeh      2763: #endif
1.221     martin   2764:                mutex_exit(&pmap_lock);
                   2765:        }
1.237     martin   2766: #endif
1.1       eeh      2767:
1.221     martin   2768:        DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_is_modified(%p) = %d\n", pg,
                   2769:            res));
1.1       eeh      2770:        pv_check();
1.221     martin   2771:        return res;
1.1       eeh      2772: }
                   2773:
1.221     martin   2774: /*
                   2775:  * Variant of pmap_is_referenced() where the caller already holds pmap_lock
                   2776:  */
                   2777: static bool
                   2778: pmap_is_referenced_locked(struct vm_page *pg)
1.1       eeh      2779: {
1.266     uebayasi 2780:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127     chs      2781:        pv_entry_t pv, npv;
1.221     martin   2782:        bool res = false;
                   2783:
                   2784:        KASSERT(mutex_owned(&pmap_lock));
1.1       eeh      2785:
                   2786:        /* Check if any mapping has been referenced */
1.266     uebayasi 2787:        pv = &md->mdpg_pvh;
1.127     chs      2788:        if (pv->pv_va & PV_REF)
1.221     martin   2789:                return true;
                   2790:
1.156     pk       2791: #ifdef HWREF
1.127     chs      2792: #ifdef DEBUG
1.17      eeh      2793:        if (pv->pv_next && !pv->pv_pmap) {
                   2794:                printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv);
                   2795:                Debugger();
                   2796:        }
                   2797: #endif
1.221     martin   2798:        if (pv->pv_pmap == NULL)
                   2799:                return false;
                   2800:
                   2801:        for (npv = pv; npv; npv = npv->pv_next) {
                   2802:                int64_t data;
1.156     pk       2803:
1.221     martin   2804:                data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
                   2805:                KASSERT(data & TLB_V);
                   2806:                if (data & TLB_ACCESS)
                   2807:                        res = true;
1.127     chs      2808:
1.221     martin   2809:                /* Migrate ref info to head pv */
1.225     nakayama 2810:                if (npv->pv_va & PV_REF) {
1.221     martin   2811:                        res = true;
1.225     nakayama 2812:                        npv->pv_va &= ~PV_REF;
                   2813:                }
1.221     martin   2814:        }
1.38      eeh      2815:        /* Save ref info */
1.221     martin   2816:        if (res)
1.127     chs      2817:                pv->pv_va |= PV_REF;
1.1       eeh      2818: #endif
                   2819:
1.127     chs      2820:        DPRINTF(PDB_CHANGEPROT|PDB_REF,
1.221     martin   2821:                ("pmap_is_referenced(%p) = %d\n", pg, res));
                   2822:        pv_check();
                   2823:        return res;
                   2824: }
                   2825:
                   2826: bool
                   2827: pmap_is_referenced(struct vm_page *pg)
                   2828: {
1.266     uebayasi 2829:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.221     martin   2830:        pv_entry_t pv;
                   2831:        bool res = false;
                   2832:
                   2833:        /* Check if any mapping has been referenced */
1.266     uebayasi 2834:        pv = &md->mdpg_pvh;
1.221     martin   2835:        if (pv->pv_va & PV_REF)
                   2836:                return true;
                   2837:
                   2838: #ifdef HWREF
                   2839: #ifdef DEBUG
                   2840:        if (pv->pv_next && !pv->pv_pmap) {
                   2841:                printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv);
                   2842:                Debugger();
                   2843:        }
                   2844: #endif
                   2845:        if (pv->pv_pmap != NULL) {
                   2846:                mutex_enter(&pmap_lock);
                   2847:                res = pmap_is_referenced_locked(pg);
                   2848:                mutex_exit(&pmap_lock);
                   2849:        }
                   2850: #endif
                   2851:
                   2852:        DPRINTF(PDB_CHANGEPROT|PDB_REF,
                   2853:                ("pmap_is_referenced(%p) = %d\n", pg, res));
1.1       eeh      2854:        pv_check();
1.221     martin   2855:        return res;
1.1       eeh      2856: }
                   2857:
                   2858:
                   2859:
                   2860: /*
1.40      thorpej  2861:  *     Routine:        pmap_unwire
                   2862:  *     Function:       Clear the wired attribute for a map/virtual-address
1.1       eeh      2863:  *                     pair.
                   2864:  *     In/out conditions:
                   2865:  *                     The mapping must already exist in the pmap.
                   2866:  */
                   2867: void
1.232     dsl      2868: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1       eeh      2869: {
                   2870:        int64_t data;
1.158     martin   2871:        int rv;
1.127     chs      2872:
                   2873:        DPRINTF(PDB_MMU_STEAL, ("pmap_unwire(%p, %lx)\n", pmap, va));
1.1       eeh      2874:
                   2875: #ifdef DEBUG
                   2876:        /*
                   2877:         * Is this part of the permanent 4MB mapping?
                   2878:         */
1.156     pk       2879:        if (pmap == pmap_kernel() && va >= ktext &&
1.98      eeh      2880:                va < roundup(ekdata, 4*MEG)) {
1.156     pk       2881:                prom_printf("pmap_unwire: va=%08x in locked TLB\n", va);
                   2882:                prom_abort();
1.1       eeh      2883:                return;
                   2884:        }
1.127     chs      2885: #endif
                   2886:        data = pseg_get(pmap, va & PV_VAMASK);
1.158     martin   2887:        KASSERT(data & TLB_V);
1.40      thorpej  2888:        data &= ~TLB_TSB_LOCK;
1.158     martin   2889:        rv = pseg_set(pmap, va & PV_VAMASK, data, 0);
                   2890:        if (rv & 1)
                   2891:                panic("pmap_unwire: pseg_set needs spare! rv=%d\n", rv);
1.1       eeh      2892:        pv_check();
                   2893: }
                   2894:
                   2895: /*
                   2896:  * Lower the protection on the specified physical page.
                   2897:  *
                   2898:  * Never enable writing as it will break COW
                   2899:  */
1.4       eeh      2900:
                   2901: void
1.232     dsl      2902: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1       eeh      2903: {
1.266     uebayasi 2904:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127     chs      2905:        int64_t clear, set;
                   2906:        int64_t data = 0;
1.158     martin   2907:        int rv;
1.225     nakayama 2908:        pv_entry_t pv, npv, freepv = NULL;
1.127     chs      2909:        struct pmap *pmap;
                   2910:        vaddr_t va;
1.185     thorpej  2911:        bool needflush = FALSE;
1.1       eeh      2912:
1.127     chs      2913:        DPRINTF(PDB_CHANGEPROT,
                   2914:            ("pmap_page_protect: pg %p prot %x\n", pg, prot));
1.1       eeh      2915:
1.203     ad       2916:        mutex_enter(&pmap_lock);
1.266     uebayasi 2917:        pv = &md->mdpg_pvh;
1.1       eeh      2918:        if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
                   2919:                /* copy_on_write */
                   2920:
                   2921:                set = TLB_V;
1.9       eeh      2922:                clear = TLB_REAL_W|TLB_W;
1.38      eeh      2923:                if (VM_PROT_EXECUTE & prot)
1.1       eeh      2924:                        set |= TLB_EXEC;
                   2925:                else
                   2926:                        clear |= TLB_EXEC;
1.38      eeh      2927:                if (VM_PROT_EXECUTE == prot)
1.1       eeh      2928:                        set |= TLB_EXEC_ONLY;
                   2929:
1.156     pk       2930: #ifdef DEBUG
1.17      eeh      2931:                if (pv->pv_next && !pv->pv_pmap) {
1.127     chs      2932:                        printf("pmap_page_protect: no pmap for pv %p\n", pv);
1.17      eeh      2933:                        Debugger();
                   2934:                }
                   2935: #endif
1.1       eeh      2936:                if (pv->pv_pmap != NULL) {
                   2937:                        for (; pv; pv = pv->pv_next) {
1.127     chs      2938:                                pmap = pv->pv_pmap;
                   2939:                                va = pv->pv_va & PV_VAMASK;
                   2940:
                   2941:                                DPRINTF(PDB_CHANGEPROT | PDB_REF,
                   2942:                                        ("pmap_page_protect: "
                   2943:                                         "RO va %p of pg %p...\n",
                   2944:                                         (void *)(u_long)pv->pv_va, pg));
                   2945:                                data = pseg_get(pmap, va);
1.158     martin   2946:                                KASSERT(data & TLB_V);
1.73      eeh      2947:
                   2948:                                /* Save REF/MOD info */
1.127     chs      2949:                                if (data & TLB_ACCESS)
                   2950:                                        pv->pv_va |= PV_REF;
                   2951:                                if (data & TLB_MODIFY)
1.73      eeh      2952:                                        pv->pv_va |= PV_MOD;
                   2953:
1.127     chs      2954:                                data &= ~clear;
                   2955:                                data |= set;
1.158     martin   2956:                                rv = pseg_set(pmap, va, data, 0);
                   2957:                                if (rv & 1)
                   2958:                                        panic("pmap_page_protect: "
                   2959:                                               "pseg_set needs spare! rv=%d\n",
                   2960:                                               rv);
1.216     martin   2961:                                if (pmap_is_on_mmu(pmap)) {
1.210     martin   2962:                                        KASSERT(pmap_ctx(pmap)>=0);
1.212     nakayama 2963:                                        tsb_invalidate(va, pmap);
                   2964:                                        tlb_flush_pte(va, pmap);
1.41      eeh      2965:                                }
1.1       eeh      2966:                        }
                   2967:                }
                   2968:        } else {
                   2969:                /* remove mappings */
1.127     chs      2970:                DPRINTF(PDB_REMOVE,
                   2971:                        ("pmap_page_protect: demapping pg %p\n", pg));
                   2972:
1.159     chs      2973:                /* First remove the entire list of continuation pv's */
1.1       eeh      2974:                for (npv = pv->pv_next; npv; npv = pv->pv_next) {
1.127     chs      2975:                        pmap = npv->pv_pmap;
                   2976:                        va = npv->pv_va & PV_VAMASK;
                   2977:
1.1       eeh      2978:                        /* We're removing npv from pv->pv_next */
1.156     pk       2979:                        DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE,
1.127     chs      2980:                                ("pmap_page_protect: "
                   2981:                                 "demap va %p of pg %p in pmap %p...\n",
                   2982:                                 (void *)(u_long)va, pg, pmap));
                   2983:
1.1       eeh      2984:                        /* clear the entry in the page table */
1.127     chs      2985:                        data = pseg_get(pmap, va);
1.158     martin   2986:                        KASSERT(data & TLB_V);
1.1       eeh      2987:
                   2988:                        /* Save ref/mod info */
1.127     chs      2989:                        if (data & TLB_ACCESS)
1.225     nakayama 2990:                                pv->pv_va |= PV_REF;
1.127     chs      2991:                        if (data & TLB_MODIFY)
1.225     nakayama 2992:                                pv->pv_va |= PV_MOD;
1.1       eeh      2993:                        /* Clear mapping */
1.158     martin   2994:                        rv = pseg_set(pmap, va, 0, 0);
                   2995:                        if (rv & 1)
                   2996:                                panic("pmap_page_protect: pseg_set needs"
                   2997:                                     " spare! rv=%d\n", rv);
1.216     martin   2998:                        if (pmap_is_on_mmu(pmap)) {
1.210     martin   2999:                                KASSERT(pmap_ctx(pmap)>=0);
1.212     nakayama 3000:                                tsb_invalidate(va, pmap);
                   3001:                                tlb_flush_pte(va, pmap);
1.41      eeh      3002:                        }
1.127     chs      3003:                        if (pmap->pm_refs > 0) {
                   3004:                                needflush = TRUE;
                   3005:                        }
1.73      eeh      3006:
1.1       eeh      3007:                        /* free the pv */
                   3008:                        pv->pv_next = npv->pv_next;
1.203     ad       3009:                        npv->pv_next = freepv;
                   3010:                        freepv = npv;
1.1       eeh      3011:                }
                   3012:
                   3013:                /* Then remove the primary pv */
1.156     pk       3014: #ifdef DEBUG
1.17      eeh      3015:                if (pv->pv_next && !pv->pv_pmap) {
1.127     chs      3016:                        printf("pmap_page_protect: no pmap for pv %p\n", pv);
1.17      eeh      3017:                        Debugger();
                   3018:                }
                   3019: #endif
1.1       eeh      3020:                if (pv->pv_pmap != NULL) {
1.127     chs      3021:                        pmap = pv->pv_pmap;
                   3022:                        va = pv->pv_va & PV_VAMASK;
                   3023:
                   3024:                        DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE,
                   3025:                                ("pmap_page_protect: "
                   3026:                                 "demap va %p of pg %p from pm %p...\n",
                   3027:                                 (void *)(u_long)va, pg, pmap));
                   3028:
                   3029:                        data = pseg_get(pmap, va);
1.158     martin   3030:                        KASSERT(data & TLB_V);
1.1       eeh      3031:                        /* Save ref/mod info */
1.156     pk       3032:                        if (data & TLB_ACCESS)
1.1       eeh      3033:                                pv->pv_va |= PV_REF;
1.127     chs      3034:                        if (data & TLB_MODIFY)
1.1       eeh      3035:                                pv->pv_va |= PV_MOD;
1.158     martin   3036:                        rv = pseg_set(pmap, va, 0, 0);
                   3037:                        if (rv & 1)
                   3038:                                panic("pmap_page_protect: pseg_set needs"
                   3039:                                    " spare! rv=%d\n", rv);
1.216     martin   3040:                        if (pmap_is_on_mmu(pmap)) {
1.210     martin   3041:                                KASSERT(pmap_ctx(pmap)>=0);
1.212     nakayama 3042:                                tsb_invalidate(va, pmap);
                   3043:                                tlb_flush_pte(va, pmap);
1.41      eeh      3044:                        }
1.127     chs      3045:                        if (pmap->pm_refs > 0) {
                   3046:                                needflush = TRUE;
                   3047:                        }
1.1       eeh      3048:                        npv = pv->pv_next;
                   3049:                        /* dump the first pv */
                   3050:                        if (npv) {
                   3051:                                /* First save mod/ref bits */
1.159     chs      3052:                                pv->pv_pmap = npv->pv_pmap;
1.225     nakayama 3053:                                pv->pv_va = (pv->pv_va & PV_MASK) | npv->pv_va;
1.73      eeh      3054:                                pv->pv_next = npv->pv_next;
1.203     ad       3055:                                npv->pv_next = freepv;
                   3056:                                freepv = npv;
1.1       eeh      3057:                        } else {
                   3058:                                pv->pv_pmap = NULL;
                   3059:                                pv->pv_next = NULL;
                   3060:                        }
                   3061:                }
1.255     mrg      3062:                if (needflush)
                   3063:                        dcache_flush_page_all(VM_PAGE_TO_PHYS(pg));
1.1       eeh      3064:        }
                   3065:        /* We should really only flush the pages we demapped. */
                   3066:        pv_check();
1.203     ad       3067:        mutex_exit(&pmap_lock);
                   3068:
                   3069:        /* Catch up on deferred frees. */
                   3070:        for (; freepv != NULL; freepv = npv) {
                   3071:                npv = freepv->pv_next;
                   3072:                pool_cache_put(&pmap_pv_cache, freepv);
                   3073:        }
1.1       eeh      3074: }
                   3075:
1.135     martin   3076: #ifdef PMAP_COUNT_DEBUG
1.1       eeh      3077: /*
                   3078:  * count pages in pmap -- this can be slow.
                   3079:  */
                   3080: int
1.171     cdi      3081: pmap_count_res(struct pmap *pm)
1.1       eeh      3082: {
1.127     chs      3083:        int64_t data;
1.8       eeh      3084:        paddr_t *pdir, *ptbl;
1.127     chs      3085:        int i, j, k, n;
                   3086:
1.14      eeh      3087:        /* Don't want one of these pages reused while we're reading it. */
1.203     ad       3088:        mutex_enter(&pmap_lock);
1.1       eeh      3089:        n = 0;
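                          	/*
                          	 * Walk the three-level page table (segment dir ->
                          	 * page dir -> page table) with physically-addressed
                          	 * loads (ASI_PHYS_CACHED) and count the valid (TLB_V)
                          	 * entries.
                          	 */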
1.127     chs      3090:        for (i = 0; i < STSZ; i++) {
                   3091:                pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
                   3092:                                               ASI_PHYS_CACHED);
                   3093:                if (pdir == NULL) {
                   3094:                        continue;
                   3095:                }
                   3096:                for (k = 0; k < PDSZ; k++) {
                   3097:                        ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k],
                   3098:                                                       ASI_PHYS_CACHED);
                   3099:                        if (ptbl == NULL) {
                   3100:                                continue;
                   3101:                        }
                   3102:                        for (j = 0; j < PTSZ; j++) {
                   3103:                                data = (int64_t)ldxa((vaddr_t)&ptbl[j],
                   3104:                                                     ASI_PHYS_CACHED);
                   3105:                                if (data & TLB_V)
                   3106:                                        n++;
1.112     chs      3107:                        }
                   3108:                }
                   3109:        }
1.203     ad       3110:        mutex_exit(&pmap_lock);
1.135     martin   3111:
1.156     pk       3112:        if (pm->pm_stats.resident_count != n)
                   3113:                printf("pmap_count_resident: pm_stats = %ld, counted: %d\n",
1.135     martin   3114:                    pm->pm_stats.resident_count, n);
                   3115:
1.112     chs      3116:        return n;
                   3117: }
                   3118:
                   3119: /*
                   3120:  * count wired pages in pmap -- this can be slow.
                   3121:  */
                   3122: int
1.171     cdi      3123: pmap_count_wired(struct pmap *pm)
1.112     chs      3124: {
1.127     chs      3125:        int64_t data;
1.112     chs      3126:        paddr_t *pdir, *ptbl;
1.127     chs      3127:        int i, j, k, n;
                   3128:
1.112     chs      3129:        /* Don't want one of these pages reused while we're reading it. */
1.273     rmind    3130:        mutex_enter(&pmap_lock);        /* XXX uvmplock */
1.112     chs      3131:        n = 0;
                   3132:        for (i = 0; i < STSZ; i++) {
1.127     chs      3133:                pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
                   3134:                                               ASI_PHYS_CACHED);
                   3135:                if (pdir == NULL) {
                   3136:                        continue;
                   3137:                }
                   3138:                for (k = 0; k < PDSZ; k++) {
                   3139:                        ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k],
                   3140:                                                       ASI_PHYS_CACHED);
                   3141:                        if (ptbl == NULL) {
                   3142:                                continue;
                   3143:                        }
                   3144:                        for (j = 0; j < PTSZ; j++) {
                   3145:                                data = (int64_t)ldxa((vaddr_t)&ptbl[j],
                   3146:                                                     ASI_PHYS_CACHED);
                   3147:                                if (data & TLB_TSB_LOCK)
                   3148:                                        n++;
1.1       eeh      3149:                        }
                   3150:                }
                   3151:        }
1.273     rmind    3152:        mutex_exit(&pmap_lock); /* XXX uvmplock */
1.135     martin   3153:
1.156     pk       3154:        if (pm->pm_stats.wired_count != n)
                   3155:                printf("pmap_count_wired: pm_stats = %ld, counted: %d\n",
1.135     martin   3156:                    pm->pm_stats.wired_count, n);
                   3157:
1.1       eeh      3158:        return n;
                   3159: }
1.135     martin   3160: #endif /* PMAP_COUNT_DEBUG */
1.1       eeh      3161:
1.127     chs      3162: void
                   3163: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
                   3164: {
                   3165:
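                          	/*
                          	 * The proc/va/len arguments are ignored; flushing the
                          	 * whole icache is always sufficient, if heavy-handed.
                          	 */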
                   3166:        blast_icache();
                   3167: }
                   3168:
1.1       eeh      3169: /*
1.127     chs      3170:  * Allocate a hardware context to the given pmap.
1.1       eeh      3171:  */
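                          /*
                           * Contexts are managed per-CPU: context 0 is reserved for the
                           * kernel, and numbers are handed out from ci_pmap_next_ctx
                           * until ci_numctx is hit, at which point every user context on
                           * this CPU is freed, the TSBs and TLB are purged, and
                           * numbering restarts at 1.
                           */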
1.212     nakayama 3172: static int
1.171     cdi      3173: ctx_alloc(struct pmap *pm)
1.1       eeh      3174: {
1.127     chs      3175:        int i, ctx;
                   3176:
1.132     chs      3177:        KASSERT(pm != pmap_kernel());
                   3178:        KASSERT(pm == curproc->p_vmspace->vm_map.pmap);
1.262     mrg      3179:        mutex_enter(&curcpu()->ci_ctx_lock);
1.210     martin   3180:        ctx = curcpu()->ci_pmap_next_ctx++;
1.127     chs      3181:
                   3182:        /*
                   3183:         * if we have run out of contexts, remove all user entries from
                   3184:         * the TSB, TLB and dcache and start over with context 1 again.
                   3185:         */
1.1       eeh      3186:
1.210     martin   3187:        if (ctx == curcpu()->ci_numctx) {
1.247     mrg      3188:                DPRINTF(PDB_CTX_ALLOC|PDB_CTX_FLUSHALL,
1.212     nakayama 3189:                        ("ctx_alloc: cpu%d ran out of contexts %d\n",
                   3190:                         cpu_number(), curcpu()->ci_numctx));
1.127     chs      3191:                write_user_windows();
1.210     martin   3192:                while (!LIST_EMPTY(&curcpu()->ci_pmap_ctxlist)) {
1.212     nakayama 3193: #ifdef MULTIPROCESSOR
1.213     nakayama 3194:                        KASSERT(pmap_ctx(LIST_FIRST(&curcpu()->ci_pmap_ctxlist)) != 0);
1.257     mrg      3195: #endif
1.212     nakayama 3196:                        ctx_free(LIST_FIRST(&curcpu()->ci_pmap_ctxlist),
                   3197:                                 curcpu());
1.25      eeh      3198:                }
1.127     chs      3199:                for (i = TSBENTS - 1; i >= 0; i--) {
1.210     martin   3200:                        if (TSB_TAG_CTX(curcpu()->ci_tsb_dmmu[i].tag) != 0) {
                   3201:                                clrx(&curcpu()->ci_tsb_dmmu[i].data);
1.143     chs      3202:                        }
1.210     martin   3203:                        if (TSB_TAG_CTX(curcpu()->ci_tsb_immu[i].tag) != 0) {
                   3204:                                clrx(&curcpu()->ci_tsb_immu[i].data);
1.127     chs      3205:                        }
                   3206:                }
1.252     mrg      3207:                sp_tlb_flush_all();
1.127     chs      3208:                ctx = 1;
1.210     martin   3209:                curcpu()->ci_pmap_next_ctx = 2;
1.127     chs      3210:        }
1.210     martin   3211:        curcpu()->ci_ctxbusy[ctx] = pm->pm_physaddr;
1.257     mrg      3212:        LIST_INSERT_HEAD(&curcpu()->ci_pmap_ctxlist, pm, pm_list[cpu_number()]);
1.210     martin   3213:        pmap_ctx(pm) = ctx;
1.262     mrg      3214:        mutex_exit(&curcpu()->ci_ctx_lock);
1.247     mrg      3215:        DPRINTF(PDB_CTX_ALLOC, ("ctx_alloc: cpu%d allocated ctx %d\n",
                   3216:                cpu_number(), ctx));
1.127     chs      3217:        return ctx;
1.1       eeh      3218: }
                   3219:
                   3220: /*
                   3221:  * Give away a context.
                   3222:  */
1.212     nakayama 3223: static void
                   3224: ctx_free(struct pmap *pm, struct cpu_info *ci)
                   3225: {
                   3226:        int oldctx;
1.257     mrg      3227:        int cpunum;
1.212     nakayama 3228:
1.263     mrg      3229:        KASSERT(mutex_owned(&ci->ci_ctx_lock));
1.257     mrg      3230:
                   3231: #ifdef MULTIPROCESSOR
                   3232:        cpunum = ci->ci_index;
                   3233: #else
                    3234:        /* Give the compiler a hint. */
                   3235:        cpunum = 0;
                   3236: #endif
                   3237:
                   3238:        oldctx = pm->pm_ctx[cpunum];
1.212     nakayama 3239:        if (oldctx == 0)
                   3240:                return;
                   3241:
                   3242: #ifdef DIAGNOSTIC
                   3243:        if (pm == pmap_kernel())
                   3244:                panic("ctx_free: freeing kernel context");
                   3245:        if (ci->ci_ctxbusy[oldctx] == 0)
                   3246:                printf("ctx_free: freeing free context %d\n", oldctx);
                   3247:        if (ci->ci_ctxbusy[oldctx] != pm->pm_physaddr) {
                   3248:                printf("ctx_free: freeing someone else's context\n "
                   3249:                       "ctxbusy[%d] = %p, pm(%p)->pm_ctx = %p\n",
                   3250:                       oldctx, (void *)(u_long)ci->ci_ctxbusy[oldctx], pm,
                   3251:                       (void *)(u_long)pm->pm_physaddr);
                   3252:                Debugger();
                   3253:        }
                   3254: #endif
                   3255:        /* We should verify it has not been stolen and reallocated... */
1.247     mrg      3256:        DPRINTF(PDB_CTX_ALLOC, ("ctx_free: cpu%d freeing ctx %d\n",
                   3257:                cpu_number(), oldctx));
1.212     nakayama 3258:        ci->ci_ctxbusy[oldctx] = 0UL;
1.257     mrg      3259:        pm->pm_ctx[cpunum] = 0;
                   3260:        LIST_REMOVE(pm, pm_list[cpunum]);
1.212     nakayama 3261: }
1.1       eeh      3262:
                   3263: /*
1.41      eeh      3264:  * Enter the pmap and virtual address into the
                   3265:  * physical to virtual map table.
1.86      eeh      3266:  *
                   3267:  * We enter here with the pmap locked.
1.41      eeh      3268:  */
1.127     chs      3269:
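                          /*
                           * The first mapping of a page lives in the vm_page_md pv
                           * header itself; further mappings are chained behind it.  If
                           * a new VA maps the page at a different cache index
                           * (VA_ALIAS_MASK), the page is marked PV_ALIAS and all its
                           * mappings are made uncacheable until the alias goes away
                           * again (see pmap_remove_pv()).
                           */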
1.41      eeh      3270: void
1.170     cdi      3271: pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa, struct vm_page *pg,
                   3272:              pv_entry_t npv)
1.41      eeh      3273: {
1.266     uebayasi 3274:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127     chs      3275:        pv_entry_t pvh;
                   3276:
1.203     ad       3277:        KASSERT(mutex_owned(&pmap_lock));
                   3278:
1.266     uebayasi 3279:        pvh = &md->mdpg_pvh;
1.127     chs      3280:        DPRINTF(PDB_ENTER, ("pmap_enter: pvh %p: was %lx/%p/%p\n",
                   3281:            pvh, pvh->pv_va, pvh->pv_pmap, pvh->pv_next));
                   3282:        if (pvh->pv_pmap == NULL) {
1.41      eeh      3283:
                   3284:                /*
                   3285:                 * No entries yet, use header as the first entry
                   3286:                 */
1.127     chs      3287:                DPRINTF(PDB_ENTER, ("pmap_enter: first pv: pmap %p va %lx\n",
                   3288:                    pmap, va));
                   3289:                ENTER_STAT(firstpv);
                   3290:                PV_SETVA(pvh, va);
                   3291:                pvh->pv_pmap = pmap;
                   3292:                pvh->pv_next = NULL;
                   3293:                KASSERT(npv == NULL);
1.41      eeh      3294:        } else {
1.127     chs      3295:                if (pg->loan_count == 0 && !(pvh->pv_va & PV_ALIAS)) {
                   3296:
1.41      eeh      3297:                        /*
                   3298:                         * There is at least one other VA mapping this page.
                   3299:                         * Check if they are cache index compatible. If not
                   3300:                         * remove all mappings, flush the cache and set page
                   3301:                         * to be mapped uncached. Caching will be restored
                   3302:                         * when pages are mapped compatible again.
                   3303:                         */
1.127     chs      3304:                        if ((pvh->pv_va ^ va) & VA_ALIAS_MASK) {
                   3305:                                pvh->pv_va |= PV_ALIAS;
1.86      eeh      3306:                                pmap_page_cache(pmap, pa, 0);
1.127     chs      3307:                                ENTER_STAT(ci);
1.41      eeh      3308:                        }
                   3309:                }
1.127     chs      3310:
1.41      eeh      3311:                /*
                   3312:                 * There is at least one other VA mapping this page.
                   3313:                 * Place this entry after the header.
                   3314:                 */
                   3315:
1.127     chs      3316:                DPRINTF(PDB_ENTER, ("pmap_enter: new pv: pmap %p va %lx\n",
                   3317:                    pmap, va));
1.41      eeh      3318:                npv->pv_pmap = pmap;
1.127     chs      3319:                npv->pv_va = va & PV_VAMASK;
                   3320:                npv->pv_next = pvh->pv_next;
                   3321:                pvh->pv_next = npv;
                   3322:
1.177     martin   3323:                if (!npv->pv_next) {
1.127     chs      3324:                        ENTER_STAT(secondpv);
1.177     martin   3325:                }
1.41      eeh      3326:        }
                   3327: }
                   3328:
                   3329: /*
1.1       eeh      3330:  * Remove a physical to virtual address translation.
                   3331:  */
                   3332:
1.127     chs      3333: pv_entry_t
1.170     cdi      3334: pmap_remove_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg)
1.1       eeh      3335: {
1.266     uebayasi 3336:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127     chs      3337:        pv_entry_t pvh, npv, pv;
                   3338:        int64_t data = 0;
1.1       eeh      3339:
1.203     ad       3340:        KASSERT(mutex_owned(&pmap_lock));
                   3341:
1.266     uebayasi 3342:        pvh = &md->mdpg_pvh;
1.127     chs      3343:
                   3344:        DPRINTF(PDB_REMOVE, ("pmap_remove_pv(pm=%p, va=%p, pg=%p)\n", pmap,
                   3345:            (void *)(u_long)va, pg));
1.1       eeh      3346:        pv_check();
1.127     chs      3347:
1.1       eeh      3348:        /*
1.127     chs      3349:         * Remove page from the PV table.
1.1       eeh      3350:         * If it is the first entry on the list, it is actually
                   3351:         * in the header and we must copy the following entry up
                   3352:         * to the header.  Otherwise we must search the list for
                   3353:         * the entry.  In either case we free the now unused entry.
                   3354:         */
1.127     chs      3355:        if (pmap == pvh->pv_pmap && PV_MATCH(pvh, va)) {
                   3356:                data = pseg_get(pvh->pv_pmap, pvh->pv_va & PV_VAMASK);
1.158     martin   3357:                KASSERT(data & TLB_V);
1.127     chs      3358:                npv = pvh->pv_next;
1.1       eeh      3359:                if (npv) {
                   3360:                        /* First save mod/ref bits */
1.127     chs      3361:                        pvh->pv_va = (pvh->pv_va & PV_MASK) | npv->pv_va;
                   3362:                        pvh->pv_next = npv->pv_next;
                   3363:                        pvh->pv_pmap = npv->pv_pmap;
1.1       eeh      3364:                } else {
1.127     chs      3365:                        pvh->pv_pmap = NULL;
                   3366:                        pvh->pv_next = NULL;
                   3367:                        pvh->pv_va &= (PV_REF|PV_MOD);
1.1       eeh      3368:                }
1.159     chs      3369:                REMOVE_STAT(pvfirst);
1.1       eeh      3370:        } else {
1.127     chs      3371:                for (pv = pvh, npv = pvh->pv_next; npv;
                   3372:                     pv = npv, npv = npv->pv_next) {
1.159     chs      3373:                        REMOVE_STAT(pvsearch);
1.127     chs      3374:                        if (pmap == npv->pv_pmap && PV_MATCH(npv, va))
                   3375:                                break;
1.1       eeh      3376:                }
                   3377:                pv->pv_next = npv->pv_next;
1.127     chs      3378:                data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
1.158     martin   3379:                KASSERT(data & TLB_V);
1.1       eeh      3380:        }
                   3381:
                   3382:        /* Save ref/mod info */
1.156     pk       3383:        if (data & TLB_ACCESS)
1.127     chs      3384:                pvh->pv_va |= PV_REF;
                   3385:        if (data & TLB_MODIFY)
                   3386:                pvh->pv_va |= PV_MOD;
1.86      eeh      3387:
                   3388:        /* Check to see if the alias went away */
1.127     chs      3389:        if (pvh->pv_va & PV_ALIAS) {
                   3390:                pvh->pv_va &= ~PV_ALIAS;
                   3391:                for (pv = pvh; pv; pv = pv->pv_next) {
                   3392:                        if ((pv->pv_va ^ pvh->pv_va) & VA_ALIAS_MASK) {
                   3393:                                pvh->pv_va |= PV_ALIAS;
                   3394:                                break;
1.86      eeh      3395:                        }
                   3396:                }
1.127     chs      3397:                if (!(pvh->pv_va & PV_ALIAS))
                   3398:                        pmap_page_cache(pmap, VM_PAGE_TO_PHYS(pg), 1);
1.86      eeh      3399:        }
1.1       eeh      3400:        pv_check();
1.127     chs      3401:        return npv;
1.41      eeh      3402: }
                   3403:
                   3404: /*
                   3405:  *     pmap_page_cache:
                   3406:  *
                   3407:  *     Change all mappings of a page to cached/uncached.
                   3408:  */
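                          /*
                           * Three cases per mapping: PV_NC pages (I/O mappings) lose
                           * both cacheable bits (TLB_CV|TLB_CP); otherwise mode != 0
                           * sets TLB_CV to re-enable caching unless PV_NVC forbids it,
                           * and mode == 0 clears TLB_CV.  Mappings live on the MMU have
                           * their TSB/TLB entries flushed so the new cache bits take
                           * effect.
                           */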
                   3409: void
1.170     cdi      3410: pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
1.41      eeh      3411: {
1.127     chs      3412:        struct vm_page *pg;
1.266     uebayasi 3413:        struct vm_page_md *md;
1.41      eeh      3414:        pv_entry_t pv;
1.127     chs      3415:        vaddr_t va;
1.158     martin   3416:        int rv;
1.73      eeh      3417:
1.271     mrg      3418: #if 0
                   3419:        /*
                   3420:         * Why is this?
                   3421:         */
                   3422:        if (CPU_ISSUN4US || CPU_ISSUN4V)
                   3423:                return;
                   3424: #endif
                   3425:
1.203     ad       3426:        KASSERT(mutex_owned(&pmap_lock));
                   3427:
1.127     chs      3428:        DPRINTF(PDB_ENTER, ("pmap_page_uncache(%llx)\n",
                   3429:            (unsigned long long)pa));
                   3430:        pg = PHYS_TO_VM_PAGE(pa);
1.266     uebayasi 3431:        md = VM_PAGE_TO_MD(pg);
                   3432:        pv = &md->mdpg_pvh;
1.41      eeh      3433:        while (pv) {
1.127     chs      3434:                va = pv->pv_va & PV_VAMASK;
1.86      eeh      3435:                if (pv->pv_va & PV_NC) {
1.158     martin   3436:                        int64_t data;
                   3437:
1.86      eeh      3438:                        /* Non-cached -- I/O mapping */
1.158     martin   3439:                        data = pseg_get(pv->pv_pmap, va);
                   3440:                        KASSERT(data & TLB_V);
                   3441:                        rv = pseg_set(pv->pv_pmap, va,
                   3442:                                     data & ~(TLB_CV|TLB_CP), 0);
                   3443:                        if (rv & 1)
                   3444:                                panic("pmap_page_cache: pseg_set needs"
                   3445:                                     " spare! rv=%d\n", rv);
1.86      eeh      3446:                } else if (mode && (!(pv->pv_va & PV_NVC))) {
1.158     martin   3447:                        int64_t data;
                   3448:
1.41      eeh      3449:                        /* Enable caching */
1.158     martin   3450:                        data = pseg_get(pv->pv_pmap, va);
                   3451:                        KASSERT(data & TLB_V);
                   3452:                        rv = pseg_set(pv->pv_pmap, va, data | TLB_CV, 0);
                   3453:                        if (rv & 1)
                   3454:                                panic("pmap_page_cache: pseg_set needs"
                   3455:                                    " spare! rv=%d\n", rv);
1.41      eeh      3456:                } else {
1.158     martin   3457:                        int64_t data;
                   3458:
1.41      eeh      3459:                        /* Disable caching */
1.158     martin   3460:                        data = pseg_get(pv->pv_pmap, va);
1.254     mrg      3461:                        KASSERT(data & TLB_V);
1.158     martin   3462:                        rv = pseg_set(pv->pv_pmap, va, data & ~TLB_CV, 0);
                   3463:                        if (rv & 1)
                   3464:                                panic("pmap_page_cache: pseg_set needs"
                   3465:                                    " spare! rv=%d\n", rv);
1.41      eeh      3466:                }
1.216     martin   3467:                if (pmap_is_on_mmu(pv->pv_pmap)) {
1.127     chs      3468:                        /* Force reload -- cache bits have changed */
1.210     martin   3469:                        KASSERT(pmap_ctx(pv->pv_pmap)>=0);
1.212     nakayama 3470:                        tsb_invalidate(va, pv->pv_pmap);
                   3471:                        tlb_flush_pte(va, pv->pv_pmap);
1.41      eeh      3472:                }
                   3473:                pv = pv->pv_next;
                   3474:        }
1.1       eeh      3475: }
                   3476:
1.263     mrg      3477: /*
                   3478:  * Some routines to allocate and free PTPs.
                   3479:  */
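                          /*
                           * Before uvm.page_init_done the VM system cannot allocate
                           * pages yet, so they are taken directly from the physical
                           * memory lists with uvm_page_physget() and zeroed by hand;
                           * afterwards uvm_pagealloc() returns a zeroed page directly.
                           */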
1.111     eeh      3480: static int
1.127     chs      3481: pmap_get_page(paddr_t *p)
1.1       eeh      3482: {
1.111     eeh      3483:        struct vm_page *pg;
                   3484:        paddr_t pa;
                   3485:
1.127     chs      3486:        if (uvm.page_init_done) {
                   3487:                pg = uvm_pagealloc(NULL, 0, NULL,
                   3488:                    UVM_PGA_ZERO | UVM_PGA_USERESERVE);
                   3489:                if (pg == NULL)
                   3490:                        return (0);
                   3491:                pa = VM_PAGE_TO_PHYS(pg);
1.111     eeh      3492:        } else {
                   3493:                if (!uvm_page_physget(&pa))
                   3494:                        return (0);
                   3495:                pmap_zero_page(pa);
1.47      eeh      3496:        }
1.111     eeh      3497:        *p = pa;
                   3498:        return (1);
1.1       eeh      3499: }
                   3500:
1.111     eeh      3501: static void
1.262     mrg      3502: pmap_free_page(paddr_t pa, sparc64_cpuset_t cs)
1.1       eeh      3503: {
1.111     eeh      3504:        struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
                   3505:
1.262     mrg      3506:        dcache_flush_page_cpuset(pa, cs);
1.127     chs      3507:        uvm_pagefree(pg);
1.1       eeh      3508: }
                   3509:
1.262     mrg      3510: static void
                   3511: pmap_free_page_noflush(paddr_t pa)
                   3512: {
                   3513:        struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
                   3514:
                   3515:        uvm_pagefree(pg);
                   3516: }
1.111     eeh      3517:
1.8       eeh      3518: #ifdef DDB
1.1       eeh      3519:
1.170     cdi      3520: void db_dump_pv(db_expr_t, int, db_expr_t, const char *);
1.1       eeh      3521: void
1.170     cdi      3522: db_dump_pv(db_expr_t addr, int have_addr, db_expr_t count, const char *modif)
1.1       eeh      3523: {
1.127     chs      3524:        struct vm_page *pg;
1.266     uebayasi 3525:        struct vm_page_md *md;
1.1       eeh      3526:        struct pv_entry *pv;
                   3527:
                   3528:        if (!have_addr) {
                   3529:                db_printf("Need addr for pv\n");
                   3530:                return;
                   3531:        }
                   3532:
1.127     chs      3533:        pg = PHYS_TO_VM_PAGE((paddr_t)addr);
                   3534:        if (pg == NULL) {
                   3535:                db_printf("page is not managed\n");
                   3536:                return;
                   3537:        }
1.266     uebayasi 3538:        md = VM_PAGE_TO_MD(pg);
                   3539:        for (pv = &md->mdpg_pvh; pv; pv = pv->pv_next)
1.81      fvdl     3540:                db_printf("pv@%p: next=%p pmap=%p va=0x%llx\n",
                   3541:                          pv, pv->pv_next, pv->pv_pmap,
                   3542:                          (unsigned long long)pv->pv_va);
1.1       eeh      3543: }
                   3544:
                   3545: #endif
                   3546:
1.43      eeh      3547: #ifdef DEBUG
                   3548: /*
1.111     eeh      3549:  * Test ref/modify handling.
                           */
1.170     cdi      3550: void pmap_testout(void);
1.43      eeh      3551: void
1.234     cegger   3552: pmap_testout(void)
1.43      eeh      3553: {
                   3554:        vaddr_t va;
                   3555:        volatile int *loc;
                   3556:        int val = 0;
1.230     martin   3557:        paddr_t pa;
1.43      eeh      3558:        struct vm_page *pg;
                   3559:        int ref, mod;
                   3560:
                   3561:        /* Allocate a page */
1.138     thorpej  3562:        va = (vaddr_t)(vmmap - PAGE_SIZE);
1.147     kleink   3563:        KASSERT(va != 0);
1.43      eeh      3564:        loc = (int*)va;
                   3565:
1.127     chs      3566:        if (pmap_get_page(&pa) == 0)
                                        panic("pmap_testout: out of memory");
1.111     eeh      3567:        pg = PHYS_TO_VM_PAGE(pa);
1.45      thorpej  3568:        pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
1.108     chris    3569:        pmap_update(pmap_kernel());
1.43      eeh      3570:
                   3571:        /* Now clear reference and modify */
                   3572:        ref = pmap_clear_reference(pg);
                   3573:        mod = pmap_clear_modify(pg);
                   3574:        printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82      mrg      3575:               (void *)(u_long)va, (long)pa,
1.43      eeh      3576:               ref, mod);
                   3577:
                   3578:        /* Check it's properly cleared */
                   3579:        ref = pmap_is_referenced(pg);
                   3580:        mod = pmap_is_modified(pg);
                   3581:        printf("Checking cleared page: ref %d, mod %d\n",
                   3582:               ref, mod);
                   3583:
                   3584:        /* Reference page */
                   3585:        val = *loc;
                   3586:
                   3587:        ref = pmap_is_referenced(pg);
                   3588:        mod = pmap_is_modified(pg);
                   3589:        printf("Referenced page: ref %d, mod %d val %x\n",
                   3590:               ref, mod, val);
                   3591:
                   3592:        /* Now clear reference and modify */
                   3593:        ref = pmap_clear_reference(pg);
                   3594:        mod = pmap_clear_modify(pg);
                   3595:        printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82      mrg      3596:               (void *)(u_long)va, (long)pa,
1.43      eeh      3597:               ref, mod);
1.156     pk       3598:
1.43      eeh      3599:        /* Modify page */
                   3600:        *loc = 1;
                   3601:
                   3602:        ref = pmap_is_referenced(pg);
                   3603:        mod = pmap_is_modified(pg);
                   3604:        printf("Modified page: ref %d, mod %d\n",
                   3605:               ref, mod);
                   3606:
                   3607:        /* Now clear reference and modify */
                   3608:        ref = pmap_clear_reference(pg);
                   3609:        mod = pmap_clear_modify(pg);
                   3610:        printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82      mrg      3611:               (void *)(u_long)va, (long)pa,
1.43      eeh      3612:               ref, mod);
                   3613:
                   3614:        /* Check it's properly cleared */
                   3615:        ref = pmap_is_referenced(pg);
                   3616:        mod = pmap_is_modified(pg);
                   3617:        printf("Checking cleared page: ref %d, mod %d\n",
                   3618:               ref, mod);
                   3619:
                   3620:        /* Modify page */
                   3621:        *loc = 1;
                   3622:
                   3623:        ref = pmap_is_referenced(pg);
                   3624:        mod = pmap_is_modified(pg);
                   3625:        printf("Modified page: ref %d, mod %d\n",
1.73      eeh      3626:               ref, mod);
                   3627:
                   3628:        /* Check pmap_protect() */
                   3629:        pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
1.108     chris    3630:        pmap_update(pmap_kernel());
1.73      eeh      3631:        ref = pmap_is_referenced(pg);
                   3632:        mod = pmap_is_modified(pg);
                   3633:        printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
                   3634:               ref, mod);
                   3635:
                   3636:        /* Now clear reference and modify */
                   3637:        ref = pmap_clear_reference(pg);
                   3638:        mod = pmap_clear_modify(pg);
                   3639:        printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82      mrg      3640:               (void *)(u_long)va, (long)pa,
1.73      eeh      3641:               ref, mod);
                   3642:
                   3643:        /* Modify page */
                   3644:        pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
1.108     chris    3645:        pmap_update(pmap_kernel());
1.73      eeh      3646:        *loc = 1;
                   3647:
                   3648:        ref = pmap_is_referenced(pg);
                   3649:        mod = pmap_is_modified(pg);
                   3650:        printf("Modified page: ref %d, mod %d\n",
                   3651:               ref, mod);
                   3652:
                   3653:        /* Check pmap_protect() */
                   3654:        pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
1.108     chris    3655:        pmap_update(pmap_kernel());
1.73      eeh      3656:        ref = pmap_is_referenced(pg);
                   3657:        mod = pmap_is_modified(pg);
                    3658:        printf("pmap_protect(VM_PROT_NONE): ref %d, mod %d\n",
                   3659:               ref, mod);
                   3660:
                   3661:        /* Now clear reference and modify */
                   3662:        ref = pmap_clear_reference(pg);
                   3663:        mod = pmap_clear_modify(pg);
                   3664:        printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82      mrg      3665:               (void *)(u_long)va, (long)pa,
1.73      eeh      3666:               ref, mod);
                   3667:
                   3668:        /* Modify page */
                   3669:        pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
1.108     chris    3670:        pmap_update(pmap_kernel());
1.73      eeh      3671:        *loc = 1;
                   3672:
                   3673:        ref = pmap_is_referenced(pg);
                   3674:        mod = pmap_is_modified(pg);
                   3675:        printf("Modified page: ref %d, mod %d\n",
                   3676:               ref, mod);
                   3677:
                    3678:        /* Check pmap_page_protect() */
                   3679:        pmap_page_protect(pg, VM_PROT_READ);
                   3680:        ref = pmap_is_referenced(pg);
                   3681:        mod = pmap_is_modified(pg);
                    3682:        printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
                   3683:               ref, mod);
                   3684:
                   3685:        /* Now clear reference and modify */
                   3686:        ref = pmap_clear_reference(pg);
                   3687:        mod = pmap_clear_modify(pg);
                   3688:        printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82      mrg      3689:               (void *)(u_long)va, (long)pa,
1.73      eeh      3690:               ref, mod);
                   3691:
                   3692:
                   3693:        /* Modify page */
                   3694:        pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
1.108     chris    3695:        pmap_update(pmap_kernel());
1.73      eeh      3696:        *loc = 1;
                   3697:
                   3698:        ref = pmap_is_referenced(pg);
                   3699:        mod = pmap_is_modified(pg);
                   3700:        printf("Modified page: ref %d, mod %d\n",
                   3701:               ref, mod);
                   3702:
                    3703:        /* Check pmap_page_protect() */
                   3704:        pmap_page_protect(pg, VM_PROT_NONE);
                   3705:        ref = pmap_is_referenced(pg);
                   3706:        mod = pmap_is_modified(pg);
                    3707:        printf("pmap_page_protect(VM_PROT_NONE): ref %d, mod %d\n",
                   3708:               ref, mod);
                   3709:
                   3710:        /* Now clear reference and modify */
                   3711:        ref = pmap_clear_reference(pg);
                   3712:        mod = pmap_clear_modify(pg);
                   3713:        printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82      mrg      3714:               (void *)(u_long)va, (long)pa,
1.43      eeh      3715:               ref, mod);
                   3716:
                   3717:        /* Unmap page */
                   3718:        pmap_remove(pmap_kernel(), va, va+1);
1.108     chris    3719:        pmap_update(pmap_kernel());
1.43      eeh      3720:        ref = pmap_is_referenced(pg);
                   3721:        mod = pmap_is_modified(pg);
1.82      mrg      3722:        printf("Unmapped page: ref %d, mod %d\n", ref, mod);
1.43      eeh      3723:
                   3724:        /* Now clear reference and modify */
                   3725:        ref = pmap_clear_reference(pg);
                   3726:        mod = pmap_clear_modify(pg);
                   3727:        printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82      mrg      3728:               (void *)(u_long)va, (long)pa, ref, mod);
1.43      eeh      3729:
                   3730:        /* Check it's properly cleared */
                   3731:        ref = pmap_is_referenced(pg);
                   3732:        mod = pmap_is_modified(pg);
                   3733:        printf("Checking cleared page: ref %d, mod %d\n",
                   3734:               ref, mod);
                   3735:
                   3736:        pmap_remove(pmap_kernel(), va, va+1);
1.108     chris    3737:        pmap_update(pmap_kernel());
1.262     mrg      3738:        pmap_free_page(pa, cpus_active);
1.43      eeh      3739: }
                   3740: #endif
1.155     chs      3741:
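                          /*
                           * pm_refs == 0 appears to mark a pmap whose activation was
                           * deferred; pmap_update() is where such a pmap is finally
                           * re-activated on the MMU via pmap_activate_pmap().
                           */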
                   3742: void
                   3743: pmap_update(struct pmap *pmap)
                   3744: {
                   3745:
                   3746:        if (pmap->pm_refs > 0) {
                   3747:                return;
                   3748:        }
                   3749:        pmap->pm_refs = 1;
                   3750:        pmap_activate_pmap(pmap);
                   3751: }
1.255     mrg      3752:
                   3753: /*
                   3754:  * pmap_copy_page()/pmap_zero_page()
                   3755:  *
                   3756:  * we make sure that the destination page is flushed from all D$'s
                   3757:  * before we perform the copy/zero.
                   3758:  */
                   3759: extern int cold;
                   3760: void
                   3761: pmap_copy_page(paddr_t src, paddr_t dst)
                   3762: {
                   3763:
                   3764:        if (!cold)
                   3765:                dcache_flush_page_all(dst);
                   3766:        pmap_copy_page_phys(src, dst);
                   3767: }
                   3768:
                   3769: void
                   3770: pmap_zero_page(paddr_t pa)
                   3771: {
                   3772:
                   3773:        if (!cold)
                   3774:                dcache_flush_page_all(pa);
                   3775:        pmap_zero_page_phys(pa);
                   3776: }
1.281     martin   3777:
                   3778: #ifdef _LP64
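                          /*
                           * 64-bit SPARC CPUs implement fewer VA bits than the full 64,
                           * which leaves a "hole" of unmappable addresses in the middle
                           * of the address space.  Reject any mmap range overlapping
                           * the hole; ranges entirely below or entirely above it are
                           * fine.
                           */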
                   3779: int
                   3780: sparc64_mmap_range_test(vaddr_t addr, vaddr_t eaddr)
                   3781: {
                   3782:        const vaddr_t hole_start = 0x000007ffffffffff;
                   3783:        const vaddr_t hole_end   = 0xfffff80000000000;
                   3784:
                   3785:        if (addr >= hole_end)
                   3786:                return 0;
                   3787:        if (eaddr <= hole_start)
                   3788:                return 0;
                   3789:
                   3790:        return EINVAL;
                   3791: }
                   3792: #endif
1.283     palle    3793:
                   3794: #ifdef SUN4V
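                          /*
                           * On sun4v the hypervisor owns the MMU, so the interrupt
                           * stack gets a permanent (unevictable) 64KB DTLB mapping
                           * through hv_mmu_map_perm_addr(); FORCE_ALIAS keeps the page
                           * out of the D$.
                           */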
                   3795: void
                   3796: pmap_setup_intstack_sun4v(paddr_t pa)
                   3797: {
                   3798:        int64_t hv_rc;
                   3799:        int64_t data;
                   3800:        data = SUN4V_TSB_DATA(
                   3801:            0 /* global */,
                   3802:            PGSZ_64K,
                   3803:            pa,
                   3804:            1 /* priv */,
                   3805:            1 /* Write */,
                   3806:            1 /* Cacheable */,
                   3807:            FORCE_ALIAS /* ALIAS -- Disable D$ */,
                   3808:            1 /* valid */,
                   3809:            0 /* IE */);
                   3810:        hv_rc = hv_mmu_map_perm_addr(INTSTACK, data, MAP_DTLB);
                    3811:        if (hv_rc != H_EOK) {
1.284     nakayama 3812:                panic("hv_mmu_map_perm_addr() failed - rc = %" PRId64 "\n",
                   3813:                    hv_rc);
1.283     palle    3814:        }
                   3815: }
                   3816:
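                          /*
                           * Register the physical address of the TSB descriptor with
                           * the hypervisor for both context 0 and non-zero contexts,
                           * so MMU misses can be serviced from the kernel's TSB.
                           */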
                   3817: void
1.290     palle    3818: pmap_setup_tsb_sun4v(struct tsb_desc* tsb_desc)
1.283     palle    3819: {
                   3820:        int err;
                   3821:        paddr_t tsb_desc_p;
                   3822:        tsb_desc_p = pmap_kextract((vaddr_t)tsb_desc);
1.290     palle    3823:        if (!tsb_desc_p) {
1.283     palle    3824:                panic("pmap_setup_tsb_sun4v() pmap_kextract() failed");
                   3825:        }
                   3826:        err = hv_mmu_tsb_ctx0(1, tsb_desc_p);
                   3827:        if (err != H_EOK) {
                   3828:                prom_printf("hv_mmu_tsb_ctx0() err: %d\n", err);
                   3829:                panic("pmap_setup_tsb_sun4v() hv_mmu_tsb_ctx0() failed");
                   3830:        }
                   3831:        err = hv_mmu_tsb_ctxnon0(1, tsb_desc_p);
                   3832:        if (err != H_EOK) {
                   3833:                prom_printf("hv_mmu_tsb_ctxnon0() err: %d\n", err);
                   3834:                panic("pmap_setup_tsb_sun4v() hv_mmu_tsb_ctxnon0() failed");
                   3835:        }
                   3836: }
                   3837:
                   3838: #endif
