
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/arch/sun3/sun3/pmap.c between version 1.37 and 1.38

version 1.37, 1994/10/26 19:04:36 version 1.38, 1994/11/21 21:38:54
Line 1 
Line 1 
 /*      $NetBSD$        */  /*      $NetBSD$        */
   
 /*  /*
  * Copyright (c) 1993, 1994 Adam Glass   * Copyright (c) 1994 Gordon W. Ross
    * Copyright (c) 1993 Adam Glass
  * All rights reserved.   * All rights reserved.
  *   *
  * Redistribution and use in source and binary forms, with or without   * Redistribution and use in source and binary forms, with or without
Line 30 
Line 31 
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF   * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.   * SUCH DAMAGE.
  */   */
 #include <sys/param.h>  
 #include <sys/systm.h>  
 #include <sys/proc.h>  
 #include <sys/malloc.h>  
 #include <sys/user.h>  
   
 #include <vm/vm.h>  
 #include <vm/vm_kern.h>  
 #include <vm/vm_page.h>  
   
 #include <machine/pte.h>  
 #include <machine/control.h>  
   
 #include <machine/cpu.h>  
 #include <machine/mon.h>  
 #include <machine/vmparam.h>  
 #include <machine/pmap.h>  
   
 /* XXX - This is weird... */  
 #define sun3_round_up_seg(x)    (sun3_trunc_seg(x) + NBSG)  
   
 extern void printf __P((const char *, ...));  
   
 #define VA_SEGNUM(x)    ((u_int)(x) >> SEGSHIFT)  
   
 /*  /*
  * globals needed by the vm system   * XXX - current_projects:
  *   *
  * [frankly the stupid vm system should allocate these]  
  */  
   
 vm_offset_t virtual_avail, virtual_end;  
 vm_offset_t avail_start, avail_end;  
   
   
 /* current_projects:  
  *  
  * debugging support   * debugging support
  *  
  * need to write/eliminate use of/fix:  
  * memavail problem  
  * pmap_bootstrap needs to be completed, and fixed.  
  * pmap_init does nothing with its arguments....  
  * locking protocols   * locking protocols
  *   *
    * Make a call for the trap handler to use to quickly reload
    * a PMEG that is in pm_segmap but not in HW segmap.
    * (just call pmeg_cache(), and if found, load it)
  */   */
   
 /*  /*
  * Some notes:   * Some notes:
  *   *
  * sun3s have contexts (8).  In our mapping of the world, the kernel is mapped   * sun3s have contexts (8).  In our mapping of the world, the kernel is mapped
  * into all contexts.  Processes take up a known portion of the context,   * into all contexts.  Processes take up a known portion of the context,
  * and compete for the available contexts on a LRU basis.   * and compete for the available contexts on a LRU basis.
  *   *
  * sun3s also have this evil "pmeg" crapola.  Essentially each "context"'s   * sun3s also have this evil "pmeg" crapola.  Essentially each "context"'s
  * address space is defined by the 2048 one-byte entries in the segment map.   * address space is defined by the 2048 one-byte entries in the segment map.
  * Each of these 1-byte entries points to a 'pmeg' or page-something-group   * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
 * which contains the mappings for that virtual segment.  A segment is   * which contains the mappings for that virtual segment.  (This strange
 * 128Kb wide, and is mapped by 16 8Kb pages.   * terminology was invented by Sun and is preserved here for consistency.)
  *   * Each PMEG maps a segment of 128Kb length, with 16 pages of 8Kb each.
  * As you can tell these "pmeg's" are in short supply and heavy demand.   *
    * As you can tell these "pmeg's" are in short supply and heavy demand.
  * 'pmeg's allocated to the kernel are "static" in the sense that they can't   * 'pmeg's allocated to the kernel are "static" in the sense that they can't
  * be stolen from it.  'pmeg's allocated to a particular segment of a   * be stolen from it.  'pmeg's allocated to a particular segment of a
  * pmap's virtual space will be fought over by the other pmaps.   * pmap's virtual space will be fought over by the other pmaps.
Line 101  vm_offset_t avail_start, avail_end;
Line 68  vm_offset_t avail_start, avail_end;
  *       pmegs that aren't needed by a pmap remain in the MMU.   *       pmegs that aren't needed by a pmap remain in the MMU.
  *       quick context switches between pmaps   *       quick context switches between pmaps
  *       kernel is in all contexts   *       kernel is in all contexts
  *  
  *  
  */   */
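
The segment arithmetic described above is easier to see with numbers. A minimal standalone sketch of the VA decomposition, assuming the usual sun3 constants (SEGSHIFT of 17 for 128Kb segments, 8Kb pages, 16 pages per PMEG); the macros are redefined locally so the example compiles on its own:

        /*
         * Illustrative only: split a VA the way the sun3 MMU does.
         * Assumes SEGSHIFT == 17 (128Kb segments), PGSHIFT == 13 (8Kb pages).
         */
        #include <stdio.h>

        #define SEGSHIFT        17
        #define PGSHIFT         13
        #define VA_SEGNUM(x)    ((unsigned)(x) >> SEGSHIFT)        /* segmap index */
        #define VA_PTE_NUM(x)   (((unsigned)(x) >> PGSHIFT) & 0xF) /* PTE slot in PMEG */

        int main(void)
        {
                unsigned va = 0x0E012000;
                printf("segnum=%u pte=%u\n", VA_SEGNUM(va), VA_PTE_NUM(va));
                return 0;       /* prints: segnum=1792 pte=9 */
        }
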
   
   #include <sys/param.h>
   #include <sys/systm.h>
   #include <sys/proc.h>
   #include <sys/malloc.h>
   #include <sys/user.h>
   #include <sys/queue.h>
   
   #include <vm/vm.h>
   #include <vm/vm_kern.h>
   #include <vm/vm_page.h>
   
   #include <machine/pte.h>
   #include <machine/control.h>
   
   #include <machine/cpu.h>
   #include <machine/mon.h>
   #include <machine/vmparam.h>
   #include <machine/pmap.h>
   
   #if     (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
   #error  "PMAP_XXX definitions don't match pte.h!"
   #endif
   
   /*
    * globals shared between here and sun3_startup
    * XXX - Look at relationship twix pmap and ^
    * These are set in pmap_bootstrap() and used
    * in pmap_next_page().
    */
   extern vm_offset_t virtual_avail, virtual_end;
   extern vm_offset_t avail_start, avail_end;
   /* used to skip the Sun3/50 video RAM */
   extern vm_offset_t hole_start, hole_size;
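
pmap_next_page() itself is outside this hunk; a hedged sketch of how these globals plausibly drive it, stepping over the 3/50 frame-buffer hole (the function body below is an assumption, not the diff's code):

        /*
         * Sketch only: hand out physical pages in order from avail_next,
         * skipping [hole_start, hole_start + hole_size).
         */
        int
        pmap_next_page_sketch(pa)
                vm_offset_t *pa;
        {
                if (avail_next == hole_start)   /* reached the video RAM? */
                        avail_next += hole_size;
                if (avail_next >= avail_end)    /* no memory left */
                        return (0);
                *pa = avail_next;
                avail_next += NBPG;
                return (1);
        }
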
   
   /* statistics... */
   struct pmap_stats {
           int     ps_unlink_pvfirst;      /* # of pv_unlinks on head */
           int     ps_unlink_pvsearch;     /* # of pv_unlink searches */
           int     ps_changeprots;         /* # of calls to changeprot */
           int     ps_useless_changeprots; /* # of changeprots for wiring */
           int     ps_enter_firstpv;       /* pv heads entered */
           int     ps_enter_secondpv;      /* pv nonheads entered */
           int     ps_useless_changewire;  /* useless wiring changes */
           int     ps_npg_prot_all;        /* # of active pages protected */
           int     ps_npg_prot_actual;     /* # pages actually affected */
   } pmap_stats;
   
   struct context_state {
           TAILQ_ENTRY(context_state) context_link;
           int            context_num;
           struct pmap   *context_upmap;
   };
   
   typedef struct context_state *context_t;
   
   
   #define VA_SEGNUM(x)    ((u_int)(x) >> SEGSHIFT)
   
   /* This is for pmap_next_page() */
   static vm_offset_t avail_next;
   
   /* This is where we map a PMEG without a context. */
   static vm_offset_t temp_seg_va;
   
   /* XXX - Why do we need this? */
   #define managed(pa)     (((pa) >= avail_start) && ((pa) < avail_end))
   
   
 #define NKSEG   (NSEGMAP - (KERNBASE / NBSG)) /* is KERNBASE ok? */  #define NKSEG   (NSEGMAP - (KERNBASE / NBSG)) /* is KERNBASE ok? */
 #define NUSEG   (NSEGMAP-NKSEG)  #define NUSEG   (NSEGMAP-NKSEG)
   
   /*
    * locking issues:
    *
    */
   
 /*  /*
  * Note that PMAP_LOCK is used in routines called at splnet() and   * Note that PMAP_LOCK is used in routines called at splnet() and
Line 119  vm_offset_t avail_start, avail_end;
Line 157  vm_offset_t avail_start, avail_end;
 #define PMAP_UNLOCK() splx(s)  #define PMAP_UNLOCK() splx(s)
   
 #define TAILQ_EMPTY(headp) \  #define TAILQ_EMPTY(headp) \
         !((headp)->tqh_first)                  !((headp)->tqh_first)
   
 #define TAILQ_REMOVE_FIRST(result, headp, entries) \  #define TAILQ_REMOVE_FIRST(result, headp, entries) \
 { \  { \
         result = (headp)->tqh_first; \          result = (headp)->tqh_first; \
         if (result) TAILQ_REMOVE(headp, result, entries); \          if (result) TAILQ_REMOVE(headp, result, entries); \
         }          }
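
The two macros above paper over gaps in the 4.4BSD <sys/queue.h> API. A short usage sketch against the pmeg free queue declared later in this file (illustrative, not taken from the diff):

        /* Sketch: pop the head of the free queue, then push it back. */
        struct pmeg_state *p;

        TAILQ_REMOVE_FIRST(p, &pmeg_free_queue, pmeg_link);
        if (p != NULL)
                TAILQ_INSERT_TAIL(&pmeg_free_queue, p, pmeg_link);
        if (TAILQ_EMPTY(&pmeg_free_queue))
                panic("free queue empty");
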
   
 /*  /*
 * locking issues:   * pv support, i.e. stuff that maps from physical pages to virtual addresses
  *   *
  */   */
 #ifdef  PMAP_DEBUG  
 int pmap_db_lock;  
 #define PMAP_DB_LOCK() do { \  
     if (pmap_db_lock) panic("pmap_db_lock: line %d", __LINE__); \  
     pmap_db_lock = 1; \  
 } while (0)  
 #define PMAP_DB_UNLK() pmap_db_lock = 0  
 #else   /* PMAP_DEBUG */  
 #define PMAP_DB_LOCK() XXX  
 #define PMAP_DB_UNLK() XXX  
 #endif  /* PMAP_DEBUG */  
   
 #ifdef  PMAP_DEBUG  
 int pmeg_lock;  
 #define PMEG_LOCK() do { \  
     if (pmeg_lock) panic("pmeg_lock: line %d", __LINE__); \  
     pmeg_lock = 1; \  
 } while (0)  
 #define PMEG_UNLK() pmeg_lock = 0  
 #else   /* PMAP_DEBUG */  
 #define PMEG_LOCK() XXX  
 #define PMEG_UNLK() XXX  
 #endif  /* PMAP_DEBUG */  
   
   
 /*  /*
  * pv support, i.e stuff that maps from physical pages to virtual addresses   * XXX - Could eliminate this by causing managed() to return 0
  *   * ( avail_start = avail_end = 0 )
  */   */
   int pv_initialized = 0;
   
 struct pv_entry {  struct pv_entry {
     struct pv_entry *pv_next;          struct pv_entry *pv_next;
     pmap_t           pv_pmap;          pmap_t         pv_pmap;
     vm_offset_t      pv_va;          vm_offset_t      pv_va;
     unsigned char    pv_flags;          /* XXX - put flags in low byte of pv_va */
           unsigned int     pv_flags;
 };  };
   
 typedef struct pv_entry *pv_entry_t;  typedef struct pv_entry *pv_entry_t;
   
 pv_entry_t pv_head_table = NULL;  pv_entry_t pv_head_table = NULL;
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
 struct pv_entry *  static struct pv_entry *
 pa_to_pvp(pa)  pa_to_pvp(pa)
     vm_offset_t pa;          vm_offset_t pa;
 {  {
     struct pv_entry *pvp;          struct pv_entry *pvp;
     if (pa < avail_start || pa >= avail_end) {          if (pa < avail_start || pa >= avail_end) {
         printf("pa_to_pvp: bad pa=0x%x\n", pa);                  printf("pa_to_pvp: bad pa=0x%x\n", pa);
         Debugger();                  Debugger();
     }          }
     pvp = &pv_head_table[PA_PGNUM(pa)];          pvp = &pv_head_table[PA_PGNUM(pa)];
     return pvp;          return pvp;
 }  }
 #else  #else
 #define pa_to_pvp(pa) &pv_head_table[PA_PGNUM(pa)]  #define pa_to_pvp(pa) &pv_head_table[PA_PGNUM(pa)]
 #endif  #endif
   
 #define PV_VALID  8  /* These are as in the MMU but shifted by PV_SHIFT. */
 #define PV_WRITE  4  #define PV_SHIFT        24
 #define PV_SYSTEM 2  #define PV_VALID  0x80
 #define PV_NC     1  #define PV_WRITE  0x40
 #define PV_MASK   0xF  #define PV_SYSTEM 0x20
   #define PV_NC     0x10
 #define MAKE_PV_REAL(pv_flags) ((pv_flags & PV_MASK) << PG_PERM_SHIFT)  #define PV_PERM   0xF0
 #define PG_TO_PV_FLAGS(pte) (((PG_PERM) & pte) >> PG_PERM_SHIFT)  #define PV_TYPE   0x0C
   #define PV_REF    0x02
 /* cache support */  #define PV_MOD    0x01
 static unsigned char *pv_cache_table = NULL;  
 #define set_cache_flags(pa, flags) \  #define MAKE_PV_REAL(pv_flags) ((pv_flags & PV_PERM) << PV_SHIFT)
     pv_cache_table[PA_PGNUM(pa)] |= flags & PV_NC  #define PG_TO_PV_FLAGS(pte) (((PG_PERM) & pte) >> PV_SHIFT)
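
With PV_SHIFT at 24, the pv flags are simply the top byte of a PTE, so both conversions are a single shift. A worked example, assuming the pte.h values PG_VALID 0x80000000, PG_WRITE 0x40000000, and PG_MOD 0x01000000 (those definitions are not shown in this diff):

        /*
         * Worked example (PG_* values assumed from pte.h):
         *   MAKE_PV_REAL(PV_VALID|PV_WRITE) == 0xC0 << 24 == 0xC0000000
         *                                   == PG_VALID|PG_WRITE
         *   (pte & PG_MOD) >> PV_SHIFT      == 0x01000000 >> 24 == PV_MOD
         */
        unsigned int pv_flags = PV_VALID | PV_WRITE;    /* 0xC0 */
        int pte_bits = MAKE_PV_REAL(pv_flags);          /* 0xC0000000 */
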
 #define force_cache_flags(pa, flags) \  
     pv_cache_table[PA_PGNUM(pa)] = flags & PV_NC  /*
 #define get_cache_flags(pa) (pv_cache_table[PA_PGNUM(pa)])   * cache support
    */
 /* modified bits support */  #define BADALIAS(a1, a2)        (((int)(a1) ^ (int)(a2)) & SEGOFSET)
 static unsigned char *pv_modified_table = NULL;  
   /*
 #if 0   * Save the MOD bit from the given PTE using its PA
 #define save_modified_bits(pte) \   */
     pv_modified_table[PG_PGNUM(pte)] |= \  static void
     (pte & (PG_OBIO|PG_VME16D|PG_VME32D) ? 0 : \  save_modref_bits(int pte)
      ((pte & PG_MOD) >>PG_MOD_SHIFT))  
 #else  
 void save_modified_bits(pte)  
     vm_offset_t pte;  
 {  {
     vm_offset_t pa;          pv_entry_t pvhead;
     int pn;  
   
     pa = PG_PA(pte);          if (pv_initialized == 0)
     if (pa >= avail_end)                  return;
         panic("save_modified_bits: bad pa=%x", pa);  
   
     pn = PG_PGNUM(pte);          if ((pte & PG_TYPE) != PGT_OBMEM)
                   return;
   
     pv_modified_table[pn] |= \          pvhead = pa_to_pvp(PG_PA(pte));
         (pte & (PG_OBIO|PG_VME16D|PG_VME32D) ? 0 : \          pvhead->pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
          ((pte & PG_MOD) >>PG_MOD_SHIFT));  
 }  }
 #endif  
   
 int pv_initialized = 0;  
   
 #define PMEG_INVAL (NPMEG-1)  
 #define PMEG_NULL (pmeg_t) NULL  
 #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)  #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
 #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)  #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
 #define pmap_add_ref(pmap) ++pmap->pm_refcount  #define pmap_add_ref(pmap) ++pmap->pm_refcount
 #define pmap_del_ref(pmap) --pmap->pm_refcount  #define pmap_del_ref(pmap) --pmap->pm_refcount
 #define pmap_refcount(pmap) pmap->pm_refcount  #define pmap_refcount(pmap) pmap->pm_refcount
 #define get_pmeg_cache(pmap, segnum) (pmap->pm_segmap[segnum])  
 #define PM_UPDATE_CACHE 1  #define PM_UPDATE_CACHE 1
                                 /* external structures */                                  /* external structures */
 pmap_t kernel_pmap = NULL;  pmap_t kernel_pmap = NULL;
Line 249  static struct pmap kernel_pmap_store;
Line 254  static struct pmap kernel_pmap_store;
   
 /* protection conversion */  /* protection conversion */
 static unsigned int protection_converter[8];  static unsigned int protection_converter[8];
 #define pmap_pte_prot(x) protection_converter[x]  #define pmap_pte_prot(x) protection_converter[x&7]
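
sun3_protection_init() is declared below but not shown in this hunk; a hedged sketch of how the eight-entry table is plausibly filled (VM_PROT_* bits index it, PG_* protection bits come out; this is an assumption, not the diff's code):

        /* Sketch of sun3_protection_init(); not the code from this diff. */
        void
        sun3_protection_init_sketch()
        {
                unsigned int *kp, prot;

                kp = protection_converter;
                for (prot = 0; prot < 8; prot++) {
                        if (prot & VM_PROT_WRITE)
                                *kp++ = PG_VALID | PG_WRITE;
                        else if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
                                *kp++ = PG_VALID;       /* read-only */
                        else
                                *kp++ = 0;              /* no access */
                }
        }
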
   
 /* pmeg structures, queues, and macros */  /*
    * pmeg structures, queues, and macros
    */
   #define PMEGQ_FREE     0
   #define PMEGQ_INACTIVE 1
   #define PMEGQ_ACTIVE   2
   #define PMEGQ_KERNEL   3
   #define PMEGQ_NONE     4
   
   struct pmeg_state {
           TAILQ_ENTRY(pmeg_state) pmeg_link;
           int            pmeg_index;
           pmap_t         pmeg_owner;
           int            pmeg_version;
           vm_offset_t    pmeg_va;
           int            pmeg_wired;
           int            pmeg_reserved;
           int            pmeg_vpages;
           int            pmeg_qstate;
   };
   
   typedef struct pmeg_state *pmeg_t;
   
   #define PMEG_INVAL (NPMEG-1)
   #define PMEG_NULL (pmeg_t) NULL
   
   /* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
 TAILQ_HEAD(pmeg_tailq, pmeg_state);  TAILQ_HEAD(pmeg_tailq, pmeg_state);
 struct pmeg_tailq pmeg_free_queue, pmeg_inactive_queue,  struct pmeg_tailq pmeg_free_queue, pmeg_inactive_queue,
     pmeg_active_queue, pmeg_kernel_queue;          pmeg_active_queue, pmeg_kernel_queue;
   
 static struct pmeg_state pmeg_array[NPMEG];  static struct pmeg_state pmeg_array[NPMEG];
   
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
 pmeg_t pmeg_p(segnum)  static pmeg_t
     int segnum;  pmeg_p(sme)
 {          int sme;
     return &pmeg_array[segnum];  {
           if (sme < 0 || sme >= SEGINV)
                   panic("pmeg_p: bad sme");
           return &pmeg_array[sme];
 }  }
 #else  #else
 #define pmeg_p(x) &pmeg_array[x]  #define pmeg_p(x) &pmeg_array[x]
 #endif  #endif
   
 #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired_count > 0)  #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
   
   /*
    * context structures, and queues
    */
   #define CTXINVAL -1
   #define has_context(pmap)       (pmap->pm_ctxnum >= 0)
   
 /* context structures, and queues */  
 TAILQ_HEAD(context_tailq, context_state);  TAILQ_HEAD(context_tailq, context_state);
 struct context_tailq context_free_queue, context_active_queue;  struct context_tailq context_free_queue, context_active_queue;
   
Line 282  static struct context_state context_arra
Line 321  static struct context_state context_arra
  * (set in sun3_startup.c)   * (set in sun3_startup.c)
  */   */
 vm_offset_t tmp_vpages[2];  vm_offset_t tmp_vpages[2];
   int tmp_vpages_inuse;
   
 /* context support */  
   
 /* prototypes */  /*
 int get_pte_val __P((pmap_t pmap, vm_offset_t va, vm_offset_t *ptep));   * prototypes
 void set_pte_val __P((pmap_t pmap, vm_offset_t va, vm_offset_t pte));   */
   static int get_pte_val __P((pmap_t pmap, vm_offset_t va));
 void context_allocate __P((pmap_t pmap));  static void set_pte_val __P((pmap_t pmap, vm_offset_t va, int pte));
 void context_free __P((pmap_t pmap));  static int get_pte_pmeg __P((int, int));
 void context_init __P((void));  static void set_pte_pmeg __P((int, int, int));
   
 void pmeg_steal __P((int pmeg_num));  static void context_allocate __P((pmap_t pmap));
 void pmeg_flush __P((pmeg_t pmegp));  static void context_free __P((pmap_t pmap));
 pmeg_t pmeg_allocate_invalid __P((pmap_t pmap, vm_offset_t va));  static void context_init __P((void));
 void pmeg_release __P((pmeg_t pmegp));  
 void pmeg_release_empty __P((pmeg_t pmegp, int segnum));  static void pmeg_flush __P((pmeg_t pmegp));
 pmeg_t pmeg_cache __P((pmap_t pmap, vm_offset_t va));  static pmeg_t pmeg_allocate __P((pmap_t pmap, vm_offset_t va));
 void pmeg_wire __P((pmeg_t pmegp));  static void pmeg_release __P((pmeg_t pmegp));
 void pmeg_unwire __P((pmeg_t pmegp));  static void pmeg_free __P((pmeg_t pmegp, int segnum));
 void pmeg_init __P((void));  static pmeg_t pmeg_cache __P((pmap_t pmap, vm_offset_t va));
   static void pmeg_set_wiring __P((pmeg_t pmegp, vm_offset_t va, int));
 unsigned char pv_compute_cache __P((pv_entry_t head));  
 int pv_compute_modified __P((pv_entry_t head));  static int pv_compute_cache __P((pv_entry_t head));
 void pv_remove_all __P(( vm_offset_t pa));  static int pv_link __P((pmap_t pmap, vm_offset_t, vm_offset_t, u_int));
 unsigned char pv_link  __P((pmap_t pmap, vm_offset_t pa, vm_offset_t va, unsigned char flags));  static void pv_unlink __P((pmap_t, vm_offset_t, vm_offset_t));
 void pv_change_pte __P((pv_entry_t pv_list, vm_offset_t set_bits, vm_offset_t clear_bits));  static void pv_remove_all __P(( vm_offset_t pa));
 void pv_unlink __P((pmap_t pmap, vm_offset_t pa, vm_offset_t va));  static void pv_changepte __P((pv_entry_t, int, int));
 void pv_init __P((void));  static void pv_syncflags __P((pv_entry_t head));
   static void pv_init __P((void));
   
   void sun3_pmeg_init __P((void));
   void sun3_reserve_pmeg __P((int pmeg_num));
 void sun3_protection_init __P((void));  void sun3_protection_init __P((void));
   
 void pmap_bootstrap __P((void));  static void pmap_common_init __P((pmap_t pmap));
 void pmap_init __P((vm_offset_t phys_start, vm_offset_t phys_end));  
   
 void pmap_common_init __P((pmap_t pmap));  
 vm_offset_t pmap_map __P((vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot));  
 void pmap_user_pmap_init __P((pmap_t pmap));  
   
 pmap_t pmap_create __P((vm_size_t size));  
 void pmap_release __P((pmap_t pmap));  
 void pmap_destroy __P((pmap_t pmap));  
 void pmap_reference __P((pmap_t pmap));  
 void pmap_pinit __P((pmap_t pmap));  
   
 void pmap_page_protect __P((vm_offset_t pa, vm_prot_t prot));  static void pmap_user_pmap_init __P((pmap_t pmap));
   
 void pmap_remove_range_mmu __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva));  static void pmap_remove_range_mmu __P((pmap_t, vm_offset_t, vm_offset_t));
 void pmap_remove_range_contextless __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pmeg_t pmegp));  static void pmap_remove_range_noctx __P((pmap_t, vm_offset_t, vm_offset_t));
 void pmap_remove_range __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva));  static void pmap_remove_range __P((pmap_t pmap, vm_offset_t, vm_offset_t));
 void pmap_remove __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva));  
   static void pmap_enter_kernel __P((vm_offset_t va, vm_offset_t pa,
           vm_prot_t prot, boolean_t wired, int pte_proto));
   static void pmap_enter_user __P((pmap_t pmap, vm_offset_t va, vm_offset_t pa,
           vm_prot_t prot, boolean_t wired, int pte_proto));
   
   static void pmap_protect_range_noctx __P((pmap_t, vm_offset_t, vm_offset_t));
   static void pmap_protect_range_mmu __P((pmap_t, vm_offset_t, vm_offset_t));
   static void pmap_protect_range __P((pmap_t, vm_offset_t, vm_offset_t));
   
 void pmap_enter_kernel __P((vm_offset_t va, vm_offset_t pa, vm_prot_t prot, boolean_t wired, vm_offset_t pte_proto, vm_offset_t mem_type));  
 void pmap_enter_user __P((pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, boolean_t wired, vm_offset_t pte_proto, vm_offset_t mem_type));  
 void pmap_enter __P((pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, boolean_t wired));  
   
 void pmap_clear_modify __P((vm_offset_t pa));  
 boolean_t pmap_is_modified __P((vm_offset_t pa));  
   
 void pmap_clear_reference __P((vm_offset_t pa));  
 boolean_t pmap_is_referenced __P((vm_offset_t pa));  
   
 void pmap_activate __P((pmap_t pmap, struct pcb *pcbp));  
 void pmap_deactivate __P((pmap_t pmap, struct pcb *pcbp));  
   
 void pmap_change_wiring __P((pmap_t pmap, vm_offset_t va, boolean_t wired));  
   
 void pmap_copy __P((pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr));  
 void pmap_copy_page __P((vm_offset_t src, vm_offset_t dst));  
 void pmap_zero_page __P((vm_offset_t pa));  
   
 vm_offset_t pmap_extract __P((pmap_t pmap, vm_offset_t va));  
 vm_offset_t pmap_phys_address __P((int page_number));  
   
 void pmap_pageable __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable));  
   
 void pmap_protect_range_contextless __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_offset_t pte_proto, pmeg_t pmegp));  
 void pmap_protect_range_mmu __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_offset_t pte_proto));  
 void pmap_protect_range __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_offset_t pte_proto));  
 void pmap_protect __P((pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot));  
   
 void pmap_update __P((void));  
   
 /*  /*
  * Debugging support.   * Debugging support.
Line 385  int pmap_debug = 0;
Line 394  int pmap_debug = 0;
 #ifdef  PMAP_DEBUG      /* XXX */  #ifdef  PMAP_DEBUG      /* XXX */
 int pmap_db_watchva = -1;  int pmap_db_watchva = -1;
 int pmap_db_minphys;  int pmap_db_minphys;
 void set_pte_debug(va, pte)  static void
     vm_offset_t va, pte;  set_pte_debug(vm_offset_t va, int pte)
 {  {
     if ((pte & PG_VALID) && (PG_PA(pte) < pmap_db_minphys))          if ((pte & PG_VALID) && (PG_PA(pte) < pmap_db_minphys))
     {          {
         printf("set_pte_debug: va=%x pa=%x\n", va, PG_PA(pte));                  printf("set_pte_debug: va=%x pa=%x\n", va, PG_PA(pte));
         Debugger();                  Debugger();
     }          }
     set_pte(va, pte);          set_pte(va, pte);
 }  }
 #define set_pte set_pte_debug  #define set_pte set_pte_debug
   
 int pmap_db_watchpmeg;  int pmap_db_watchpmeg;
 void set_segmap_debug(va, sme)  static void
      vm_offset_t va;  set_segmap_debug(va, sme)
      unsigned char sme;           vm_offset_t va;
 {           unsigned int sme;
     if (sme == pmap_db_watchpmeg) {  {
         printf("set_segmap_debug: watch pmeg %x\n", sme);          if (sme == pmap_db_watchpmeg) {
         Debugger();                  printf("set_segmap_debug: watch pmeg %x\n", sme);
     }                  Debugger();
     set_segmap(va,sme);          }
           set_segmap(va,sme);
 }  }
 #define set_segmap set_segmap_debug  #define set_segmap set_segmap_debug
   
 #endif  /* PMAP_DEBUG */  #endif  /* PMAP_DEBUG */
   
   #ifdef  PMAP_DEBUG      /* XXX */
   extern int getsr();
   #define CHECK_SPL() do { \
           if ((getsr() & PSL_IPL) < PSL_IPL3) \
                   panic("pmap: bad spl, line %d", __LINE__); \
   } while (0)
   #else   /* PMAP_DEBUG */
   #define CHECK_SPL() (void)0
   #endif  /* PMAP_DEBUG */
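
The intent is that callers raise the interrupt priority with PMAP_LOCK() and callees merely assert it with CHECK_SPL(). A minimal sketch of the pairing (both routine names are invented for illustration):

        /* Sketch: caller raises spl, callee asserts it was raised. */
        static void
        callee_sketch()
        {
                CHECK_SPL();    /* panics under PMAP_DEBUG if below IPL3 */
                /* ... touch MMU state safely ... */
        }

        static void
        caller_sketch()
        {
                int s;          /* used by PMAP_LOCK/PMAP_UNLOCK */

                PMAP_LOCK();
                callee_sketch();
                PMAP_UNLOCK();
        }
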
   
 /*  /*
  * Get a PTE from either the hardware or the pmeg cache.   * Get a PTE from either the hardware or the pmeg cache.
  * Return non-zero if PTE was found for this VA.   * Return non-zero if PTE was found for this VA.
  */   */
 int get_pte_val(pmap, va, ptep)  static int
      pmap_t pmap;  get_pte_val(pmap, va)
      vm_offset_t va, *ptep;          pmap_t pmap;
 {          vm_offset_t va;
     int saved_context,s;  {
     unsigned char sme;          int old_ctx, in_ctx, sme;
     pmeg_t pmegp;          pmeg_t pmegp;
     int rc=0;          int pte = PG_INVAL;
   
 #ifdef  PMAP_DEBUG      /* XXX */  #ifdef  PMAP_DEBUG
     if (pmap == kernel_pmap)          if (pmap == kernel_pmap)
         panic("get_pte_val: kernel_pmap");                  panic("get_pte_val: kernel_pmap");
           sme = pmap->pm_segmap[VA_SEGNUM(va)];
           if (sme == SEGINV)
                   panic("get_pte_val: SEGINV");
 #endif  #endif
   
     PMAP_LOCK();          CHECK_SPL();
     if (pmap->pm_context) {  
         saved_context = get_context();          old_ctx = CTXINVAL;
         set_context(pmap->pm_context->context_num);          in_ctx = FALSE;
         sme = get_segmap(va);          if (has_context(pmap)) {
         if (sme != SEGINV) {                  old_ctx = get_context();
             *ptep = get_pte(va);                  set_context(pmap->pm_ctxnum);
             rc = 1;                  sme = get_segmap(va);
                   if (sme != SEGINV)
                           in_ctx = TRUE;
           }
           if (in_ctx == TRUE) {
   #ifdef  PMAP_DEBUG
                   if (sme != pmap->pm_segmap[VA_SEGNUM(va)])
                           panic("get_pte_val: unknown sme!");
   #endif
                   pte = get_pte(va);
           } else {
                   /* PMEG is not currently in the HW segmap. */
                   sme = pmap->pm_segmap[VA_SEGNUM(va)];
                   pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
         }          }
         set_context(saved_context);          if (old_ctx != CTXINVAL)
     } else {                  set_context(old_ctx);
         /* we don't have a context */  
         pmegp = pmeg_cache(pmap, sun3_trunc_seg(va));  
         if (pmegp) {  
             *ptep = get_pte_pmeg(pmegp->pmeg_index, VA_PTE_NUM(va));  
             pmeg_release(pmegp);  
             rc = 1;  
         }  
     }  
     PMAP_UNLOCK();  
     return rc;  
 }  
   
 void set_pte_val(pmap, va, pte)  
      pmap_t pmap;  
      vm_offset_t va,pte;  
 {  
     int saved_context, s;  
     pmeg_t pmegp;  
     unsigned char sme;  
   
 #ifdef  PMAP_DEBUG      /* XXX */          return pte;
     if (pmap == kernel_pmap)  }
         panic("set_pte_val: kernel_pmap");  
   static void
   set_pte_val(pmap, va, pte)
           pmap_t pmap;
           vm_offset_t va;
           int pte;
   {
           int old_ctx, in_ctx, sme;
           pmeg_t pmegp;
   
   #ifdef  PMAP_DEBUG
           if (pmap == kernel_pmap)
                   panic("set_pte_val: kernel_pmap");
           sme = pmap->pm_segmap[VA_SEGNUM(va)];
           if (sme == SEGINV)
                   panic("set_pte_val: SEGINV");
 #endif  #endif
   
     PMAP_LOCK();          CHECK_SPL();
     if (pmap->pm_context) {  
         saved_context = get_context();          old_ctx = CTXINVAL;
         set_context(pmap->pm_context->context_num);          in_ctx = FALSE;
         sme = get_segmap(va);          if (has_context(pmap)) {
         if (sme != SEGINV)                  old_ctx = get_context();
             set_pte(va, pte);                  set_context(pmap->pm_ctxnum);
         set_context(saved_context);                  sme = get_segmap(va);
     } else {                  if (sme != SEGINV)
         /* we don't have a context */                          in_ctx = TRUE;
         pmegp = pmeg_cache(pmap, sun3_trunc_seg(va));          }
         if (!pmegp) panic("pmap: no pmeg to set pte in");          if (in_ctx == TRUE) {
         set_pte_pmeg(pmegp->pmeg_index, VA_PTE_NUM(va), pte);  
         pmeg_release(pmegp);  
     }  
     PMAP_UNLOCK();  
 }  
   
 void context_allocate(pmap)  
      pmap_t pmap;  
 {  
     context_t context;  
     int s;  
   
     PMAP_LOCK();  
 #ifdef  PMAP_DEBUG  
     if (pmap_debug & PMD_CONTEXT)  
         printf("context_allocate: for pmap %x\n", pmap);  
 #endif  
     if (pmap == kernel_pmap)  
         panic("context_allocate: kernel_pmap");  
     if (pmap->pm_context)  
         panic("pmap: pmap already has context allocated to it");  
     if (TAILQ_EMPTY(&context_free_queue)) { /* steal one from active*/  
         if (TAILQ_EMPTY(&context_active_queue))  
             panic("pmap: no contexts to be found");  
         context_free((&context_active_queue)->tqh_first->context_upmap);  
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
        if (pmap_debug & PMD_CONTEXT)                  if (sme != pmap->pm_segmap[VA_SEGNUM(va)])
            printf("context_allocate: pmap %x, take context %x num %d\n",                          panic("set_pte_val: unknown sme!");
                    pmap, context, context->context_num);  
 #endif  #endif
     }                  set_pte(va, pte);
     TAILQ_REMOVE_FIRST(context, &context_free_queue, context_link);          } else {
     TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);                  /* PMEG is not currently in the HW segmap. */
     if (context->context_upmap != NULL)                  sme = pmap->pm_segmap[VA_SEGNUM(va)];
         panic("pmap: context in use???");                  set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
     pmap->pm_context = context;  
     context->context_upmap = pmap;  
 #ifdef  PMAP_DEBUG  
     if (pmap_debug & PMD_CONTEXT)  
         printf("context_allocate: pmap %x given context %x num %d\n",  
                pmap, context, context->context_num);  
 #endif  
     PMAP_UNLOCK();  
 }  
   
 void context_free(pmap)         /* :) */  
      pmap_t pmap;  
 {  
     int saved_context, i, s;  
     context_t context;  
     unsigned int sme;  
   
     vm_offset_t va;  
   
     PMAP_LOCK();  
     if (!pmap->pm_context)  
         panic("pmap: can't free a non-existent context");  
 #ifdef  PMAP_DEBUG  
     if (pmap_debug & PMD_CONTEXT)  
         printf("context_free: freeing pmap %x of context %x num %d\n",  
                pmap, pmap->pm_context, pmap->pm_context->context_num);  
 #endif  
     saved_context = get_context();  
     context = pmap->pm_context;  
     set_context(context->context_num);  
   
     /* Unload MMU (but keep in SW segmap). */  
     va = 0;  
     for (i=0; i < NUSEG; i++) {  
         if (pmap->pm_segmap[i] != SEGINV) {  
             /* The MMU might have a valid sme. */  
             sme = get_segmap(va);  
             if (sme != SEGINV) {  
 #ifdef  PMAP_DEBUG  
                 /* Validate SME found in MMU. */  
                 if (sme != pmap->pm_segmap[i])  
                     panic("context_free: unknown sme 0x%x at va=0x%x",  
                           sme, va);  
                 if (pmap_debug & PMD_SEGMAP)  
                     printf("pmap: set_segmap ctx=%d v=%x old=%x new=ff (cf)\n",  
                            context->context_num, sun3_trunc_seg(va), sme);  
 #endif  
                 set_segmap(va, SEGINV);  
                 pmeg_release(pmeg_p(sme));  
             }  
         }          }
 #ifdef  DIAGNOSTIC          if (old_ctx != CTXINVAL)
         if (get_segmap(va) != SEGINV)                  set_context(old_ctx);
             panic("context_free: did not clean pmap=%x va=%x", pmap, va);          return;
   }
   
   static void
   context_allocate(pmap)
           pmap_t pmap;
   {
           context_t context;
           int s;
   
           PMAP_LOCK();
   #ifdef  PMAP_DEBUG
           if (pmap_debug & PMD_CONTEXT)
                   printf("context_allocate: for pmap %x\n", pmap);
 #endif  #endif
         va += NBSG;          if (pmap == kernel_pmap)
     }                  panic("context_allocate: kernel_pmap");
     set_context(saved_context);          if (has_context(pmap))
     context->context_upmap = NULL;                  panic("pmap: pmap already has context allocated to it");
     TAILQ_REMOVE(&context_active_queue, context, context_link);          if (TAILQ_EMPTY(&context_free_queue)) {
     TAILQ_INSERT_TAIL(&context_free_queue, context,                  /* Steal one from the active queue. */
                       context_link);/* active??? XXX */                  if (TAILQ_EMPTY(&context_active_queue))
     pmap->pm_context = NULL;                          panic("pmap: no contexts to be found");
                   context_free((&context_active_queue)->tqh_first->context_upmap);
   #ifdef  PMAP_DEBUG
                   if (pmap_debug & PMD_CONTEXT)
                           printf("context_allocate: pmap %x, take context %x num %d\n",
                                      pmap, context, context->context_num);
   #endif
           }
           TAILQ_REMOVE_FIRST(context, &context_free_queue, context_link);
           TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
           if (context->context_upmap != NULL)
                   panic("pmap: context in use???");
           pmap->pm_ctxnum = context->context_num;
           context->context_upmap = pmap;
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     if (pmap_debug & PMD_CONTEXT)          if (pmap_debug & PMD_CONTEXT)
         printf("context_free: pmap %x context removed\n", pmap);                  printf("context_allocate: pmap %x given context %x num %d\n",
                              pmap, context, context->context_num);
 #endif  #endif
     PMAP_UNLOCK();  
           /*
            * We could reload the MMU here, but that would
            * artificially move PMEGs from the inactive queue
            * to the active queue, so do lazy reloading.
            * XXX - Need to reload wired pmegs though...
            */
   
           PMAP_UNLOCK();
 }  }
   
 void context_init()  static void
 {  context_free(pmap)              /* :) */
     int i;          pmap_t pmap;
   {
           int saved_ctxnum, ctxnum;
           int i, s, sme;
           context_t contextp;
           vm_offset_t va;
   
     TAILQ_INIT(&context_free_queue);          PMAP_LOCK();
     TAILQ_INIT(&context_active_queue);  
   
     /* XXX - Can we use context zero?  (Adam says yes.) */          ctxnum = pmap->pm_ctxnum;
     for (i=0; i < NCONTEXT; i++) {  
         context_array[i].context_num = i;  
         context_array[i].context_upmap = NULL;  
         TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],  
                           context_link);  
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
         if (pmap_debug & PMD_CONTEXT)          if (pmap_debug & PMD_CONTEXT)
             printf("context_init: context num %d is %x\n",                  printf("context_free: freeing context num %d of pmap 0x%x\n",
                    i, &context_array[i]);                             ctxnum, pmap);
 #endif  #endif
     }  
 }  
   
           if (ctxnum < 0 || ctxnum >= NCONTEXT)
                   panic("pmap: can't free a non-existent context");
           contextp = &context_array[ctxnum];
   
           saved_ctxnum = get_context();
           set_context(ctxnum);
   
           /* Unload MMU (but keep in SW segmap). */
           va = 0;
           for (i=0; i < NUSEG; i++) {
                   if (pmap->pm_segmap[i] != SEGINV) {
                           /* The MMU might have a valid sme. */
                           sme = get_segmap(va);
                           if (sme != SEGINV) {
   #ifdef  PMAP_DEBUG
                                   /* Validate SME found in MMU. */
                                   if (sme != pmap->pm_segmap[i])
                                           panic("context_free: unknown sme at va=0x%x", va);
                                   if (pmap_debug & PMD_SEGMAP)
                                           printf("pmap: set_segmap ctx=%d v=%x old=%x new=ff (cf)\n",
                                                      ctxnum, sun3_trunc_seg(va), sme);
   #endif
   #ifdef  HAVECACHE
                                   cache_flush_segment(va);
   #endif
                                   set_segmap(va, SEGINV);
                                   pmeg_release(pmeg_p(sme));
                           }
                   }
                   va += NBSG;
           }
           set_context(saved_ctxnum);
           contextp->context_upmap = NULL;
           TAILQ_REMOVE(&context_active_queue, contextp, context_link);
           TAILQ_INSERT_TAIL(&context_free_queue, contextp,
                                             context_link);/* active??? XXX */
           pmap->pm_ctxnum = CTXINVAL;
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
 void context_print(context)          if (pmap_debug & PMD_CONTEXT)
     context_t context;                  printf("context_free: pmap %x context removed\n", pmap);
   #endif
           PMAP_UNLOCK();
   }
   
   static void
   context_init()
 {  {
     printf(" context_num=  0x%x\n", context->context_num);          int i;
     printf(" context_upmap=0x%x\n", context->context_upmap);  
           TAILQ_INIT(&context_free_queue);
           TAILQ_INIT(&context_active_queue);
   
           for (i=0; i < NCONTEXT; i++) {
                   context_array[i].context_num = i;
                   context_array[i].context_upmap = NULL;
                   TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
                                                     context_link);
   #ifdef  PMAP_DEBUG
                   if (pmap_debug & PMD_CONTEXT)
                           printf("context_init: context num %d is %x\n",
                                      i, &context_array[i]);
   #endif
           }
 }  }
   
   #ifdef  PMAP_DEBUG
 void pmap_print(pmap)  void pmap_print(pmap)
     pmap_t pmap;          pmap_t pmap;
 {  {
     printf(" pm_context=0x%x\n", pmap->pm_context);          printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
     printf(" pm_version=0x%x\n", pmap->pm_version);          printf(" pm_version=0x%x\n", pmap->pm_version);
     printf(" pm_segmap=0x%x\n", pmap->pm_segmap);          printf(" pm_segmap=0x%x\n", pmap->pm_segmap);
     if (pmap->pm_context) context_print(pmap->pm_context);  
 }  }
 #endif  #endif
   
 /* steal a pmeg without altering its mapping */  /*
    * Reserve a pmeg (forever) for use by PROM, etc.
 void pmeg_steal(pmeg_num)   * Contents are left as-is.  Called very early...
      int pmeg_num;   */
   void
   sun3_reserve_pmeg(sme)
           int sme;
 {  {
     pmeg_t pmegp;          pmeg_t pmegp;
   
     pmegp = pmeg_p(pmeg_num);  
     if (pmegp->pmeg_reserved)  
         mon_panic("pmeg_steal: attempt to steal an already reserved pmeg\n");  
     if (pmegp->pmeg_owner)  
         mon_panic("pmeg_steal: pmeg is already owned\n");  
   
 #if 0 /* def    PMAP_DEBUG */          /* Can not use pmeg_p() because it fails on SEGINV. */
     mon_printf("pmeg_steal: 0x%x\n", pmegp->pmeg_index);          pmegp = &pmeg_array[sme];
 #endif  
   
     /* XXX - Owned by kernel, but not "managed"... */          if (pmegp->pmeg_reserved)
     pmegp->pmeg_owner = NULL;                  mon_panic("sun3_reserve_pmeg: already reserved\n");
     pmegp->pmeg_reserved++;     /* keep count, just in case */          if (pmegp->pmeg_owner)
     TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);                  mon_panic("sun3_reserve_pmeg: already owned\n");
     pmegp->pmeg_qstate = PMEGQ_NONE;  
           /* XXX - Owned by kernel, but not really usable... */
           pmegp->pmeg_owner = NULL;
           pmegp->pmeg_reserved++; /* keep count, just in case */
           TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
           pmegp->pmeg_qstate = PMEGQ_NONE;
 }  }
   
 void pmeg_clean(pmegp)  static void
      pmeg_t pmegp;  pmeg_clean(pmegp)
           pmeg_t pmegp;
 {  {
     int i;          int i;
   
     for (i = 0; i < NPAGSEG; i++)          for (i = 0; i < NPAGSEG; i++)
         set_pte_pmeg(pmegp->pmeg_index, i, PG_INVAL);                  set_pte_pmeg(pmegp->pmeg_index, i, PG_INVAL);
 }  }
   
 /*  /*
Line 662  void pmeg_clean(pmegp)
Line 709  void pmeg_clean(pmegp)
  * at the head of the queue again.   * at the head of the queue again.
  */   */
   
 void pmeg_clean_free()  static void
   pmeg_clean_free()
 {  {
     pmeg_t pmegp, pmegp_first;          pmeg_t pmegp, pmegp_first;
   
     if (TAILQ_EMPTY(&pmeg_free_queue))  
         panic("pmap: no free pmegs available to clean");  
   
     pmegp_first = NULL;          CHECK_SPL();
   
     PMEG_LOCK();          if (TAILQ_EMPTY(&pmeg_free_queue))
     for (;;) {                  panic("pmap: no free pmegs available to clean");
   
         TAILQ_REMOVE_FIRST(pmegp, &pmeg_free_queue, pmeg_link);          pmegp_first = NULL;
   
 #ifdef  PMAP_DEBUG  
         if (pmegp->pmeg_index == pmap_db_watchpmeg) {  
             printf("pmeg_clean_free: watch pmeg 0x%x\n", pmegp->pmeg_index);  
             Debugger();  
         }  
 #endif  
   
         pmegp->pmeg_qstate = PMEGQ_NONE;          for (;;) {
   
         pmeg_clean(pmegp);                  TAILQ_REMOVE_FIRST(pmegp, &pmeg_free_queue, pmeg_link);
   
         TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);                  pmegp->pmeg_qstate = PMEGQ_NONE;
         pmegp->pmeg_qstate = PMEGQ_FREE;  
   
         if (pmegp == pmegp_first)                  pmeg_clean(pmegp);
             break;  
         if (pmegp_first == NULL)  
             pmegp_first = pmegp;  
   
     }                  TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
     PMEG_UNLK();                  pmegp->pmeg_qstate = PMEGQ_FREE;
 }  
   
 /*                  if (pmegp == pmegp_first)
  * Clean out an inactive pmeg in preparation for a new owner.                          break;
  * (Inactive means referenced in pm_segmap but not hardware.)                  if (pmegp_first == NULL)
  */                          pmegp_first = pmegp;
 void pmeg_flush(pmegp)  
      pmeg_t pmegp;  
 {  
     vm_offset_t pte, va;  
     pmap_t pmap;  
     int i;  
   
     pmap = pmegp->pmeg_owner;  
 #ifdef  DIAGNOSTIC  
     if (pmap == NULL)  
         panic("pmeg_flush: no owner, pmeg=0x%x", pmegp);  
     if (pmap == kernel_pmap)  
         panic("pmeg_flush: kernel_pmap, pmeg=0x%x", pmegp);  
 #endif  
   
 #if 0   /* XXX */  
     if (!pmegp->pmeg_vpages)  
         printf("pmap: pmeg_flush() on clean pmeg\n");  
 #endif  
   
     va = pmegp->pmeg_va;  
     for (i = 0; i < NPAGSEG; i++, va += NBPG) {  
         pte = get_pte_pmeg(pmegp->pmeg_index, i);  
         if (pte & PG_VALID) {  
             if (pv_initialized)  
                 save_modified_bits(pte);  
             pv_unlink(pmap, PG_PA(pte), va);  
             pmegp->pmeg_vpages--;  
             set_pte_pmeg(pmegp->pmeg_index, i, PG_INVAL);  
         }  
     }  
 #ifdef  PMAP_DEBUG  
     if (pmegp->pmeg_vpages != 0) {  
         printf("pmap: pmeg_flush() didn't result in a clean pmeg\n");  
         Debugger(); /* XXX */  
     }  
 #endif  
     /* Invalidate owner's software segmap. (XXX - paranoid) */  
     if (pmap->pm_segmap) {  
         i = VA_SEGNUM(pmegp->pmeg_va);  
 #ifdef  DIAGNOSTIC  
         if (i >= NUSEG)  
             panic("pmeg_flush: bad va, pmeg=%x", pmegp);  
 #endif  
 #ifdef  PMAP_DEBUG  
         if (pmap_debug & PMD_SEGMAP) {  
             printf("pm_segmap: pmap=%x i=%x old=%x new=ff (flsh)\n",  
                    pmap, i, pmap->pm_segmap[i]);  
         }          }
 #endif  
         pmap->pm_segmap[i] = SEGINV;  
     }  
     pmegp->pmeg_owner = NULL;   /* more paranoia */  
 }  }
   
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
 void pmeg_verify_empty(va)  static void
     vm_offset_t va;  pmeg_verify_empty(va)
           vm_offset_t va;
 {  {
     vm_offset_t pte;          vm_offset_t eva;
     vm_offset_t eva;          int pte;
   
     for (eva = va + NBSG;  va < eva; va += NBPG) {          for (eva = va + NBSG;  va < eva; va += NBPG) {
         pte = get_pte(va);                  pte = get_pte(va);
         if (pte & PG_VALID)                  if (pte & PG_VALID)
             panic("pmeg_verify_empty");                          panic("pmeg_verify_empty");
     }          }
 }  }
   
 void pmeg_print(pmegp)  static void
     pmeg_t pmegp;  pmeg_print(pmegp)
           pmeg_t pmegp;
 {  {
     printf("link_next=0x%x  link_prev=0x%x\n",          printf("link_next=0x%x  link_prev=0x%x\n",
            pmegp->pmeg_link.tqe_next,                     pmegp->pmeg_link.tqe_next,
            pmegp->pmeg_link.tqe_prev);                     pmegp->pmeg_link.tqe_prev);
     printf("index=0x%x owner=0x%x own_vers=0x%x\n",          printf("index=0x%x owner=0x%x own_vers=0x%x\n",
            pmegp->pmeg_index, pmegp->pmeg_owner,                     pmegp->pmeg_index, pmegp->pmeg_owner,
            pmegp->pmeg_owner_version);                     pmegp->pmeg_version);
     printf("va=0x%x wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",          printf("va=0x%x wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
            pmegp->pmeg_va, pmegp->pmeg_wired_count,                     pmegp->pmeg_va, pmegp->pmeg_wired,
            pmegp->pmeg_reserved, pmegp->pmeg_vpages,                     pmegp->pmeg_reserved, pmegp->pmeg_vpages,
            pmegp->pmeg_qstate);                     pmegp->pmeg_qstate);
 }  }
 #endif  #endif
   
 pmeg_t pmeg_allocate_invalid(pmap, va)  /*
     pmap_t pmap;   * Allocate a PMEG by whatever means necessary
      vm_offset_t va;   * (invalidating some mappings if necessary).
    */
   static pmeg_t
   pmeg_allocate(pmap, va)
           pmap_t pmap;
           vm_offset_t va;
 {  {
     pmeg_t pmegp;          pmeg_t pmegp;
   
           CHECK_SPL();
   
     PMEG_LOCK();          /* Get one onto the free list if necessary. */
     if (!TAILQ_EMPTY(&pmeg_free_queue)) {          pmegp = pmeg_free_queue.tqh_first;
         TAILQ_REMOVE_FIRST(pmegp, &pmeg_free_queue, pmeg_link);          if (!pmegp) {
                   /* Try inactive queue... */
                   pmegp = pmeg_inactive_queue.tqh_first;
                   if (!pmegp) {
                           /* Try active queue... */
                           pmegp = pmeg_active_queue.tqh_first;
                   }
                   if (!pmegp) {
                           panic("pmeg_allocate: failed");
                   }
                   /* This will put it on the free list. */
                   pmap_remove_range(pmegp->pmeg_owner,
                                                     pmegp->pmeg_va,
                                                     pmegp->pmeg_va + NBSG);
           }
   
           /* OK, free list has something for us to take. */
           pmegp = pmeg_free_queue.tqh_first;
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
           if (!pmegp) {
                   panic("pmeg_allocagte: still none free?");
           }
         if (pmegp->pmeg_qstate != PMEGQ_FREE)          if (pmegp->pmeg_qstate != PMEGQ_FREE)
             panic("pmeg_alloc_inv: bad on free queue: %x", pmegp);                  panic("pmeg_allocate: bad on free queue: %x", pmegp);
 #endif  
     }  
     else if (!TAILQ_EMPTY(&pmeg_inactive_queue)) {  
         TAILQ_REMOVE_FIRST(pmegp, &pmeg_inactive_queue, pmeg_link);  
 #ifdef  PMAP_DEBUG  
         if (pmap_debug & PMD_SEGMAP)  
             printf("pmeg_alloc_inv: take inactive 0x%x\n", pmegp->pmeg_index);  
         if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)  
             panic("pmeg_alloc_inv: bad on inactive queue: %x", pmegp);  
 #endif  
         pmeg_flush(pmegp);  
     }  
     else if (!TAILQ_EMPTY(&pmeg_active_queue)) {  
         TAILQ_REMOVE_FIRST(pmegp, &pmeg_active_queue, pmeg_link);  
 #ifdef  PMAP_DEBUG  
         if (pmap_debug & PMD_SEGMAP)  
             printf("pmeg_alloc_inv: take active %d\n", pmegp->pmeg_index);  
         if (pmegp->pmeg_qstate != PMEGQ_ACTIVE)  
             panic("pmeg_alloc_inv: bad on active queue: %x", pmegp);  
 #endif  #endif
         if (pmegp->pmeg_owner == kernel_pmap)          TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
             panic("pmeg_alloc_invalid: kernel pmeg: %x\n", pmegp);  
         pmap_remove_range(pmegp->pmeg_owner, pmegp->pmeg_va,  
                           pmegp->pmeg_va+NBSG);  
     } else  
         panic("pmeg_allocate_invalid: failed");  
   
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     if (pmegp->pmeg_index == pmap_db_watchpmeg) {          if (pmegp->pmeg_index == pmap_db_watchpmeg) {
         printf("pmeg_alloc_inv: watch pmeg 0x%x\n", pmegp->pmeg_index);                  printf("pmeg_allocate: watch pmeg 0x%x\n", pmegp->pmeg_index);
         Debugger();                  Debugger();
     }          }
 #endif  #endif
 #ifdef  DIAGNOSTIC  #ifdef  DIAGNOSTIC
     if (pmegp->pmeg_index == SEGINV)          if (pmegp->pmeg_index == SEGINV)
         panic("pmeg_alloc_inv: pmeg_index=ff");                  panic("pmeg_allocate: pmeg_index=ff");
     if (pmegp->pmeg_vpages)          if (pmegp->pmeg_vpages)
         panic("pmeg_alloc_inv: vpages!=0, pmegp=%x", pmegp);                  panic("pmeg_allocate: vpages!=0, pmegp=%x", pmegp);
 #endif  #endif
   
     pmegp->pmeg_owner = pmap;          /* Reassign this PMEG for the caller. */
     pmegp->pmeg_owner_version = pmap->pm_version;          pmegp->pmeg_owner = pmap;
     pmegp->pmeg_va = va;          pmegp->pmeg_version = pmap->pm_version;
     pmegp->pmeg_wired_count = 0;          pmegp->pmeg_va = va;
     pmegp->pmeg_reserved  = 0;          pmegp->pmeg_wired = 0;
     pmegp->pmeg_vpages  = 0;          pmegp->pmeg_reserved  = 0;
     if (pmap == kernel_pmap) {          pmegp->pmeg_vpages  = 0;
         TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);          if (pmap == kernel_pmap) {
         pmegp->pmeg_qstate = PMEGQ_KERNEL;                  TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
     } else {                  pmegp->pmeg_qstate = PMEGQ_KERNEL;
         TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);          } else {
         pmegp->pmeg_qstate = PMEGQ_ACTIVE;                  TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
                   pmegp->pmeg_qstate = PMEGQ_ACTIVE;
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
         if (pmap_debug & PMD_SEGMAP) {                  if (pmap_debug & PMD_SEGMAP) {
             printf("pm_segmap: pmap=%x i=%x old=%x new=%x (ainv)\n",                          printf("pm_segmap: pmap=%x i=%x old=%x new=%x (ainv)\n",
                    pmap, VA_SEGNUM(va),                                     pmap, VA_SEGNUM(va),
                    pmap->pm_segmap[VA_SEGNUM(va)],                                     pmap->pm_segmap[VA_SEGNUM(va)],
                    pmegp->pmeg_index);                                     pmegp->pmeg_index);
         }                  }
 #endif  #endif
         pmap->pm_segmap[VA_SEGNUM(va)] = pmegp->pmeg_index;          }
     }          /* Caller will verify that it's empty (if debugging). */
     /* XXX - Make sure pmeg is clean (in caller). */          return pmegp;
     PMEG_UNLK();  
     return pmegp;  
 }  }
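
Note that the new pmeg_allocate() no longer writes pm_segmap for user pmaps; that is left to the caller. A hedged sketch of the expected calling pattern (pmap_enter_user's real code is outside this hunk):

        /* Sketch: obtain a PMEG for va, preferring one we still own. */
        pmegp = pmeg_cache(pmap, sun3_trunc_seg(va));
        if (pmegp == PMEG_NULL) {
                pmegp = pmeg_allocate(pmap, sun3_trunc_seg(va));
                pmap->pm_segmap[VA_SEGNUM(va)] = pmegp->pmeg_index;
        }
        if (has_context(pmap))
                set_segmap(va, pmegp->pmeg_index);
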
   
 /*  /*
  * Put pmeg on the inactive queue.   * Put pmeg on the inactive queue, leaving its contents intact.
 * It will be cleaned out when re-allocated.   * This happens when we lose our context.  We may reclaim
    * this pmeg later if it is still in the inactive queue.
  */   */
 void pmeg_release(pmegp)  static void
      pmeg_t pmegp;  pmeg_release(pmegp)
           pmeg_t pmegp;
 {  {
     if (pmegp->pmeg_owner == kernel_pmap)          CHECK_SPL();
         panic("pmeg_release: kernel_pmap");  
   
     PMEG_LOCK();  
     if (pmegp->pmeg_qstate == PMEGQ_INACTIVE) {  
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
         printf("pmeg_release: already inactive\n");          if (pmegp->pmeg_qstate == PMEGQ_INACTIVE) {
         Debugger();                  printf("pmeg_release: already inactive\n");
                   Debugger();
                   return;
           }
 #endif  #endif
     } else {  
   #ifdef  DIAGNOSTIC
           if (pmegp->pmeg_owner == kernel_pmap)
                   panic("pmeg_release: kernel_pmap");
         if (pmegp->pmeg_qstate != PMEGQ_ACTIVE)          if (pmegp->pmeg_qstate != PMEGQ_ACTIVE)
             panic("pmeg_release: not q_active %x", pmegp);                  panic("pmeg_release: not q_active %x", pmegp);
   #endif
   
         TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);          TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
         pmegp->pmeg_qstate = PMEGQ_NONE;  
         TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);  
         pmegp->pmeg_qstate = PMEGQ_INACTIVE;          pmegp->pmeg_qstate = PMEGQ_INACTIVE;
     }          TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
     PMEG_UNLK();  
 }  }
   
 /*  /*
  * Put pmeg on the free queue.   * Move the pmeg to the free queue from wherever it is.
  * The pmeg might be in kernel_pmap   * The pmeg will be clean.  It might be in kernel_pmap.
  */   */
 void pmeg_release_empty(pmegp, segnum)  static void
      pmeg_t pmegp;  pmeg_free(pmegp, segnum)
      int segnum;          pmeg_t pmegp;
 {          int segnum;
   {
           CHECK_SPL();
   
   #ifdef  PMAP_DEBUG
           /* XXX - Caller should verify that it's empty. */
           if (pmegp->pmeg_vpages != 0)
                   panic("pmeg_free: vpages");
   #endif
   
           switch (pmegp->pmeg_qstate) {
           case PMEGQ_ACTIVE:
                   TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
                   break;
           case PMEGQ_INACTIVE:
                   TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                   break;
           case PMEGQ_KERNEL:
                   TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
                   break;
           default:
                   panic("pmeg_free: releasing bad pmeg");
                   break;
           }
   
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     /* XXX - Caller should verify that it's empty. */          if (pmegp->pmeg_index == pmap_db_watchpmeg) {
     if (pmegp->pmeg_vpages != 0)                  printf("pmeg_free: watch pmeg 0x%x\n",
         panic("pmeg_release_empty: vpages");                             pmegp->pmeg_index);
                   Debugger();
           }
 #endif  #endif
   
     PMEG_LOCK();          pmegp->pmeg_owner = NULL;
     switch (pmegp->pmeg_qstate) {          pmegp->pmeg_qstate = PMEGQ_FREE;
     case PMEGQ_ACTIVE:          TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
         TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);  }
         break;  
     case PMEGQ_INACTIVE:  /*
         TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link); /* XXX */   * Find a PMEG that was put on the inactive queue when we
         break;   * had our context stolen.  If found, move to active queue.
     case PMEGQ_KERNEL:   */
         TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);  static pmeg_t
         break;  pmeg_cache(pmap, va)
     default:          pmap_t pmap;
         panic("pmeg_release_empty: releasing bad pmeg");          vm_offset_t va;
         break;  {
     }          int segnum;
           pmeg_t pmegp;
   
           CHECK_SPL();
   
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     if (pmegp->pmeg_index == pmap_db_watchpmeg) {          if (pmap == kernel_pmap)
         printf("pmeg_release_empty: watch pmeg 0x%x\n",                  panic("pmeg_cache: kernel_pmap");
                pmegp->pmeg_index);  
         Debugger();  
     }  
 #endif  #endif
     pmegp->pmeg_qstate = PMEGQ_NONE;  
   
     if (pmegp->pmeg_owner->pm_segmap) {          if (pmap->pm_segmap == NULL)
                   return PMEG_NULL;
           segnum = VA_SEGNUM(va);
           if (segnum > NUSEG)             /* out of range */
                   return PMEG_NULL;
           if (pmap->pm_segmap[segnum] == SEGINV)  /* nothing cached */
                   return PMEG_NULL;
   
           pmegp = pmeg_p(pmap->pm_segmap[segnum]);
   
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
         if (pmap_debug & PMD_SEGMAP) {          if (pmegp->pmeg_index == pmap_db_watchpmeg) {
             printf("pm_segmap: pmap=%x i=%x old=%x new=ff (rele)\n",                  printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
                    pmegp->pmeg_owner, segnum,                  Debugger();
                    pmegp->pmeg_owner->pm_segmap[segnum]);  
         }          }
 #endif  #endif
         pmegp->pmeg_owner->pm_segmap[segnum] = SEGINV;  
     }          /*
            * Our segmap named a PMEG.  If it is no longer ours,
            * invalidate that entry in our segmap and return NULL.
            */
           if ((pmegp->pmeg_owner != pmap) ||
                   (pmegp->pmeg_version != pmap->pm_version) ||
                   (pmegp->pmeg_va != va))
           {
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     else {                  printf("pm_segmap: pmap=%x i=%x old=%x new=ff (cach)\n",
         if (pmegp->pmeg_owner != kernel_pmap) {                             pmap, segnum, pmap->pm_segmap[segnum]);
             printf("pmeg_release_empty: null segmap\n");                  Debugger(); /* XXX */
             Debugger();  #endif
                   pmap->pm_segmap[segnum] = SEGINV;
                   return PMEG_NULL; /* cache lookup failed */
         }          }
     }  
   #ifdef  PMAP_DEBUG
           /* Make sure it is on the inactive queue. */
           if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
                   panic("pmeg_cache: pmeg was taken: %x", pmegp);
 #endif  #endif
     pmegp->pmeg_owner = NULL;   /* XXX - paranoia */  
   
     TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);          TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
     pmegp->pmeg_qstate = PMEGQ_FREE;          pmegp->pmeg_qstate = PMEGQ_ACTIVE;
           TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
   
     PMEG_UNLK();          return pmegp;
 }  }
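  
  /*
   * Illustrative sketch (not part of this file): a fast-reload path in
   * the fault handler could use pmeg_cache() roughly as below.  The
   * function name and calling convention are hypothetical; only
   * pmeg_cache(), set_segmap() and sun3_trunc_seg() are real.  Assumes
   * the current context is already the faulting pmap's context.
   */
  #if 0
  static int
  pmeg_fault_reload(pmap, va)
          pmap_t pmap;
          vm_offset_t va;
  {
          pmeg_t pmegp;
  
          pmegp = pmeg_cache(pmap, sun3_trunc_seg(va));
          if (pmegp == PMEG_NULL)
                  return (0);     /* caller must take the slow path */
          set_segmap(sun3_trunc_seg(va), pmegp->pmeg_index);
          return (1);
  }
  #endif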
   
 pmeg_t pmeg_cache(pmap, va)  static void
      pmap_t pmap;  pmeg_set_wiring(pmegp, va, flag)
      vm_offset_t va;          pmeg_t pmegp;
           vm_offset_t va;
           int flag;
 {  {
     int segnum;          int idx, mask;
     pmeg_t pmegp;  
   
 #ifdef  PMAP_DEBUG          CHECK_SPL();
     if (pmap == kernel_pmap)          idx = VA_PTE_NUM(va);
         panic("pmeg_cache: kernel_pmap");          mask = 1 << idx;
 #endif  
   
     if (pmap->pm_segmap == NULL)          if (flag)
         return PMEG_NULL;                  pmegp->pmeg_wired |= mask;
     segnum = VA_SEGNUM(va);          else
     if (segnum > NUSEG)         /* out of range */                  pmegp->pmeg_wired &= ~mask;
         return PMEG_NULL;  }
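  
  /*
   * Worked example for the wiring bitmask above: VA_PTE_NUM(va) is the
   * page's index within its segment, and pmeg_wired keeps one bit per
   * page.  Wiring the page at index 5 computes mask = 1 << 5 = 0x20,
   * so pmeg_wired |= 0x20; unwiring clears the same bit.  A PMEG stays
   * wired while any bit remains set, which is presumably all that
   * is_pmeg_wired() tests.
   */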
     if (pmap->pm_segmap[segnum] == SEGINV)      /* nothing cached */  
         return PMEG_NULL;  void
   sun3_pmeg_init()
   {
           int x;
   
      pmegp = pmeg_p(pmap->pm_segmap[segnum]);          /* clear pmeg array, put it all on the free pmeg queue */
   
 #ifdef  PMAP_DEBUG          TAILQ_INIT(&pmeg_free_queue);
     if (pmegp->pmeg_index == pmap_db_watchpmeg) {          TAILQ_INIT(&pmeg_inactive_queue);
         printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);          TAILQ_INIT(&pmeg_active_queue);
         Debugger();          TAILQ_INIT(&pmeg_kernel_queue);
     }  
 #endif  
   
     /* Found a valid pmeg, make sure it's still ours. */          bzero(pmeg_array, NPMEG*sizeof(struct pmeg_state));
      if ((pmegp->pmeg_owner != pmap) ||          for (x = 0; x < NPMEG; x++) {
         (pmegp->pmeg_owner_version != pmap->pm_version) ||                  TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x],
         (pmegp->pmeg_va != va))                                    pmeg_link);
     {                  pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
 #ifdef  PMAP_DEBUG                  pmeg_array[x].pmeg_index = x;
         if (pmap_debug & PMD_SEGMAP) {  
             printf("pm_segmap: pmap=%x i=%x old=%x new=ff (cach)\n",  
                    pmap, segnum, pmap->pm_segmap[segnum]);  
         }          }
         /* XXX - Make sure it's not in the MMU? */  
         if (pmap->pm_context) {  
             int c, sme;  
             c = get_context();  
             set_context(pmap->pm_context->context_num);  
             sme = get_segmap(va);  
             set_context(c);  
             if (sme != SEGINV)  
                 panic("pmeg_cache: about to orphan pmeg");  
         }  
 #endif  
         pmap->pm_segmap[segnum] = SEGINV;  
         return PMEG_NULL; /* cache lookup failed */  
     }  
   
     PMEG_LOCK();  
 #ifdef  PMAP_DEBUG  
     /* Make sure it is on the inactive queue. */  
     if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)  
         panic("pmeg_cache: pmeg was taken: %x", pmegp);  
 #endif  
     TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);  
     pmegp->pmeg_qstate = PMEGQ_NONE;  
     TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);  
     pmegp->pmeg_qstate = PMEGQ_ACTIVE;  
   
     PMEG_UNLK();          /* The last pmeg is not usable. */
     return pmegp;          sun3_reserve_pmeg(SEGINV);
 }  }
   
 void pmeg_wire(pmegp)  #ifdef  PMAP_DEBUG
      pmeg_t pmegp;  static void
 {  pv_print(pa)
     pmegp->pmeg_wired_count++;          vm_offset_t pa;
 }  
 void pmeg_unwire(pmegp)  
      pmeg_t pmegp;  
 {  {
     pmegp->pmeg_wired_count--;          pv_entry_t pv;
 }  
   
 void pmeg_init()          if (!pv_initialized)
 {                  return;
     int x;  
           pv = pa_to_pvp(pa);
           printf("pv_list for pa %x: flags=%x\n", pa, pv->pv_flags);
           while (pv) {
                   printf("pv_entry %x pmap %x va %x next %x\n",
                              pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
                   pv = pv->pv_next;
           }
   }
   #endif  /* PMAP_DEBUG */
   
     /* clear pmeg array, put it all on the free pmeq queue */  static int
   pv_compute_cache(head)
           pv_entry_t head;
   {
          pv_entry_t pv;
          int cmapped, cwrite;
  
          if (!pv_initialized)
                  return 0;
          if (head->pv_pmap == NULL)
                  return 0;
  
          /* Count mappings and writable mappings of this page. */
          cmapped = cwrite = 0;
          for (pv = head; pv != NULL; pv = pv->pv_next) {
                  cmapped++;
                  if (pv->pv_flags & PV_WRITE)
                          cwrite++;
                  /* XXX - was a second PV_WRITE test; PV_NC is what makes sense here */
                  if (pv->pv_flags & PV_NC)
                          return PV_NC;
          }
          /* Cacheable when mapped only once, or never writable. */
          if ((cmapped == 1) || (cwrite == 0))
                  return 0;
          return PV_NC;
   }
   
     TAILQ_INIT(&pmeg_free_queue);  /*
     TAILQ_INIT(&pmeg_inactive_queue);   * Set or clear bits in all PTEs mapping a page.
     TAILQ_INIT(&pmeg_active_queue);   * Also does syncflags work while we are there...
     TAILQ_INIT(&pmeg_kernel_queue);   */
   static void
   pv_changepte(head, set_bits, clear_bits)
           pv_entry_t head;
           int set_bits;
           int clear_bits;
   {
           pv_entry_t pv;
           pmap_t pmap;
           vm_offset_t va;
           int pte, sme, s;
           int saved_ctx;
           boolean_t in_ctx;
   
           if (!pv_initialized)
                   return;
           if (head->pv_pmap == NULL)
                   return;
           if ((set_bits == 0) && (clear_bits == 0))
                   return;
   
           s = splpmap();
           saved_ctx = get_context();
   
           for (pv = head; pv != NULL; pv = pv->pv_next) {
                   pmap = pv->pv_pmap;
                   va = pv->pv_va;
   #ifdef  DIAGNOSTIC
                   /*
                    * Only the head may have a null pmap, and
                    * we checked for that above.
                    */
                   if (pmap == NULL)
                           panic("pv_changepte: null pmap");
   #endif
   
                  /* Is the PTE currently accessible in some context? */
                   in_ctx = FALSE;
                   if (pmap == kernel_pmap)
                           in_ctx = TRUE;
                   else if (has_context(pmap)) {
                           /* PMEG may be inactive. */
                           set_context(pmap->pm_ctxnum);
                           sme = get_segmap(va);
                           if (sme != SEGINV)
                                   in_ctx = TRUE;
                   }
   
                   if (in_ctx == TRUE) {
                           /*
                            * The PTE is in the current context.
                            */
   #ifdef  HAVECACHE
                           /* Make sure pte is up-to-date */
                           /* XXX should flush only when necessary */
                           cache_flush_page(va);
   #endif
                           pte = get_pte(va);
                   } else {
                           /*
                            * The PTE is not in any context.
                            */
                           if (pmap->pm_segmap == NULL)
                                   panic("pv_changepte: null segmap");
                           sme = pmap->pm_segmap[VA_SEGNUM(va)];
                           if (sme == SEGINV)
                                   panic("pv_changepte: SEGINV");
                           pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   }
   
     bzero(pmeg_array, NPMEG*sizeof(struct pmeg_state));  #ifdef  DIAGNOSTIC
     for (x =0 ; x<NPMEG; x++) {                  /*
             TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x],                   * PV entries point only to valid mappings.
                               pmeg_link);                   * XXX - Make sure pv_unlink() was done...
             pmeg_array[x].pmeg_qstate = PMEGQ_FREE;                   */
             pmeg_array[x].pmeg_index = x;                  if ((pte & PG_VALID) == 0)
     }                          panic("pv_changepte: not PG_VALID at va=0x%x\n", va);
   #endif
                   /* Get these while it's easy. */
                   if (pte & PG_MODREF) {
                           head->pv_flags |= (pte & PG_MODREF);
                           pte &= ~PG_MODREF;
                   }
   
                   /* Finally, set and clear some bits. */
                   pte |= set_bits;
                   pte &= ~clear_bits;
   
                   if (in_ctx == TRUE) {
                           set_pte(va, pte);
                   } else {
                           set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   }
           }
   
     /* The last pmeg is not usable. */          set_context(saved_ctx);
     pmeg_steal(SEGINV);          splx(s);
           return;
 }  }
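  
  /*
   * Typical uses of pv_changepte(), both of which appear elsewhere in
   * this file: write-protecting a page clears PG_WRITE in every
   * mapping, and inhibiting the cache sets PG_NC in every mapping:
   *
   *      pv_changepte(pa_to_pvp(pa), 0, PG_WRITE);
   *      pv_changepte(head, PG_NC, 0);
   */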
   
 void pv_print(pa)  /*
     vm_offset_t pa;   * Sync ref and mod bits in pvlist
 {   * (turns off same in hardware PTEs).
     pv_entry_t head;   */
   static void
   pv_syncflags(head)
           pv_entry_t head;
   {
           pv_entry_t pv;
           pmap_t pmap;
           vm_offset_t va;
           int pte, sme, s;
           int saved_ctx;
           boolean_t in_ctx;
   
           if (!pv_initialized)
                   return;
           if (head->pv_pmap == NULL)
                   return;
   
           PMAP_LOCK();
           saved_ctx = get_context();
   
           for (pv = head; pv != NULL; pv = pv->pv_next) {
                   pmap = pv->pv_pmap;
                   va = pv->pv_va;
   #ifdef  DIAGNOSTIC
                   /*
                    * Only the head may have a null pmap, and
                    * we checked for that above.
                    */
                   if (pmap == NULL)
                           panic("pv_syncflags: null pmap");
   #endif
   
                  /* Is the PTE currently accessible in some context? */
                   in_ctx = FALSE;
                   if (pmap == kernel_pmap)
                           in_ctx = TRUE;
                   else if (has_context(pmap)) {
                           /* PMEG may be inactive. */
                           set_context(pmap->pm_ctxnum);
                           sme = get_segmap(va);
                           if (sme != SEGINV)
                                   in_ctx = TRUE;
                   }
   
                   if (in_ctx == TRUE) {
                           /*
                            * The PTE is in the current context.
                            */
   #ifdef  HAVECACHE
                           /* Make sure pte is up-to-date */
                           /* XXX should flush only when necessary */
                           cache_flush_page(va);
   #endif
                           pte = get_pte(va);
                   } else {
                           /*
                            * The PTE is not in any context.
                            * XXX - Consider syncing MODREF bits
                             * when the PMEG loses its context?
                            */
                           if (pmap->pm_segmap == NULL)
                                   panic("pv_syncflags: null segmap");
                           sme = pmap->pm_segmap[VA_SEGNUM(va)];
                           if (sme == SEGINV)
                                   panic("pv_syncflags: SEGINV");
                           pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   }
   
   #ifdef  DIAGNOSTIC
                   /*
                    * PV entries point only to valid mappings.
                    * XXX - Make sure pv_unlink() was done...
                    */
                   if ((pte & PG_VALID) == 0)
                           panic("pv_syncflags: not PG_VALID at va=0x%x\n", va);
   #endif
                   /* OK, do what we came here for... */
                   if (pte & PG_MODREF) {
                           head->pv_flags |= (pte & PG_MODREF);
                           pte &= ~PG_MODREF;
                   }
   
                   if (in_ctx == TRUE) {
                           set_pte(va, pte);
                   } else {
                           set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   }
           }
   
     printf("pv_list for pa %x\n", pa);          set_context(saved_ctx);
   
     if (!pv_initialized) return;          PMAP_UNLOCK();
     head = pa_to_pvp(pa);  
     if (!head->pv_pmap) {  
         printf("empty pv_list\n");  
         return;          return;
     }  
     for (; head != NULL; head = head->pv_next) {  
         printf("pv_entry %x pmap %x va %x flags %x next %x\n",  
                head, head->pv_pmap,  
                head->pv_va,  
                head->pv_flags,  
                head->pv_next);  
     }  
 }  
   
 unsigned char pv_compute_cache(head)  
      pv_entry_t head;  
 {  
     pv_entry_t pv;  
     int cread, cwrite, ccache, clen;  
   
     if (!pv_initialized) return 0;  
     cread = cwrite = ccache = clen = 0;  
     if (!head->pv_pmap) return 0;  
     for (pv = head; pv != NULL; pv=pv->pv_next) {  
         cread++;  
         pv->pv_flags & PV_WRITE ? cwrite++ : 0 ;  
         pv->pv_flags & PV_WRITE ? ccache++ : 0 ;  
         if (ccache) return PV_NC;  
     }  
     if ((cread==1) || (cwrite ==0)) return 0;  
     return PV_NC;  
 }  
   
 int pv_compute_modified(head)  
      pv_entry_t head;  
 {  
     pv_entry_t pv;  
     int modified;  
     vm_offset_t pte;  
     unsigned int seg;  
   
     if (!pv_initialized) return 0;  
     if (!head->pv_pmap) return 0;  
     modified = 0;  
     for (pv = head; pv != NULL; pv=pv->pv_next) {  
         if (pv->pv_pmap == kernel_pmap) {  
             pte = get_pte(pv->pv_va);  
             if (pte & PG_MOD) return 1;  
             continue;  
         }  
         seg = VA_SEGNUM(pv->pv_va);  
         if (pv->pv_pmap->pm_segmap == NULL) {  
 #ifdef  PMAP_DEBUG  
             if (pmap_debug & PMD_SEGMAP) {  
                 printf("pv_compute_modified: null segmap\n");  
                 Debugger();     /* XXX */  
             }  
 #endif  
             continue;  
         }  
         if (pv->pv_pmap->pm_segmap[seg] == SEGINV)  
             continue;  
         if (get_pte_val(pv->pv_pmap, pv->pv_va,&pte)) {  
             if (!(pte & PG_VALID)) continue;  
             if (pte & PG_MOD) return 1;  
         }  
     }  
     return 0;  
 }  }
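  
  /*
   * Hedged sketch (not in this file) of how a pmap_is_modified() style
   * query could sit on top of pv_syncflags(): sync the hardware bits
   * into the head entry, then test the saved copy.  The function name
   * is hypothetical, and this assumes PG_MOD is among the PG_MODREF
   * bits that pv_syncflags() saves in pv_flags.
   */
  #if 0
  static int
  pa_is_modified(pa)
          vm_offset_t pa;
  {
          pv_entry_t head;
  
          head = pa_to_pvp(pa);
          pv_syncflags(head);
          return ((head->pv_flags & PG_MOD) != 0);
  }
  #endif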
   
   
 /* pv_entry support routines */  /* pv_entry support routines */
 void pv_remove_all(pa)  static void
      vm_offset_t pa;  pv_remove_all(pa)
 {          vm_offset_t pa;
     pv_entry_t pv;  {
     pmap_t pmap;          pv_entry_t pv;
     vm_offset_t va;          pmap_t pmap;
           vm_offset_t va;
   
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
     if (pmap_debug & PMD_REMOVE)          if (pmap_debug & PMD_REMOVE)
         printf("pv_remove_all(%x)\n", pa);                  printf("pv_remove_all(%x)\n", pa);
 #endif  #endif
     if (!pv_initialized)          if (!pv_initialized)
         return;                  return;
   
     for (;;) {          /* The PV head never moves. */
         pv = pa_to_pvp(pa);          pv = pa_to_pvp(pa);
         if (pv->pv_pmap == NULL)          while (pv->pv_pmap != NULL) {
             break;                  pmap = pv->pv_pmap;
         pmap = pv->pv_pmap;                  va   = pv->pv_va;
         va   = pv->pv_va;                  pmap_remove_range(pmap, va, va + NBPG);
         /*  #ifdef PMAP_DEBUG
          * XXX - Have to do pv_unlink here because the call                  /* Make sure it went away. */
          * pmap_remove_range might not unlink the pv entry.                  if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
          * This also implies extra pv_unlink calls...                  {
          */                          printf("pv_remove_all: head unchanged for pa=0x%x\n", pa);
         pv_unlink(pmap, pa, va);                          Debugger();
         pmap_remove_range(pmap, va, va + NBPG);                  }
         /* The list was modified, so restart at head. */  #endif
     }          }
 }  }
   
 unsigned char pv_link(pmap, pa, va, flags)  /*
       pmap_t pmap;   * The pmap system is asked to look up all mappings that point to a
      vm_offset_t pa, va;   * given physical memory address.  This function adds a new element
      unsigned char flags;   * to the list of mappings maintained for the given physical address.
  {   * Returns PV_NC if the (new) pvlist says that the address cannot
     unsigned char nflags;   * be cached.
     pv_entry_t last,pv,head,npv;   */
     int s;  static int
   pv_link(pmap, pa, va, flags)
           pmap_t pmap;
           vm_offset_t pa, va;
           u_int flags;
   {
           pv_entry_t head, npv;
   
           if (!pv_initialized)
                   return 0;
   
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
     if ((pmap_debug & PMD_LINK) ||          if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
         (va == pmap_db_watchva))                  printf("pv_link(%x, %x, %x, %x)\n", pmap, pa, va, flags);
     {                  /* pv_print(pa); */
         printf("pv_link(%x, %x, %x, %x)\n", pmap, pa, va, flags);          }
         /* pv_print(pa); */  #endif
     }  
 #endif  
     if (!pv_initialized) return 0;  
     PMAP_LOCK();  
     head = pv = pa_to_pvp(pa);  
     if (pv->pv_pmap == NULL) {  /* not currently "managed" */  
         pv->pv_va = va;  
         pv->pv_pmap = pmap,  
         pv->pv_next = NULL;  
         pv->pv_flags = flags;  
         force_cache_flags(pa, flags);  
         PMAP_UNLOCK();  
         return flags & PV_NC;  
     }  
     for (npv = pv ; npv != NULL; last= npv, npv = npv->pv_next ) {  
         if ((npv->pv_pmap != pmap) || (npv->pv_va != va)) continue;  
         if (flags == npv->pv_flags) {/* no change */  
             PMAP_UNLOCK();  
             return get_cache_flags(pa);  
         }  
         npv->pv_flags = flags;  
         goto recompute;  
     }  
 /*zalloc(pv_zone);*/  
     pv = malloc(sizeof(struct pv_entry), M_VMPVENT, M_WAITOK);  
     pv->pv_va = va;  
     pv->pv_pmap = pmap,  
     pv->pv_next = NULL;  
     pv->pv_flags = flags;  
     last->pv_next = pv;  
   
  recompute:  
     if (get_cache_flags(pa) & PG_NC) return flags & PV_NC; /* already NC */  
     if (flags & PV_NC) {        /* being NCed, wasn't before */  
         force_cache_flags(pa, flags);  
         pv_change_pte(head, MAKE_PV_REAL(PV_NC), 0);  
         PMAP_UNLOCK();  
         return flags & PV_NC;  
     }  
     nflags = pv_compute_cache(head);  
     force_cache_flags(pa, nflags);  
     pv_change_pte(head, MAKE_PV_REAL(nflags), 0); /*  */  
     PMAP_UNLOCK();  
     return nflags & PV_NC;  
 }  
   
 void pv_change_pte(pv_list, set_bits, clear_bits)  
      pv_entry_t pv_list;  
      vm_offset_t set_bits;  
      vm_offset_t clear_bits;  
 {  
     pv_entry_t pv;  
     int context;  
     vm_offset_t pte;  
     unsigned int seg;  
   
     if (!pv_initialized) return;          CHECK_SPL();
     if (pv_list->pv_pmap == NULL) /* nothing to hack on */  
         return;          /* Only the non-cached bit is of interest. */
     if (!set_bits && !clear_bits) return; /* nothing to do */          flags = flags & PV_NC;
     context = get_context();  
     for (pv = pv_list; pv != NULL; pv=pv->pv_next) {          head = pa_to_pvp(pa);
         if (pv->pv_pmap == NULL)          if (head->pv_pmap == NULL) {
             panic("pv_list corrupted");                  /* not currently mapped anywhere */
         if (pv->pv_pmap == kernel_pmap) {                  /* pmap_stats.ps_enter_firstpv++; */
             pte = get_pte(pv->pv_va);                  head->pv_va = va;
              pte|=set_bits;                  head->pv_pmap = pmap;
             pte&=~clear_bits;                  head->pv_next = NULL;
             set_pte(pv->pv_va, pte);                  head->pv_flags = flags;
             continue;                  return (flags);
         }          }
         seg = VA_SEGNUM(pv->pv_va);  
         if (pv->pv_pmap->pm_segmap == NULL) {  
 #if 0 /* def    PMAP_DEBUG */  
             if (pmap_debug & PMD_SEGMAP) {  
                 printf("pv_change_pte: null segmap\n");  
                 Debugger();     /* XXX */  
             }  
 #endif  
             continue;  
         }  
         if (pv->pv_pmap->pm_segmap[seg] == SEGINV)  
             continue;  
         if (!get_pte_val(pv->pv_pmap, pv->pv_va, &pte))  
             continue;  
         pte|=set_bits;  
         pte&=~clear_bits;  
         set_pte_val(pv->pv_pmap, pv->pv_va, pte);  
     }  
     set_context(context);  
 }  
   
 void pv_unlink(pmap, pa, va)  
      pmap_t pmap;  
      vm_offset_t pa, va;  
 {  
     pv_entry_t pv,pv_list,dead,last;  
     unsigned char saved_flags,nflags;  
   
     if (!pv_initialized) return;  
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
     if ((pmap_debug & PMD_UNLINK) ||          /* XXX - See if this mapping is already in the list? */
         (va == pmap_db_watchva))          for (npv = head; npv != NULL; npv = npv->pv_next) {
     {                  if ((npv->pv_pmap == pmap) && (npv->pv_va == va))
         printf("pv_unlink(%x, %x, %x)\n", pmap, pa, va);                          panic("pv_link: duplicate entry for PA=0x%x", pa);
     }          }
 #endif  #endif
     pv_list = pa_to_pvp(pa);  
   
     if (pv_list->pv_pmap == NULL) {          /*
            * Before entering the new mapping, see if it will cause
            * old mappings to become aliased (need cache inhibit).
            */
           /* pmap_stats.ps_enter_secondpv++; */
   
           head->pv_flags |= flags;
           if ((head->pv_flags & PV_NC) == 0) {
                   for (npv = head; npv != NULL; npv = npv->pv_next) {
                           if (BADALIAS(va, npv->pv_va)) {
                                   head->pv_flags |= PV_NC;
                                   pv_changepte(head, PG_NC, 0);
                                   break;
                           }
                   }
           }
           npv = (pv_entry_t) malloc(sizeof(*npv), M_VMPVENT, M_WAITOK);
           npv->pv_next = head->pv_next;
           npv->pv_pmap = pmap;
           npv->pv_va   = va;
           head->pv_next = npv;
   
           return (head->pv_flags & PV_NC);
   }
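  
  /*
   * Note on BADALIAS (defined elsewhere in this file): it decides
   * whether two VAs mapping the same physical page would land at
   * different indexes of a virtually-indexed cache.  An illustrative
   * (assumed) form, with CACHE_INDEX_MASK hypothetical:
   *
   *      #define BADALIAS(a1, a2)  (((a1) ^ (a2)) & CACHE_INDEX_MASK)
   *
   * E.g. with a 64Kb virtually-indexed cache, va1=0x10000 and
   * va2=0x30000 share a cache index (the XOR sets no index bits), but
   * va1=0x10000 and va2=0x18000 do not, so that pair must be made
   * non-cacheable.
   */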
   
   /*
    * pv_unlink is a helper function for pmap_remove.
    * It removes the appropriate (pmap, pa, va) entry.
    *
    * Once the entry is removed, if the pv_table head has the cache
    * inhibit bit set, see if we can turn that off; if so, walk the
    * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
    * definition nonempty, since it must have at least two elements
    * in it to have PV_NC set, and we only remove one here.)
    */
   static void
   pv_unlink(pmap, pa, va)
           pmap_t pmap;
           vm_offset_t pa, va;
   {
           pv_entry_t head, npv;
   
           if (!pv_initialized)
                   return;
   
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
         if (pmap_debug & PMD_UNLINK) {          if ((pmap_debug & PMD_UNLINK) ||
             printf("pv_unlink: empty list\n");                  (va == pmap_db_watchva))
           {
                   printf("pv_unlink(%x, %x, %x)\n", pmap, pa, va);
         }          }
 #endif  #endif
         return;          head = pa_to_pvp(pa);
     }  
   #ifdef DIAGNOSTIC
           if (head->pv_pmap == NULL)
                   panic("pv_unlink: empty list");
   #endif
   
     for (pv = pv_list, last = NULL; pv != NULL; last=pv, pv=pv->pv_next) {  
         if ((pv->pv_pmap != pmap) || (pv->pv_va != va))  
             continue;  
         /*          /*
          * right entry now how do we "remove" it, pte will be removed           * First entry is special (sigh).
          * by code above us. so we should only do deletes, and maybe some cache  
          * recomputation  
          */           */
         saved_flags = pv->pv_flags;          npv = head->pv_next;
         if (pv == pv_list) {    /* first entry, mega annoying */          if (head->pv_pmap == pmap && head->pv_va == va) {
             if (pv->pv_next) {  /* is something after us, if so copy it                  /* pmap_stats.ps_unlink_pvfirst++; */
                                    forward */                  if (npv != NULL) {
                 dead = pv->pv_next;                          /* Copy next entry into (fixed) head. */
                 pv->pv_va = dead->pv_va;                          head->pv_next = npv->pv_next;
                 pv->pv_pmap = dead->pv_pmap;                          head->pv_pmap = npv->pv_pmap;
                 pv->pv_next = dead->pv_next;                          head->pv_va   = npv->pv_va;
                 pv->pv_flags = dead->pv_flags;                          free((caddr_t)npv, M_VMPVENT);
                 /* zfree*/                  } else {
                 free(dead, M_VMPVENT);                          /* No next entry, list is now empty. */
             }                          head->pv_pmap = NULL;
             else                  }
                 pv->pv_pmap = NULL; /* enough to invalidate */          } else {
         }                  register pv_entry_t prev;
         else {  
             last->pv_next = pv->pv_next;                  for (prev = head;; prev = npv, npv = npv->pv_next) {
             /*      zfree(pv);*/                          /* pmap_stats.ps_unlink_pvsearch++; */
             free(pv, M_VMPVENT);                          if (npv == NULL)
         }                                  panic("pv_unlink: not on list");
         if (saved_flags & PV_NC) {/* this entry definitely caused a NC                          if (npv->pv_pmap == pmap && npv->pv_va == va)
                                   * condition.                                  break;
                                   */                  }
             nflags = pv_compute_cache(pv_list);                  prev->pv_next = npv->pv_next;
             set_cache_flags(pa, nflags);                  free((caddr_t)npv, M_VMPVENT);
             pv_change_pte(pv_list, MAKE_PV_REAL(nflags), 0); /*  */          }
   
           if (head->pv_flags & PV_NC) {
                   /*
                    * Not cached: check to see if we can fix that now.
                    */
                   va = head->pv_va;
                   for (npv = head->pv_next; npv != NULL; npv = npv->pv_next)
                           if (BADALIAS(va, npv->pv_va))
                                   return;
                   head->pv_flags &= ~PV_NC;
                   pv_changepte(head, 0, PG_NC);
         }          }
         return;  
     }  
 #ifdef PMAP_DEBUG  
     if (pmap_debug & PMD_UNLINK) {  
         printf("pv_unlink: not on list\n");  
     }  
 #endif  
 }  
 void pv_init()  
 {  
     int i;  
   
     pv_head_table = (pv_entry_t) kmem_alloc(kernel_map,  
                                             PA_PGNUM(avail_end) *  
                                             sizeof(struct pv_entry));  
     if (!pv_head_table)  
         mon_panic("pmap: kmem_alloc() of pv table failed");  
     for (i = 0; i < PA_PGNUM(avail_end); i++) { /* dumb XXX*/  
         bzero(&pv_head_table[i], sizeof(struct pv_entry));  
     }  
     pv_modified_table = (unsigned char *) kmem_alloc(kernel_map,  
                                                      PA_PGNUM(avail_end)*  
                                                      sizeof(char));  
     if (!pv_modified_table)  
         mon_panic("pmap: kmem_alloc() of pv modified table failed");  
   
     bzero(pv_modified_table, sizeof(char)* PA_PGNUM(avail_end));  
     pv_cache_table = (unsigned char *) kmem_alloc(kernel_map,  
                                                   PA_PGNUM(avail_end) *  
                                                   sizeof(char));  
     if (!pv_cache_table)  
         mon_panic("pmap: kmem_alloc() of pv cache table failed");  
     bzero(pv_cache_table, sizeof(char)* PA_PGNUM(avail_end));  
     pv_initialized++;  
 }  }
   
 void sun3_protection_init()  static void
   pv_init()
   {
           int sz;
   
           sz = PA_PGNUM(avail_end);
           sz *= sizeof(struct pv_entry);
   
           pv_head_table = (pv_entry_t) kmem_alloc(kernel_map, sz);
           if (!pv_head_table)
                   mon_panic("pmap: kmem_alloc() of pv table failed");
           bzero((caddr_t) pv_head_table, sz);
   
           pv_initialized++;
   }
   
   void
   sun3_protection_init()
 {  {
     unsigned int *kp, prot;          unsigned int *kp, prot;
   
     kp = protection_converter;          kp = protection_converter;
     for (prot = 0; prot < 8; prot++) {          for (prot = 0; prot < 8; prot++) {
         switch (prot) {                  switch (prot) {
             /* READ WRITE EXECUTE */                          /* READ WRITE EXECUTE */
         case VM_PROT_NONE |VM_PROT_NONE |VM_PROT_NONE:                  case VM_PROT_NONE |VM_PROT_NONE |VM_PROT_NONE:
             *kp++ = PG_INVAL;                          *kp++ = PG_INVAL;
             break;                          break;
         case VM_PROT_NONE |VM_PROT_NONE |VM_PROT_EXECUTE:                  case VM_PROT_NONE |VM_PROT_NONE |VM_PROT_EXECUTE:
         case VM_PROT_READ |VM_PROT_NONE |VM_PROT_NONE:                  case VM_PROT_READ |VM_PROT_NONE |VM_PROT_NONE:
         case VM_PROT_READ |VM_PROT_NONE |VM_PROT_EXECUTE:                  case VM_PROT_READ |VM_PROT_NONE |VM_PROT_EXECUTE:
             *kp++ = PG_VALID;                          *kp++ = PG_VALID;
             break;                          break;
         case VM_PROT_NONE |VM_PROT_WRITE |VM_PROT_NONE:                  case VM_PROT_NONE |VM_PROT_WRITE |VM_PROT_NONE:
         case VM_PROT_NONE |VM_PROT_WRITE |VM_PROT_EXECUTE:                  case VM_PROT_NONE |VM_PROT_WRITE |VM_PROT_EXECUTE:
         case VM_PROT_READ |VM_PROT_WRITE |VM_PROT_NONE:                  case VM_PROT_READ |VM_PROT_WRITE |VM_PROT_NONE:
         case VM_PROT_READ |VM_PROT_WRITE |VM_PROT_EXECUTE:                  case VM_PROT_READ |VM_PROT_WRITE |VM_PROT_EXECUTE:
             *kp++ = PG_VALID|PG_WRITE;                          *kp++ = PG_VALID|PG_WRITE;
             break;                          break;
                   }
         }          }
     }  
 }  }
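  
  /*
   * Illustrative use (assumed; the callers are not in this section):
   * pmap_enter() and friends presumably index this table with the low
   * three VM_PROT_* bits to build the PTE protection, e.g.
   *
   *      pte_proto = protection_converter[prot & 7];
   *
   * so VM_PROT_READ|VM_PROT_WRITE yields PG_VALID|PG_WRITE.  The name
   * pte_proto is hypothetical here.
   */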
 /* pmap maintenance routines */  /* pmap maintenance routines */
   
 void pmap_common_init(pmap)  void
      pmap_t pmap;  pmap_common_init(pmap)
           pmap_t pmap;
   {
           bzero(pmap, sizeof(struct pmap));
           pmap->pm_refcount=1;
           pmap->pm_version = pmap_version++;
           pmap->pm_ctxnum = CTXINVAL;
           simple_lock_init(&pmap->pm_lock);
   }
   
   /*
    * Prepare the kernel for VM operations.
    * This is called by sun3_startup:sun3_bootstrap()
    * after the "start/end" globals are set.
    */
   void
   pmap_bootstrap()
   {
           extern void vm_set_page_size();
   
           /*
            * Reserve a segment for the kernel to use to access a pmeg
            * that is not currently mapped into any context/segmap.
            * The kernel temporarily maps such a pmeg into this segment.
            */
           temp_seg_va = virtual_avail;
           virtual_avail += NBSG;
   #ifdef  DEBUG
           if (temp_seg_va & SEGOFSET)
                   mon_panic("pmap_bootstrap: temp_seg_va");
   #endif
   
           /* Initialization for pmap_next_page() */
           avail_next = avail_start;
   
           PAGE_SIZE = NBPG;
           vm_set_page_size();
   
           sun3_protection_init();
   
           /* after setting up some structures */
   
           kernel_pmap = &kernel_pmap_store;
           pmap_common_init(kernel_pmap);
   
           context_init();
   
           pmeg_clean_free();
   }
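  
  /*
   * Hedged sketch of how the temporary segment reserved above is
   * presumably used by get_pte_pmeg() (defined elsewhere in this
   * file): borrow temp_seg_va to reach a PMEG that is not in any
   * segmap, then give the segment back.  The details below are an
   * assumption, not this file's code.
   */
  #if 0
  static int
  get_pte_pmeg_sketch(sme, ptenum)
          int sme, ptenum;
  {
          vm_offset_t va;
          int pte;
  
          set_segmap(temp_seg_va, sme);   /* borrow the temp segment */
          va = temp_seg_va + (ptenum * NBPG);
          pte = get_pte(va);
          set_segmap(temp_seg_va, SEGINV);        /* give it back */
          return (pte);
  }
  #endif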
   
   /*
    * For our convenience, vm_page.c implements:
    *       pmap_startup(), pmap_steal_memory()
    * using the functions:
    *       pmap_virtual_space(), pmap_free_pages(), pmap_next_page(),
    * which are much simpler to implement.
    */
   
   /*
    * How much virtual space does this kernel have?
    * (After mapping kernel text, data, etc.)
    */
   void
   pmap_virtual_space(v_start, v_end)
           vm_offset_t *v_start;
           vm_offset_t *v_end;
 {  {
     bzero(pmap, sizeof(struct pmap));          *v_start = virtual_avail;
     pmap->pm_refcount=1;          *v_end   = virtual_end;
     pmap->pm_version = pmap_version++;  
     simple_lock_init(&pmap->pm_lock);  
 }  }
   
 void pmap_bootstrap()  /*
    * How many physical pages will pmap_next_page() return?
   * It may return a number larger than the actual number of
    * free pages if that is convenient (i.e. physical memory
    * might have some small holes not worth counting here).
    * The return value is used to allocate per-page arrays.
    */
   u_int
   pmap_free_pages()
 {  {
      extern void vm_set_page_size();          int bytes;
   
     PAGE_SIZE = NBPG;          bytes = avail_end - avail_start;
     vm_set_page_size();          return(sun3_btop(bytes));
   }
   
     sun3_protection_init();  /*
    * If there are still physical pages available, put the address of
    * the next available one at paddr and return non-zero, otherwise
    * return zero to indicate that there are no more free pages.
    */
   int
   pmap_next_page(paddr)
           vm_offset_t *paddr;
   {
           /* Used up all ranges? */
           if (avail_next >= avail_end)
                   return FALSE;
   
    /* after setting up some structures */          /* Have memory, will return TRUE */
           *paddr = avail_next;
           avail_next += NBPG;
   
     kernel_pmap = &kernel_pmap_store;          /* Is it time to skip over the hole? */
     pmap_common_init(kernel_pmap);          if (avail_next == hole_start)
                   avail_next += sun3_round_page(hole_size);
   
     /* pmeg_init(); done in sun3_vm_init() */          return TRUE;
     context_init();  
   
     pmeg_clean_free();  
 }  }
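  
  /*
   * Hedged sketch of the consumer side (it lives in vm_page.c, not
   * here): size the per-page arrays from pmap_free_pages(), then pull
   * pages until pmap_next_page() runs dry.  The loop body is a
   * placeholder.
   */
  #if 0
  /* Inside a vm_page.c style startup routine (hypothetical fragment): */
          vm_offset_t pa;
          u_int npages;
  
          npages = pmap_free_pages();     /* upper bound; holes allowed */
          while (pmap_next_page(&pa)) {
                  /* ... add the page at pa to the VM free list ... */
          }
  #endif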
   
 /*  /*
  * Bootstrap memory allocator. This function allows for early dynamic   * pmap_page_index()
  * memory allocation until the virtual memory system has been bootstrapped.  
  * After that point, either kmem_alloc or malloc should be used. This  
  * function works by stealing pages from the (to be) managed page pool,  
  * stealing virtual address space, then mapping the pages and zeroing them.  
  *   *
  * It should be used from pmap_bootstrap till vm_page_startup, afterwards   *   Given a physical address, return the page number that it is in
  * it cannot be used, and will generate a panic if tried. Note that this   *   the block of free memory.  Return -1 if not in managed range.
  * memory will never be freed, and in essence it is wired down.   *
    *   There can be some values that we never return (i.e. a hole)
    *   as long as the pages in the hole were not given to the VM code
    *   by pmap_next_page() and our return values are less than the
    *   value that was returned by pmap_free_pages().
  */   */
 void *  u_long
 pmap_bootstrap_alloc(size)  pmap_page_index(pa)
         int size;          vm_offset_t pa;
 {  {
         register void *mem;          u_long idx;
   
         extern boolean_t vm_page_startup_initialized;          if (pa < avail_start || pa >= avail_end)
         if (vm_page_startup_initialized)                  return -1;
                 panic("pmap_bootstrap_alloc: called after startup initialized");  
   
         size = round_page(size);          return (sun3_btop(pa - avail_start));
         mem = (void *)virtual_avail;  
         virtual_avail = pmap_map(virtual_avail, avail_start,  
             avail_start + size, VM_PROT_READ|VM_PROT_WRITE);  
         avail_start += size;  
         bzero((void *)mem, size);  
         return (mem);  
 }  }
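  
  /*
   * Worked example: with 8Kb pages and (an assumed value) avail_start =
   * 0x10000, pa = 0x14000 lies two pages into the managed range, so
   * pmap_page_index() returns sun3_btop(0x4000) == 2.  Any pa below
   * avail_start or at/above avail_end returns -1.
   */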
   
   
 /*  /*
  *      Initialize the pmap module.   *      Initialize the pmap module.
  *      Called by vm_init, to initialize any structures that the pmap   *      Called by vm_init, to initialize any structures that the pmap
  *      system needs to map virtual memory.   *      system needs to map virtual memory.
  */   */
 void  void
 pmap_init(phys_start, phys_end)  pmap_init()
         vm_offset_t     phys_start, phys_end;  
 {  {
     extern int physmem;          extern int physmem;
   
     pv_init();          pv_init();
     physmem = btoc(phys_end);          physmem = btoc(avail_end);
 }  }
   
   /*
    * Record the mapping for kernel text/data/bss
    */
 vm_offset_t  vm_offset_t
 pmap_map(virt, start, end, prot)  pmap_map(virt, start, end, prot)
      vm_offset_t        virt;          vm_offset_t     virt;
      vm_offset_t        start;          vm_offset_t     start;
      vm_offset_t        end;          vm_offset_t     end;
      int                prot;          int             prot;
 {  {
     while (start < end) {          while (start < end) {
         pmap_enter(kernel_pmap, virt, start, prot, FALSE);                  pmap_enter(kernel_pmap, virt, start, prot, FALSE);
         virt += NBPG;                  virt += NBPG;
         start += NBPG;                  start += NBPG;
     }          }
     return(virt);          return(virt);
 }  }
   
 void pmap_user_pmap_init(pmap)  void
      pmap_t pmap;  pmap_user_pmap_init(pmap)
 {          pmap_t pmap;
     int i;  {
     pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);          int i;
     for (i=0; i < NUSEG; i++) {          pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);
         pmap->pm_segmap[i] = SEGINV;          for (i=0; i < NUSEG; i++) {
     }                  pmap->pm_segmap[i] = SEGINV;
           }
 }  }
   
 /*  /*
Line 1519  pmap_t
Line 1702  pmap_t
 pmap_create(size)  pmap_create(size)
         vm_size_t       size;          vm_size_t       size;
 {  {
     pmap_t pmap;          pmap_t pmap;
   
     if (size)          if (size)
         return NULL;                  return NULL;
   
     PMAP_DB_LOCK();          pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
     pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);          pmap_common_init(pmap);
     pmap_common_init(pmap);          pmap_user_pmap_init(pmap);
     pmap_user_pmap_init(pmap);          return pmap;
     PMAP_DB_UNLK();  
     return pmap;  
 }  }
   
 /*  /*
Line 1539  pmap_create(size)
Line 1720  pmap_create(size)
  */   */
 void  void
 pmap_release(pmap)  pmap_release(pmap)
      struct pmap *pmap;          struct pmap *pmap;
 {  {
   
     if (pmap == kernel_pmap)          if (pmap == kernel_pmap)
         panic("pmap_release: kernel_pmap!");                  panic("pmap_release: kernel_pmap!");
   
     PMAP_DB_LOCK();          if (has_context(pmap))
     if (pmap->pm_context)                  context_free(pmap);
         context_free(pmap);          free(pmap->pm_segmap, M_VMPMAP);
     free(pmap->pm_segmap, M_VMPMAP);          pmap->pm_segmap = NULL;
     pmap->pm_segmap = NULL;  
     PMAP_DB_UNLK();  
 }  }
   
   
Line 1561  pmap_release(pmap)
Line 1740  pmap_release(pmap)
  */   */
 void  void
 pmap_destroy(pmap)  pmap_destroy(pmap)
       pmap_t pmap;          pmap_t pmap;
 {  {
     int count;          int count;
   
     if (pmap == NULL)          if (pmap == NULL)
         return; /* XXX - Duh! */                  return; /* XXX - Duh! */
   
     PMAP_DB_LOCK();  
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
     if (pmap_debug & PMD_CREATE)          if (pmap_debug & PMD_CREATE)
         printf("pmap_destroy(%x)\n", pmap);                  printf("pmap_destroy(%x)\n", pmap);
 #endif  #endif
     if (pmap == kernel_pmap)          if (pmap == kernel_pmap)
         panic("pmap_destroy: kernel_pmap!");                  panic("pmap_destroy: kernel_pmap!");
     pmap_lock(pmap);          pmap_lock(pmap);
     count = pmap_del_ref(pmap);          count = pmap_del_ref(pmap);
     pmap_unlock(pmap);          pmap_unlock(pmap);
     if (count == 0) {          if (count == 0) {
         pmap_release(pmap);                  pmap_release(pmap);
         free((caddr_t)pmap, M_VMPMAP); /* XXXX -- better make sure we                  free((caddr_t)pmap, M_VMPMAP);
                                           it this way allocate */          }
     }  
     PMAP_DB_UNLK();  
 }  }
   
 /*  /*
  *      pmap_page_protect:   *        pmap_page_protect:
  *   *
  *      Lower the permission for all mappings to a given page.   *        Lower the permission for all mappings to a given page.
  */   */
 void  void
 pmap_page_protect(pa, prot)  pmap_page_protect(pa, prot)
         vm_offset_t     pa;          vm_offset_t      pa;
         vm_prot_t       prot;          vm_prot_t          prot;
 {  {
     int s;          int s;
   
           PMAP_LOCK();
   
     PMAP_LOCK();  
     PMAP_DB_LOCK();  
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
     if (pmap_debug & PMD_PROTECT)          if (pmap_debug & PMD_PROTECT)
         printf("pmap_page_protect(%x, %x)\n", pa, prot);                  printf("pmap_page_protect(%x, %x)\n", pa, prot);
 #endif  #endif
     switch (prot) {          switch (prot) {
     case VM_PROT_ALL:          case VM_PROT_ALL:
         break;                  break;
     case VM_PROT_READ:          case VM_PROT_READ:
     case VM_PROT_READ|VM_PROT_EXECUTE:          case VM_PROT_READ|VM_PROT_EXECUTE:
         pv_change_pte(pa_to_pvp(pa), 0, PG_WRITE);                  pv_changepte(pa_to_pvp(pa), 0, PG_WRITE);
         break;                  break;
     default:          default:
         /* remove mapping for all pmaps that have it:                  /* remove mapping for all pmaps that have it:
          *                   *
          * follow pv trail to pmaps and temporarily delete it that way.                   * follow pv trail to pmaps and temporarily delete it that way.
          * keep looping till all mappings go away                   * keep looping till all mappings go away
          */                   */
         pv_remove_all(pa);                  pv_remove_all(pa);
     }          }
     PMAP_DB_UNLK();  
     PMAP_UNLOCK();          PMAP_UNLOCK();
 }  }
   
 /*  /*
Line 1630  void
Line 1806  void
 pmap_reference(pmap)  pmap_reference(pmap)
         pmap_t  pmap;          pmap_t  pmap;
 {  {
     PMAP_DB_LOCK();          if (pmap != NULL) {
     if (pmap != NULL) {                  pmap_lock(pmap);
         pmap_lock(pmap);                  pmap_add_ref(pmap);
         pmap_add_ref(pmap);                  pmap_unlock(pmap);
         pmap_unlock(pmap);          }
     }  
     PMAP_DB_UNLK();  
 }  }
   
 void pmap_remove_range_mmu(pmap, sva, eva)  /*
      pmap_t pmap;   * Remove some mappings, all in one PMEG,
      vm_offset_t sva, eva;   * where that PMEG is currently in the MMU.
 {   * The current context is already correct.
     int saved_context,i;   * If no PTEs remain valid in the PMEG, free it.
     unsigned int sme;   */
     pmeg_t pmegp;  void
     vm_offset_t va,pte;  pmap_remove_range_mmu(pmap, sva, eva)
           pmap_t pmap;
     /* XXX - Never unmap DVMA space... */          vm_offset_t sva, eva;
     if (sva >= VM_MAX_KERNEL_ADDRESS)  {
         return;          int old_ctx, i;
           pmeg_t pmegp;
           vm_offset_t va;
           int pte, sme;
   
     saved_context = get_context();          /* Interrupt level handled by caller. */
     if (pmap != kernel_pmap)          CHECK_SPL();
         set_context(pmap->pm_context->context_num);  
   
     sme = get_segmap(sva);  
     if (sme != SEGINV) {  
         pmegp = pmeg_p(sme);  
 #ifdef  DIAGNOSTIC  #ifdef  DIAGNOSTIC
         /* Make sure it is in our software segmap (cache). */          if (pmap != kernel_pmap) {
         if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(sva)] != sme))                  if (pmap->pm_ctxnum != get_context())
             panic("pmap_remove_range_mmu: MMU has bad pmeg %x", sme);                          panic("pmap_remove_range_mmu: wrong context");
 #endif  
     } else {  
         if (pmap == kernel_pmap)  
             return;  
         pmegp = pmeg_cache(pmap, sun3_trunc_seg(sva));  
         if (!pmegp)  
             goto outta_here;  
 #ifdef  PMAP_DEBUG  
         if (pmap_debug & PMD_SEGMAP) {  
             if (pmap != kernel_pmap)  
                 printf("pmap: set_segmap ctx=%d v=%x old=ff new=%x (rm1)\n",  
                        get_context(), sun3_trunc_seg(sva),  
                        pmegp->pmeg_index);  
         }          }
 #endif  #endif
         set_segmap(sva, pmegp->pmeg_index);  
     }  
     /* have pmeg, will travel */  
   
           va = sun3_trunc_seg(sva);
           sme = get_segmap(va);
 #ifdef  DIAGNOSTIC  #ifdef  DIAGNOSTIC
     /* Make sure we own the pmeg, right va, etc. */          /* Make sure it is valid and known. */
     if (pmegp->pmeg_va != sun3_trunc_seg(sva))          if (sme == SEGINV)
         panic("pmap_remove_range_mmu: wrong va pmeg %x", pmegp);                  panic("pmap_remove_range_mmu: SEGINV");
     if (pmegp->pmeg_owner != pmap)          if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(sva)] != sme))
         panic("pmap_remove_range_mmu: not my pmeg %x", pmegp);                  panic("pmap_remove_range_mmu: incorrect sme, va=0x%x", va);
     if (pmegp->pmeg_owner_version != pmap->pm_version)  
         panic("pmap_remove_range_mmu: old vers pmeg %x", pmegp);  
     if (pmap == kernel_pmap) {  
         if (pmegp->pmeg_qstate != PMEGQ_KERNEL)  
             panic("pmap_remove_range_mmu: pmeg not q_kernel %x", pmegp);  
     } else {  
         if (pmegp->pmeg_qstate != PMEGQ_ACTIVE)  
             panic("pmap_remove_range_mmu: pmeg not q_active %x", pmegp);  
     }  
 #endif  #endif
           pmegp = pmeg_p(sme);
   
     if (pmegp->pmeg_vpages <= 0)          /* have pmeg, will travel */
         goto outta_here; /* no valid pages anyway */  
   #ifdef  DIAGNOSTIC
           /* Make sure we own the pmeg, right va, etc. */
           if ((pmegp->pmeg_va != va) ||
                   (pmegp->pmeg_owner != pmap) ||
                   (pmegp->pmeg_version != pmap->pm_version))
           {
                   printf("pmap_remove_range_mmu: wrong pmeg:\n");
                   pmeg_print(pmegp);
                   panic("bye");
           }
           if (pmegp->pmeg_vpages <= 0)
                   panic("pmap_remove_range_mmu: no valid pages?");
   #endif
   
   #ifdef  HAVECACHE
           cache_flush_segment(va);
   #endif
   
           /* Invalidate the PTEs in the given range. */
           for (va = sva; va < eva; va += NBPG) {
                   pte = get_pte(va);
                   if (pte & PG_VALID) {
                           if ((pte & PG_TYPE) == PGT_OBMEM) {
                                   save_modref_bits(pte);
                                   pv_unlink(pmap, PG_PA(pte), va);
                           }
                           set_pte(va, PG_INVAL);
                           pmegp->pmeg_vpages--;
                   }
           }
   
           if (pmegp->pmeg_vpages <= 0) {
                   /* We are done with this pmeg. */
                   if (is_pmeg_wired(pmegp)) {
                           printf("pmap: removing wired pmeg: 0x%x\n", pmegp);
                           Debugger(); /* XXX */
                   }
   
                   /* First, remove it from the MMU. */
                   if (kernel_pmap == pmap) {
                           old_ctx = get_context();
                            for (i=0; i < NCONTEXT; i++) { /* unmap it from every context */
                                   set_context(i);
                                   set_segmap(sva, SEGINV);
                           }
                           set_context(old_ctx);
                   } else {
   #ifdef  PMAP_DEBUG
                           if (pmap_debug & PMD_SEGMAP) {
                                   printf("pmap: set_segmap ctx=%d v=%x old=%x new=ff (rm2)\n",
                                              get_context(), sun3_trunc_seg(sva),
                                              pmegp->pmeg_index);
                           }
   #endif
                           set_segmap(sva, SEGINV);
                           pmap->pm_segmap[VA_SEGNUM(sva)] = SEGINV;
                   }
                   /* Now, put it on the free list. */
                   pmeg_free(pmegp, VA_SEGNUM(sva));
           }
   }
   
   void
   pmap_remove_range_noctx(pmap, sva, eva)
           pmap_t pmap;
           vm_offset_t sva, eva;
   {
           int pte, sme, ptenum, segnum;
           vm_offset_t va;
           pmeg_t pmegp;
   
           /* Interrupt level handled by caller. */
           CHECK_SPL();
   
     for (va = sva; va < eva; va += NBPG) {  
         pte = get_pte(va);  
         if (pte & PG_VALID) {  
             vm_offset_t pa;  
             pa = PG_PA(pte);  
 #ifdef  PMAP_DEBUG  
             if (pte & PG_TYPE)  
                 panic("pmap_remove_range_mmu: not memory, va=%x", va);  
             if (pa < avail_start || pa >= avail_end)  
                 panic("pmap_remove_range_mmu: not managed, va=%x", va);  
 #endif  
             if (pv_initialized)  
                 save_modified_bits(pte);  
             pv_unlink(pmap, pa, va);  
             pmegp->pmeg_vpages--;  
             set_pte(va, PG_INVAL);  
         }  
     }  
     if (pmegp->pmeg_vpages <= 0) {  
         /* We are done with this pmeg. */  
         if (is_pmeg_wired(pmegp)) {  
             printf("pmap: removing wired pmeg: 0x%x\n", pmegp);  
             Debugger(); /* XXX */  
         }  
         /* First, remove it from the MMU. */  
         if (kernel_pmap == pmap) {  
             for (i=0; i < NCONTEXT; i++) { /* map out of all segments */  
                 set_context(i);  
                 set_segmap(sva,SEGINV);  
             }  
         } else {  
 #ifdef  PMAP_DEBUG  
             if (pmap_debug & PMD_SEGMAP) {  
                 printf("pmap: set_segmap ctx=%d v=%x old=%x new=ff (rm2)\n",  
                        get_context(), sun3_trunc_seg(sva),  
                        pmegp->pmeg_index);  
             }  
 #endif  
             set_segmap(sva, SEGINV);  
         }  
         /* Now, put it on the free list. */  
         pmeg_release_empty(pmegp, VA_SEGNUM(sva));  
     }  
 outta_here:  
     set_context(saved_context);  
 }  
   
 void pmap_remove_range_contextless(pmap, sva, eva, pmegp)  
      pmap_t pmap;  
      vm_offset_t sva, eva;  
      pmeg_t pmegp;  
 {  
     int sp,ep,i;  
     vm_offset_t pte,va;  
   
 #ifdef  PMAP_DEBUG  
     /* Kernel always in a context (actually, in all contexts). */  
     if (pmap == kernel_pmap)  
         panic("pmap_remove_range_contextless: kernel_pmap");  
     /* The pmeg passed here is always from the pmeg_cache */  
     if (pmegp->pmeg_qstate != PMEGQ_ACTIVE)  
         panic("pmap_remove_range_contextless: pmeg_qstate");  
 #endif  
   
     sp = VA_PTE_NUM(sva);  
     ep = VA_PTE_NUM(eva);  
     va = sva;  
     for (i = sp; i < ep; i++) {  
         pte = get_pte_pmeg(pmegp->pmeg_index, i);  
         if (pte & PG_VALID) {  
             if (pv_initialized)  
                 save_modified_bits(pte);  
             pv_unlink(pmap, PG_PA(pte), va);  
             pmegp->pmeg_vpages--;  
             set_pte_pmeg(pmegp->pmeg_index, i, PG_INVAL);  
         }  
         va+=NBPG;  
     }  
     if (pmegp->pmeg_vpages <= 0) {  
         if (is_pmeg_wired(pmegp))  
             panic("pmap: removing wired");  
         pmeg_release_empty(pmegp, VA_SEGNUM(sva));  
     }  
     else pmeg_release(pmegp);  
 }  
   
 /* guaraunteed to be within one pmeg */  
 void pmap_remove_range(pmap, sva, eva)  
      pmap_t pmap;  
      vm_offset_t sva, eva;  
 {  
     pmeg_t pmegp;  
   
 #ifdef  PMAP_DEBUG  
     if ((pmap_debug & PMD_REMOVE) ||  
         ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))  
         printf("pmap_remove_range(%x, %x, %x)\n", pmap, sva, eva);  
 #endif  
   
    /* cases: kernel: always has context, always available  
     *  
     *        user: has context, is available  
     *        user: has no context, is available  
     *        user: has no context, is not available (NOTHING) |_ together  
     *        user: has context, isn't available (NOTHING)     |  
     */  
     if (pmap != kernel_pmap) {  
         if (pmap->pm_segmap == NULL) {  
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
             if (pmap_debug & PMD_SEGMAP) {          /* Kernel always in a context (actually, in all contexts). */
                 printf("pmap_remove_range: null segmap\n");          if (pmap == kernel_pmap)
                 Debugger(); /* XXX */                  panic("pmap_remove_range_noctx: kernel_pmap");
             }          if (pmap->pm_segmap == NULL)
                   panic("pmap_remove_range_noctx: null segmap");
 #endif  #endif
             return;  
           segnum = VA_SEGNUM(sva);
           sme = pmap->pm_segmap[segnum];
           pmegp = pmeg_p(sme);
   
           for (va = sva; va < eva; va += NBPG) {
                   ptenum = VA_PTE_NUM(va);
                   pte = get_pte_pmeg(pmegp->pmeg_index, ptenum);
                   if (pte & PG_VALID) {
                           if ((pte & PG_TYPE) == PGT_OBMEM) {
                                   save_modref_bits(pte);
                                   pv_unlink(pmap, PG_PA(pte), va);
                           }
                           set_pte_pmeg(pmegp->pmeg_index, ptenum, PG_INVAL);
                           pmegp->pmeg_vpages--;
                   }
           }
           if (pmegp->pmeg_vpages <= 0) {
                   if (is_pmeg_wired(pmegp))
                           panic("pmap: removing wired");
   
                   pmap->pm_segmap[segnum] = SEGINV;
                   pmeg_free(pmegp, segnum);
         }          }
         if (get_pmeg_cache(pmap, VA_SEGNUM(sva)) == SEGINV) {  }
   
   /*
     * guaranteed to be within one segment
    */
   void
   pmap_remove_range(pmap, sva, eva)
           pmap_t pmap;
           vm_offset_t sva, eva;
   {
           int c, s, sme;
           int old_ctx;
           boolean_t in_ctx;
   
           PMAP_LOCK();
   
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
             /* XXX - Make sure it's not in the MMU? */          if ((pmap_debug & PMD_REMOVE) ||
             if (pmap->pm_context) {                  ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
                 int c, sme;                  printf("pmap_remove_range(%x, %x, %x)\n", pmap, sva, eva);
                 c = get_context();  #endif
                 set_context(pmap->pm_context->context_num);  #ifdef  DIAGNOSTIC
           if (sun3_trunc_seg(sva) != sun3_trunc_seg(eva-NBPG))
                   panic("pmap_remove_range: bad range!");
   #endif
   
           /* cases: kernel: always has context, always available
            *
            *              user: has context, is available
            *              user: has no context, is available
            *              user: has no context, is not available (NOTHING) |_ together
            *              user: has context, isn't available (NOTHING)     |
            */
   
           if (pmap == kernel_pmap) {
                 sme = get_segmap(sva);                  sme = get_segmap(sva);
                 set_context(c);  
                 if (sme != SEGINV)                  if (sme != SEGINV)
                     panic("pmap_remove_range: not in cache");                          pmap_remove_range_mmu(pmap, sva, eva);
             }          } else {
 #endif                  /* It is a user pmap. */
             return;                  if (pmap->pm_segmap[VA_SEGNUM(sva)] != SEGINV) {
                           /* There is a PMEG, but maybe not active. */
                           old_ctx = CTXINVAL;
                           in_ctx = FALSE;
                           if (has_context(pmap)) {
                                   old_ctx = get_context();
                                   set_context(pmap->pm_ctxnum);
                                   sme = get_segmap(sva);
                                   if (sme != SEGINV)
                                           in_ctx = TRUE;
                           }
                           if (in_ctx == TRUE) {
                                   /*
                                    * The PMEG is in the current context.
                                    */
                                   pmap_remove_range_mmu(pmap, sva, eva);
                           } else {
                                   /*
                                    * There is a PMEG to deal with,
                                    * but it is not active.
                                    */
                                   pmap_remove_range_noctx(pmap, sva, eva);
                           }
                           if (old_ctx != CTXINVAL) {
                                   set_context(old_ctx);
                           }
                   }
         }          }
     }  
   
     if ((pmap == kernel_pmap) || (pmap->pm_context)) {          PMAP_UNLOCK();
         pmap_remove_range_mmu(pmap, sva, eva);  
         return;          return;
     }  
     /* we are a user pmap without a context, possibly without a pmeg to  
      * operate upon  
      *  
      */  
     pmegp = pmeg_cache(pmap, sun3_trunc_seg(sva));  
     if (!pmegp) return;  
     pmap_remove_range_contextless(pmap, sva, eva, pmegp);  
 }  }
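
The kernel/user, has-context/no-context dispatch above recurs in
pmap_protect_range() later in this file.  A minimal sketch of the
underlying test, written as a hypothetical helper (not part of this
file), assuming the segmap and context primitives used above:

	static boolean_t
	segment_in_mmu(pmap, va)
		pmap_t pmap;
		vm_offset_t va;
	{
		/* Kernel mappings are loaded in every context. */
		if (pmap == kernel_pmap)
			return (get_segmap(va) != SEGINV);
		/* A user pmap without a context has nothing in the MMU. */
		if (!has_context(pmap))
			return (FALSE);
		/* The caller must save and restore the old context. */
		set_context(pmap->pm_ctxnum);
		return (get_segmap(va) != SEGINV);
	}
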
   
 /*  /*
Line 1854  void pmap_remove_range(pmap, sva, eva)
Line 2036  void pmap_remove_range(pmap, sva, eva)
  *      It is assumed that the start and end are properly   *      It is assumed that the start and end are properly
  *      rounded to the page size.   *      rounded to the page size.
  */   */
 void pmap_remove(pmap, sva, eva)  void
      pmap_t pmap;  pmap_remove(pmap, sva, eva)
      vm_offset_t sva, eva;          pmap_t pmap;
 {          vm_offset_t sva, eva;
     vm_offset_t va,neva;  {
     pmeg_t pmegp;          register vm_offset_t va, neva;
     int s;  
     /* some form of locking?, when where. */  
   
     if (pmap == NULL)          if (pmap == NULL)
         return;                  return;
   
           if (pmap == kernel_pmap) {
                   if (sva < VM_MIN_KERNEL_ADDRESS)
                           sva = VM_MIN_KERNEL_ADDRESS;
                   if (eva > DVMA_SPACE_END) {
   #ifdef  PMAP_DEBUG
                           printf("pmap_remove: eva=0x%x\n", eva);
                           Debugger();
   #endif
                           eva = DVMA_SPACE_END;
                   }
           } else {
                   if (eva > VM_MAXUSER_ADDRESS)
                           eva = VM_MAXUSER_ADDRESS;
           }
   
           va = sva;
           while (va < eva) {
                   neva = sun3_trunc_seg(va) + NBSG;
                   if (neva > eva)
                           neva = eva;
                   pmap_remove_range(pmap, va, neva);
                   va = neva;
           }
   }
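
As a worked example of the clipping loop above (a sketch, assuming
NBSG is the 128Kb segment size, 0x20000): removing [0x30000, 0x50000)
crosses the segment boundary at 0x40000, so pmap_remove_range() is
called once per segment touched:

	/* va = 0x30000: sun3_trunc_seg(va) + NBSG = 0x40000 */
	pmap_remove_range(pmap, 0x30000, 0x40000);
	/* va = 0x40000: next boundary 0x60000 > eva, so clip to eva */
	pmap_remove_range(pmap, 0x40000, 0x50000);
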
   
   static void
   pmap_enter_kernel(va, pa, prot, wired, new_pte)
           vm_offset_t va;
           vm_offset_t pa;
           vm_prot_t prot;
           boolean_t wired;
           int new_pte;
   {
           int s, i, c;
           int sme, old_pte;
           int nflags, do_pv;
           vm_offset_t seg_va;
           pmeg_t pmegp;
   
           /*
              keep in hardware only, since it's mapped into all contexts anyway;
              need to handle possibly allocating additional pmegs;
              need to make sure they can't be stolen from the kernel;
              map any new pmegs into all contexts, make sure rest of pmeg is null;
              deal with pv_stuff; possibly caching problems;
              must also deal with changes.
             */
   
           /*
            * In detail:
            *
            * (a) lock pmap
             * (b) Is the VA in an already mapped segment?  If so,
            *       look to see if that VA address is "valid".  If it is, then
            *       action is a change to an existing pte
            * (c) if not mapped segment, need to allocate pmeg
            * (d) if adding pte entry or changing physaddr of existing one,
            *              use pv_stuff, for change, pmap_remove() possibly.
            * (e) change/add pte
            */
   
   #ifdef PMAP_DEBUG
           if (va < virtual_avail) {
                   printf("pmap_enter_kernel: va=0x%x < virtual_avail\n", va);
                   Debugger();
           }
   #endif
   #ifdef  DIAGNOSTIC
           if ((va < VM_MIN_KERNEL_ADDRESS) || (va >= DVMA_SPACE_END))
                   panic("pmap_enter_kernel: bad va=0x%x", va);
           if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
                   panic("pmap_enter_kernel: bad pte");
   #endif
   
     /* do something about contexts */          if (va >= DVMA_SPACE_START) {
     if (pmap == kernel_pmap) {                  /* This is DVMA space.  Always want it non-cached. */
         if (sva < VM_MIN_KERNEL_ADDRESS)                  new_pte |= PG_NC;
             sva = VM_MIN_KERNEL_ADDRESS;  
         if (eva > VM_MAX_KERNEL_ADDRESS)  
             eva = VM_MAX_KERNEL_ADDRESS;  
     } else {  
         if (eva > VM_MAXUSER_ADDRESS)  
             eva = VM_MAXUSER_ADDRESS;  
     }  
     PMAP_LOCK();  
     PMAP_DB_LOCK();  
     va = sva;  
     pmegp = NULL;  
     while (va < eva) {  
         neva = sun3_round_up_seg(va);  
         if (neva > eva)  
             neva = eva;  
         pmap_remove_range(pmap, va, neva);  
         va = neva;  
     }  
     PMAP_DB_UNLK();  
     PMAP_UNLOCK();  
 }  
   
 void pmap_enter_kernel(va, pa, prot, wired, pte_proto, mem_type)  
      vm_offset_t va;  
      vm_offset_t pa;  
      vm_prot_t prot;  
      boolean_t wired;  
      vm_offset_t pte_proto;  
      vm_offset_t mem_type;  
 {  
     int s,i;  
     vm_offset_t current_pte;  
     unsigned char sme,nflags;  
     int c;  
     pmeg_t pmegp;  
     /*  
       keep in hardware only, since its mapped into all contexts anyway;  
       need to handle possibly allocating additional pmegs  
       need to make sure they cant be stolen from the kernel;  
       map any new pmegs into all contexts, make sure rest of pmeg is null;  
       deal with pv_stuff; possibly caching problems;  
       must also deal with changes too.  
       */  
     pte_proto |= PG_SYSTEM | MAKE_PGTYPE(mem_type);  
     /*  
      * In detail:  
      *  
      * (a) lock pmap  
      * (b) Is the VA in a already mapped segment, if so  
      *     look to see if that VA address is "valid".  If it is, then  
      *     action is a change to an existing pte  
      * (c) if not mapped segment, need to allocate pmeg  
      * (d) if adding pte entry or changing physaddr of existing one,  
      *        use pv_stuff, for change, pmap_remove() possibly.  
      * (e) change/add pte  
      */  
     if (va < VM_MIN_KERNEL_ADDRESS)  
         panic("pmap: kernel trying to allocate virtual space below minkva\n");  
     PMAP_LOCK();  
     sme = get_segmap(va);  
     /* XXXX -- lots of non-defined routines, need to see if pmap has a  
      * context  
      */  
     if (sme == SEGINV) {  
         pmegp = pmeg_allocate_invalid(kernel_pmap, sun3_trunc_seg(va));  
         c = get_context();  
         for (i=0; i < NCONTEXT; i++) { /* map into all segments */  
             set_context(i);  
             set_segmap(va,pmegp->pmeg_index);  
         }          }
         set_context(c);  
           seg_va = sun3_trunc_seg(va);
           do_pv = TRUE;
   
           PMAP_LOCK();
   
           sme = get_segmap(va);
           if (sme == SEGINV) {
                   pmegp = pmeg_allocate(kernel_pmap, sun3_trunc_seg(va));
                   sme = pmegp->pmeg_index;
                   c = get_context();
                   for (i=0; i < NCONTEXT; i++) { /* map into all contexts */
                           set_context(i);
                           set_segmap(va, sme);
                   }
                   set_context(c);
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
         pmeg_verify_empty(sun3_trunc_seg(va));                  if (pmap_debug & PMD_SEGMAP) {
                           printf("pmap: set_segmap pmap=%x va=%x sme=%x (ek1)\n",
                                      kernel_pmap, seg_va, sme);
                   }
                   pmeg_verify_empty(sun3_trunc_seg(va));
 #endif  #endif
         goto add_pte;                  /* There are no existing mappings to deal with. */
     }                  goto add_pte;
     else {          }
   
         /* Found an existing pmeg.  Modify it... */          /* Found an existing pmeg.  Modify it... */
         pmegp = pmeg_p(sme);          pmegp = pmeg_p(sme);
 #ifdef  DIAGNOSTIC  #ifdef  DIAGNOSTIC
         /* Make sure it is ours. */          /* Make sure it is ours. */
         if (pmegp->pmeg_owner && (pmegp->pmeg_owner != kernel_pmap))          if (pmegp->pmeg_owner && (pmegp->pmeg_owner != kernel_pmap))
             panic("pmap_enter_kernel: MMU has bad pmeg %x", sme);                  panic("pmap_enter_kernel: MMU has bad pmeg %x", sme);
 #endif  #endif
         /* Don't try to unlink if in DVMA map. */  
         if (va >= VM_MAX_KERNEL_ADDRESS) {          /*
                 /* also make sure it is non-cached :) */           * Found existing PMEG.  Does mapping already exist?
                  pte_proto |= PG_NC;           *      (a) if so, and the pa is the same, it is really a protection change
             *      (b) if the pa is not the same, we must unlink from the old pa
             *      (c) then change/add the pte
            */
           old_pte = get_pte(va);
           if ((old_pte & PG_VALID) == 0)
                   goto add_pte;
   
           /* XXX - removing valid page here, way lame... */
           pmegp->pmeg_vpages--;
   
           if ((old_pte & PG_TYPE) != PGT_OBMEM) {
                   /* Was not main memory, so no pv_entry for it. */
                 goto add_pte;                  goto add_pte;
         }          }
         /* Make sure we own it. */  
         if (pmegp->pmeg_owner != kernel_pmap)  
             panic("pmap_enter_kernel: found unknown pmeg");  
     }  
   
     /* does mapping already exist */  
     /*    (a) if so, is it same pa then really a protection change  
      *    (b) if not same, pa then we have to unlink from old pa  
      *    (c)  
      */  
     current_pte = get_pte(va);  
     if ((current_pte & PG_VALID) == 0)  
         goto add_pte; /* just adding */  
     pmegp->pmeg_vpages--;       /* removing valid page here, way lame XXX*/  
     if (pv_initialized)  
         save_modified_bits(current_pte);  
     if (PG_PGNUM(current_pte) != PG_PGNUM(pte_proto)) /* !same physical addr*/  
         pv_unlink(kernel_pmap, PG_PA(current_pte), va);  
   
 add_pte:        /* can be destructive */  
     if (wired) {  
         /* Will pmeg wire count go from zero to one? */  
         if (!is_pmeg_wired(pmegp))  
             kernel_pmap->pm_stats.wired_count++;  
         pmeg_wire(pmegp);       /* XXX */  
     } else {  
         pmeg_unwire(pmegp);     /* XXX */  
         /* Did pmeg wire count go from one to zero? */  
         if (!is_pmeg_wired(pmegp))  
             kernel_pmap->pm_stats.wired_count--;  
     }  
     if (mem_type & PG_TYPE)  
         set_pte(va, pte_proto | PG_NC);  
     else {  
         nflags = pv_link(kernel_pmap, pa, va, PG_TO_PV_FLAGS(pte_proto));  
         if (nflags & PV_NC)  
             set_pte(va, pte_proto | PG_NC);  
         else  
             set_pte(va, pte_proto);  
     }  
     pmegp->pmeg_vpages++;       /* assumes pmap_enter can never insert  
                                  a non-valid page*/  
     PMAP_UNLOCK();  
 }  
   
           /* Old mapping was main memory.  Save mod/ref bits. */
           save_modref_bits(old_pte);
   
 void pmap_enter_user(pmap, va, pa, prot, wired, pte_proto, mem_type)          /*
      pmap_t pmap;           * If not changing the type or pfnum then re-use pv_entry.
      vm_offset_t va;           * Note we get here only with old_pte having PGT_OBMEM.
      vm_offset_t pa;           */
      vm_prot_t prot;          if ((old_pte & (PG_TYPE|PG_FRAME)) ==
      boolean_t wired;                  (new_pte & (PG_TYPE|PG_FRAME)) )
      vm_offset_t pte_proto;          {
      vm_offset_t mem_type;                  do_pv = FALSE;          /* re-use pv_entry */
 {                  new_pte |= (old_pte & PG_NC);
     int saved_context,s;                  goto add_pte;
     unsigned char sme,nflags;  
     pmeg_t pmegp;  
     vm_offset_t current_pte;  
     /* sanity checking */  
     if (mem_type)  
         panic("pmap: attempt to map non main memory page into user space");  
     if ((va+NBPG) > VM_MAXUSER_ADDRESS)  
         panic("pmap: user trying to allocate virtual space above itself\n");  
   
 #ifdef  PMAP_DEBUG  
     if ((pmap_debug & PMD_ENTER) && wired) {  
         printf("pmap_enter_user: attempt to wire user page, ignored\n");  
         printf("pmap=0x%x va=0x%x pa=0x%x\n", pmap, va, pa);  
     }  
 #endif  
   
     pte_proto |= MAKE_PGTYPE(PG_MMEM); /* unnecessary */  
     PMAP_LOCK();  
     saved_context = get_context();  
     if (!pmap->pm_context) {  
         /* XXX - Why is this happening? -gwr */  
 #ifdef  PMAP_DEBUG  
         printf("pmap: pmap_enter_user() on pmap without a context\n");  
         /* XXX - Why is this happening occasionally? -gwr */  
 #endif  
         context_allocate(pmap);  
     }  
     if (saved_context != pmap->pm_context->context_num)  
         set_context(pmap->pm_context->context_num);  
   
     sme = get_segmap(va);  
     if (sme != SEGINV) {  
         /* Found an existing pmeg. */  
         pmegp = pmeg_p(sme);  
 #ifdef  DIAGNOSTIC  
         /* Make sure it is in our software segmap (cache). */  
         /* XXX - We are hitting this one! -gwr */  
         if (pmap->pm_segmap[VA_SEGNUM(va)] != sme)  
             panic("pmap_enter_user: MMU has bad pmeg %x", sme);  
 #endif  
     } else {  
         pmegp = pmeg_cache(pmap, sun3_trunc_seg(va));  
         if (pmegp) {  
             /* found cached pmeg - just reinstall in segmap */  
 #ifdef  PMAP_DEBUG  
             if (pmap_debug & PMD_SEGMAP) {  
                 printf("pmap: set_segmap ctx=%d v=%x old=ff new=%x (eu1)\n",  
                        get_context(), sun3_trunc_seg(va),  
                        pmegp->pmeg_index);  
             }  
 #endif  
             set_segmap(va, pmegp->pmeg_index);  
         } else {  
             /* no cached pmeg - get a new one */  
             pmegp = pmeg_allocate_invalid(pmap, sun3_trunc_seg(va));  
 #ifdef  PMAP_DEBUG  
             if (pmap_debug & PMD_SEGMAP) {  
                 printf("pmap: set_segmap ctx=%d v=%x old=ff new=%x (eu2)\n",  
                        get_context(), sun3_trunc_seg(va),  
                        pmegp->pmeg_index);  
             }  
 #endif  
             set_segmap(va, pmegp->pmeg_index);  
 #ifdef PMAP_DEBUG  
             pmeg_verify_empty(sun3_trunc_seg(va));  
             if (pmap->pm_segmap[VA_SEGNUM(va)] != pmegp->pmeg_index)  
                 panic("pmap_enter_user: pmeg_alloc_inv broken?");  
 #endif  
         }          }
     }  
   
 #ifdef  DIAGNOSTIC          /* OK, different type or PA, have to kill old pv_entry. */
     /* Make sure we own the pmeg, right va, etc. */          pv_unlink(kernel_pmap, PG_PA(old_pte), va);
     if (pmegp->pmeg_va != sun3_trunc_seg(va))  
         panic("pmap_enter_user: wrong va pmeg %x", pmegp);   add_pte:       /* can be destructive */
     if (pmegp->pmeg_owner != pmap)          pmeg_set_wiring(pmegp, va, wired);
         panic("pmap_enter_user: not my pmeg %x", pmegp);  
     if (pmegp->pmeg_owner_version != pmap->pm_version)          /* Anything but RAM is mapped non-cached. */
         panic("pmap_enter_user: old vers pmeg %x", pmegp);          if ((new_pte & PG_TYPE) != PGT_OBMEM) {
     if (pmegp->pmeg_qstate != PMEGQ_ACTIVE)                  new_pte |= PG_NC;
         panic("pmap_enter_user: pmeg not active %x", pmegp);                  do_pv = FALSE;
 #endif          }
           if (do_pv) {
     /* does mapping already exist */                  nflags = pv_link(kernel_pmap, pa, va, PG_TO_PV_FLAGS(new_pte));
     /*    (a) if so, is it same pa then really a protection change                  if (nflags & PV_NC)
      *    (b) if not same, pa then we have to unlink from old pa                          new_pte |= PG_NC;
      *    (c)          }
      */          set_pte(va, new_pte);
     current_pte = get_pte(va);          pmegp->pmeg_vpages++;
     if ((current_pte & PG_VALID) == 0)  
         goto add_pte;          PMAP_UNLOCK();
     if (pv_initialized)  }
         save_modified_bits(current_pte);  
     pmegp->pmeg_vpages--;       /* removing valid page here, way lame XXX*/  
     if (PG_PGNUM(current_pte) != PG_PGNUM(pte_proto))  void
         pv_unlink(pmap, PG_PA(current_pte), va);  pmap_enter_user(pmap, va, pa, prot, wired, new_pte)
           pmap_t pmap;
 add_pte:          vm_offset_t va;
     /* if we did wiring on user pmaps, then the code would be here */          vm_offset_t pa;
     /* XXX - pv_link calls malloc which calls pmap_enter_kernel... */          vm_prot_t prot;
     nflags = pv_link(pmap, pa, va, PG_TO_PV_FLAGS(pte_proto));          boolean_t wired;
     if (nflags & PV_NC)          int new_pte;
         set_pte(va, pte_proto | PG_NC);  {
     else          int s, old_ctx;
         set_pte(va, pte_proto);          int sme, segnum, old_pte;
     pmegp->pmeg_vpages++;       /* assumes pmap_enter can never insert          int nflags, do_pv;
                                    a non-valid page */          vm_offset_t seg_va;
     set_context(saved_context);          pmeg_t pmegp;
     PMAP_UNLOCK();  
   #ifdef  PMAP_DEBUG
           /*
            * Some user pages are wired here, and a later
            * call to pmap_change_wiring() will unwire them.
            * XXX - Need a separate list for wired user pmegs
            * so they can not be stolen from the active list.
            */
           if (wired && (pmap_debug & PMD_WIRING)) {
                   printf("pmap_enter_user: attempt to wire user page, ignored\n");
                   printf("pmap=0x%x va=0x%x pa=0x%x\n", pmap, va, pa);
           }
   #endif
   #ifdef  DIAGNOSTIC
           if (va >= VM_MAXUSER_ADDRESS)
                   panic("pmap_enter_user: bad va=0x%x", va);
           if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
                   panic("pmap_enter_user: bad pte");
   #endif
   
           segnum = VA_SEGNUM(va);
           seg_va = sun3_trunc_seg(va);
           do_pv = TRUE;
   
           PMAP_LOCK();
   
           /*
            * Make sure the current context is correct.
            * Even though we call pmap_activate when we switch
            * to a new process, the VM system occasionally will
             * activate enough other pmaps that we can lose our
            * own context and have to reallocate one here.
            */
           old_ctx = get_context();
           if (old_ctx != pmap->pm_ctxnum) {
   #ifdef  PMAP_DEBUG
                   if (pmap_debug & PMD_SWITCH) {
                           printf("pmap_enter_user: wrong context\n");
                           Debugger();
                   }
   #endif
                   if (!has_context(pmap)) {
   #ifdef  PMAP_DEBUG
                           if (pmap_debug & PMD_SWITCH) {
                                   printf("pmap_enter_user: pmap without context\n");
                                   Debugger();
                           }
   #endif
                           context_allocate(pmap);
                   }
                   set_context(pmap->pm_ctxnum);
           }
   
           /*
            * Have correct context, now get a PMEG.
            */
           if (pmap->pm_segmap[segnum] == SEGINV) {
                   /* Not in cache; need to allocate a new PMEG. */
   #ifdef  PMAP_DEBUG
                   /* should not be in hardware */
                   sme = get_segmap(va);
                   if (sme != SEGINV)
                           panic("pmap_enter_user: unknown sme at VA=0x%x", va);
   #endif
                   /* This will get us an "active" PMEG */
                   pmegp = pmeg_allocate(pmap, seg_va);
                   sme = pmegp->pmeg_index;
                   pmap->pm_segmap[segnum] = sme;
                   set_segmap(va, sme);
   #ifdef  PMAP_DEBUG
                   if (pmap_debug & PMD_SEGMAP) {
                           printf("pmap: set_segmap pmap=%x va=%x sme=%x (eu1)\n",
                                      pmap, seg_va, sme);
                   }
                   pmeg_verify_empty(seg_va);
   #endif
                   /* There are no existing mappings to deal with. */
                   goto add_pte;
           }
   
           /* There is a PMEG but it might be inactive. */
           sme = get_segmap(va);
           if (sme != SEGINV) {
                   /* It is already active. */
                   pmegp = pmeg_p(sme);
   #ifdef  PMAP_DEBUG
                   /* Make sure it is the right PMEG. */
                   if (sme != pmap->pm_segmap[segnum])
                           panic("pmap_enter_user: wrong sme at VA=0x%x", seg_va);
   #endif
           } else {
                   /* Make the PMEG active. */
                   pmegp = pmeg_cache(pmap, seg_va);
                   if (pmegp == NULL)
                           panic("pmap_enter_user: pmeg_cache lost");
                   /* found cached pmeg - just reinstall in segmap */
                   sme = pmegp->pmeg_index;
                   set_segmap(va, sme);
   #ifdef  PMAP_DEBUG
                   if (pmap_debug & PMD_SEGMAP) {
                           printf("pmap: set_segmap pmap=%x va=%x sme=%x (eu2)\n",
                                      pmap, seg_va, sme);
                   }
   #endif
           }
   
           /*
            * Found existing PMEG.  Does mapping already exist?
             *      (a) if so, and the pa is the same, it is really a protection change
             *      (b) if the pa is not the same, we must unlink from the old pa
             *      (c) then change/add the pte
            */
           old_pte = get_pte(va);
           if ((old_pte & PG_VALID) == 0)
                   goto add_pte;
   
           /* XXX - removing valid page here, way lame... */
           pmegp->pmeg_vpages--;
   
           if ((old_pte & PG_TYPE) != PGT_OBMEM) {
                   /* Was not main memory, so no pv_entry for it. */
                   goto add_pte;
           }
   
           /* Old mapping was main memory.  Save mod/ref bits. */
           save_modref_bits(old_pte);
   
           /*
            * If not changing the type or pfnum then re-use pv_entry.
            * Note we get here only with old_pte having PGT_OBMEM.
            */
           if ((old_pte & (PG_TYPE|PG_FRAME)) ==
                   (new_pte & (PG_TYPE|PG_FRAME)) )
           {
                   do_pv = FALSE;          /* re-use pv_entry */
                   new_pte |= (old_pte & PG_NC);
                   goto add_pte;
           }
   
           /* OK, different type or PA, have to kill old pv_entry. */
           pv_unlink(pmap, PG_PA(old_pte), va);
   
    add_pte:
           /* XXX - Wiring changes on user pmaps? */
           /* pmeg_set_wiring(pmegp, va, wired); */
   
           /* Anything but RAM is mapped non-cached. */
           if ((new_pte & PG_TYPE) != PGT_OBMEM) {
                   new_pte |= PG_NC;
                   do_pv = FALSE;
           }
           if (do_pv) {
                   nflags = pv_link(pmap, pa, va, PG_TO_PV_FLAGS(new_pte));
                   if (nflags & PV_NC)
                           new_pte |= PG_NC;
           }
           set_pte(va, new_pte);
           pmegp->pmeg_vpages++;
   
           set_context(old_ctx);
           PMAP_UNLOCK();
 }  }
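
Condensed, the PMEG lookup order used above is (a sketch, using only
names from this file):

	if (pmap->pm_segmap[segnum] == SEGINV)
		pmegp = pmeg_allocate(pmap, seg_va);	/* brand new PMEG */
	else if ((sme = get_segmap(va)) != SEGINV)
		pmegp = pmeg_p(sme);			/* already in the MMU */
	else
		pmegp = pmeg_cache(pmap, seg_va);	/* reload cached PMEG */

In the first and third cases the hardware segmap is then brought up
to date with set_segmap(), as the full code above shows.
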
 /*  
   /*
  *      Insert the given physical page (p) at   *      Insert the given physical page (p) at
  *      the specified virtual address (v) in the   *      the specified virtual address (v) in the
  *      target physical map with the protection requested.   *      target physical map with the protection requested.
  *   *
    *      The physical address is page aligned, but may have some
    *      low bits set indicating an OBIO or VME bus page, or just
     *      that the non-cache bit should be set (i.e. PMAP_NC).
    *
  *      If specified, the page will be wired down, meaning   *      If specified, the page will be wired down, meaning
  *      that the related pte can not be reclaimed.   *      that the related pte can not be reclaimed.
  *   *
Line 2140  add_pte:
Line 2411  add_pte:
   
 void  void
 pmap_enter(pmap, va, pa, prot, wired)  pmap_enter(pmap, va, pa, prot, wired)
      pmap_t pmap;          pmap_t pmap;
      vm_offset_t va;          vm_offset_t va;
      vm_offset_t pa;          vm_offset_t pa;
      vm_prot_t prot;          vm_prot_t prot;
      boolean_t wired;          boolean_t wired;
 {  
     vm_offset_t pte_proto, mem_type;  
     int s;  
   
     if (pmap == NULL) return;  
 #ifdef  PMAP_DEBUG  
     if ((pmap_debug & PMD_ENTER) ||  
         (va == pmap_db_watchva))  
         printf("pmap_enter(%x, %x, %x, %x, %x)\n",  
                pmap, va, pa, prot, wired);  
 #endif  
     mem_type = pa & PMAP_MEMMASK;  
     pte_proto = PG_VALID | pmap_pte_prot(prot) | (pa & PMAP_NC ? PG_NC : 0);  
     pa &= ~PMAP_SPECMASK;  
     pte_proto |= PA_PGNUM(pa);  
   
     /* treatment varies significantly:  
      *  kernel ptes are in all contexts, and are always in the mmu  
      *  user ptes may not necessarily? be in the mmu.  pmap may not  
      *   be in the mmu either.  
      *  
      */  
     PMAP_LOCK();  
     if (pmap == kernel_pmap) {  
         /* This can be called recursively through malloc. */  
         pmap_enter_kernel(va, pa, prot, wired, pte_proto, mem_type);  
     } else {  
         PMAP_DB_LOCK();  
         pmap_enter_user(pmap, va, pa, prot, wired, pte_proto, mem_type);  
     PMAP_DB_UNLK();  
     }  
     PMAP_UNLOCK();  
 }  
   
 void  
 pmap_clear_modify(pa)  
      vm_offset_t        pa;  
 {  {
     int s;          int pte_proto;
           int s;
   
     if (!pv_initialized) return;          if (pmap == NULL)
                   return;
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     if (pmap_debug & PMD_MODBIT)          if ((pmap_debug & PMD_ENTER) ||
         printf("pmap_clear_modified: %x\n", pa);                  (va == pmap_db_watchva))
                   printf("pmap_enter(%x, %x, %x, %x, %x)\n",
                              pmap, va, pa, prot, wired);
 #endif  #endif
     PMAP_LOCK();  
     PMAP_DB_LOCK();          /* Get page-type bits from low part of the PA... */
     pv_modified_table[PA_PGNUM(pa)] = 0;          pte_proto = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
     pv_change_pte(pa_to_pvp(pa), 0, PG_MOD);  
     PMAP_DB_UNLK();          /* ...now the valid and writable bits... */
     PMAP_UNLOCK();          pte_proto |= (PG_VALID | pmap_pte_prot(prot));
   
           /* ...and finally the page-frame number. */
           pte_proto |= PA_PGNUM(pa);
   
           /*
             * Treatment varies significantly:
             *  kernel ptes are in all contexts, and are always in the mmu;
             *  user ptes may or may not be in the mmu, and a user pmap
             *   may not have a context at all.
            *
            */
           PMAP_LOCK();
           if (pmap == kernel_pmap) {
                   /* This can be called recursively through malloc. */
                   pte_proto |= PG_SYSTEM;
                   pmap_enter_kernel(va, pa, prot, wired, pte_proto);
           } else {
                   pmap_enter_user(pmap, va, pa, prot, wired, pte_proto);
           }
           PMAP_UNLOCK();
 }  }
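
For example, a driver wanting one device page mapped wired and
non-cached would pass the hint in the low bits of the physical
address (a hypothetical call site; kva and dev_pa are illustrative):

	pmap_enter(kernel_pmap, kva, dev_pa | PMAP_NC,
	    VM_PROT_READ | VM_PROT_WRITE, TRUE);
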
 boolean_t  
 pmap_is_modified(pa)  /*
      vm_offset_t        pa;   * This is a shortcut used by the trap handler to
    * reload PMEGs into a user segmap without calling
    * the actual VM fault handler.  Returns TRUE if:
    *      the PMEG was reloaded, and
    *      it has a valid PTE at va.
    * Otherwise return zero and let VM code handle it.
    */
   int pmap_fault_reload(pmap, va, ftype)
           pmap_t pmap;
           vm_offset_t va;
           vm_prot_t ftype;
 {  {
     int s;          int rv, s, pte, chkpte, sme, segnum, ctx;
           pmeg_t pmegp;
   
   #ifdef  PMAP_DEBUG
           if (pmap == kernel_pmap)
                   panic("pmap_fault_reload: kernel_pmap");
   #endif
           if (pmap->pm_segmap == NULL) {
   #ifdef  PMAP_DEBUG
                   printf("pmap_fault_reload: null segmap\n");
                   Debugger();
   #endif
                   return (0);
           }
           if (va >= VM_MAXUSER_ADDRESS)
                   return (0);
   
           /* Make sure context is correct. */
           ctx = get_context();
           if (ctx != pmap->pm_ctxnum) {
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     if (pmap_debug & PMD_MODBIT)                  printf("pmap_fault_reload: wrong context\n");
         printf("pmap_is_modified: %x\n", pa);                  Debugger();
 #endif  #endif
     if (!pv_initialized) return 0;                  /* XXX - What to do... */
     if (pv_modified_table[PA_PGNUM(pa)]) return 1;                  return (0);
     PMAP_LOCK();          }
     PMAP_DB_LOCK();  
     pv_modified_table[PA_PGNUM(pa)] = pv_compute_modified(pa_to_pvp(pa));          chkpte = PG_VALID;
     PMAP_DB_UNLK();          if (ftype & VM_PROT_WRITE)
     PMAP_UNLOCK();                  chkpte |= PG_WRITE;
     return pv_modified_table[PA_PGNUM(pa)];  
           PMAP_LOCK();
   
           rv = 0;
           sme = get_segmap(va);
           if (sme == SEGINV) {
                   /* See if there is something to reload. */
                   pmegp = pmeg_cache(pmap, va);
                   if (pmegp) {
                           /* Found one!  OK, reload it. */
                           /* pmap_stats.ps_pmeg_faultin++ */
                           sme = pmegp->pmeg_index;
                           set_segmap(va, sme);
                           pte = get_pte(va);
                           if (pte & chkpte)
                                   rv = 1;
                   }
           }
   
           PMAP_UNLOCK();
            return (rv);
 }  }
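
A plausible call site in the trap handler looks like this (a sketch
only; the real trap code lives elsewhere, and vm_fault() is the
normal Mach VM fault entry point):

	if (pmap != kernel_pmap && pmap_fault_reload(pmap, va, ftype))
		return (0);	/* PMEG reloaded; just retry the access */
	rv = vm_fault(map, va, ftype, FALSE);
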
   
 void pmap_clear_reference(pa)  
      vm_offset_t        pa;  /*
    * Clear the modify bit for the given physical page.
    */
   void
   pmap_clear_modify(pa)
           register vm_offset_t pa;
 {  {
     int s;          register pv_entry_t     pvhead;
   
     if (!pv_initialized) return;          if (!pv_initialized)
 #ifdef PMAP_DEBUG                  return;
     if (pmap_debug & PMD_REFBIT)          if (!managed(pa))
         printf("pmap_clear_referenced: %x\n", pa);                  return;
 #endif  
     PMAP_LOCK();          pvhead = pa_to_pvp(pa);
     PMAP_DB_LOCK();          pv_syncflags(pvhead);
     pv_remove_all(pa);          pvhead->pv_flags &= ~PV_MOD;
     PMAP_DB_UNLK();  
     PMAP_UNLOCK();  
 }  }
   
 boolean_t  /*
 pmap_is_referenced(pa)   * Tell whether the given physical page has been modified.
      vm_offset_t        pa;   */
   int
   pmap_is_modified(pa)
           register vm_offset_t pa;
 {  {
 #ifdef PMAP_DEBUG          register pv_entry_t     pvhead;
     if (pmap_debug & PMD_REFBIT)  
         printf("pmap_is_referenced: %x\n", pa);          if (!pv_initialized)
 #endif                  return (0);
     return FALSE;       /* XXX */          if (!managed(pa))
                   return (0);
           pvhead = pa_to_pvp(pa);
           if ((pvhead->pv_flags & PV_MOD) == 0)
                   pv_syncflags(pvhead);
           return (pvhead->pv_flags & PV_MOD);
 }  }
   
   /*
    * Clear the reference bit for the given physical page.
    * It's OK to just remove mappings if that's easier.
    */
   void
   pmap_clear_reference(pa)
           register vm_offset_t pa;
   {
           register pv_entry_t     pvhead;
   
           if (!pv_initialized)
                   return;
           if (!managed(pa))
                   return;
   
           pvhead = pa_to_pvp(pa);
           pv_syncflags(pvhead);
           pvhead->pv_flags &= ~PV_REF;
   }
   
 void pmap_activate(pmap, pcbp)  /*
      pmap_t pmap;   * Tell whether the given physical page has been referenced.
      struct pcb *pcbp;   * It's OK to just return FALSE if page is not mapped.
    */
   int
   pmap_is_referenced(pa)
           vm_offset_t     pa;
 {  {
     int s, newctx;          register pv_entry_t     pvhead;
   
 #ifdef  PMAP_DEBUG          if (!pv_initialized)
         if (pmap_debug & PMD_SWITCH)                  return (0);
             printf("pmap_activate(%x, %x)\n", pmap, pcbp);          if (!managed(pa))
 #endif                  return (0);
           pvhead = pa_to_pvp(pa);
           if ((pvhead->pv_flags & PV_REF) == 0)
                   pv_syncflags(pvhead);
           return (pvhead->pv_flags & PV_REF);
   }
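
All four routines above share one lazy pattern: the cached PV_MOD or
PV_REF flag is trusted only when already set; otherwise pv_syncflags()
walks the PTEs mapping the page and harvests the hardware bits.  A
hypothetical pageout-style caller (sketch; vm_pageout_page() named
only for illustration):

	if (pmap_is_modified(VM_PAGE_TO_PHYS(pg))) {
		/* Page is dirty; it must be cleaned before reuse. */
		vm_pageout_page(pg, object);
	}
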
   
     if (pmap == kernel_pmap)  
         panic("pmap_activate: kernel_pmap");  
   
     if (pmap->pm_context == NULL)  void
         context_allocate(pmap);  pmap_activate(pmap, pcbp)
           pmap_t pmap;
           struct pcb *pcbp;
   {
           CHECK_SPL();
   
     newctx = pmap->pm_context->context_num;          if (pmap == kernel_pmap)
                   panic("pmap_activate: kernel_pmap");
   
     if (newctx != get_context()) {          if (!has_context(pmap)) {
                   context_allocate(pmap);
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
         if (pmap_debug & PMD_SWITCH)                  if (pmap_debug & PMD_SWITCH)
             printf("pmap_activate(%x, %x) switching to context %d\n",                          printf("pmap_activate(%x) takes context %d\n",
                    pmap, pcbp, newctx);                                     pmap, pmap->pm_ctxnum);
 #endif  #endif
         set_context(newctx);          }
     }  
   #ifdef  PMAP_DEBUG
           if (pmap_debug & PMD_SWITCH) {
                   int old_ctx = get_context();
                   if (old_ctx != pmap->pm_ctxnum) {
                           printf("pmap_activate(%x) old_ctx=%d new_ctx=%d\n",
                                      pmap, old_ctx, pmap->pm_ctxnum);
                   }
           }
   #endif
   
           set_context(pmap->pm_ctxnum);
 }  }
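
A sketch of a plausible call site (the actual caller is the
machine-dependent switch code; p, p_vmspace and u_pcb are the usual
4.4BSD names):

	pmap_activate(p->p_vmspace->vm_map.pmap, &p->p_addr->u_pcb);
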
 void pmap_deactivate(pmap, pcbp)  
      pmap_t pmap;  void
      struct pcb *pcbp;  pmap_deactivate(pmap, pcbp)
           pmap_t pmap;
           struct pcb *pcbp;
 {  {
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
     if (pmap_debug & PMD_SWITCH)          if (pmap_debug & PMD_SWITCH)
         printf("pmap_deactivate(%x, %x)\n", pmap, pcbp);                  printf("pmap_deactivate(%x, %x)\n", pmap, pcbp);
 #endif  #endif
     /* XXX - Why bother? -gwr */          /* Nothing to do really, and not called anyway... */
     TAILQ_INSERT_TAIL(&context_active_queue, pmap->pm_context, context_link);  
 }  }
   
 /*  /*
Line 2295  void pmap_deactivate(pmap, pcbp)
Line 2656  void pmap_deactivate(pmap, pcbp)
  */   */
 void  void
 pmap_change_wiring(pmap, va, wired)  pmap_change_wiring(pmap, va, wired)
          pmap_t pmap;          pmap_t  pmap;
         vm_offset_t     va;          vm_offset_t     va;
         boolean_t       wired;          boolean_t       wired;
 {  {
     int s, sme;          int s, sme;
     pmeg_t pmegp;          int wiremask, ptenum;
           pmeg_t pmegp;
   
           if (pmap == NULL)
                   return;
   #ifdef PMAP_DEBUG
           if (pmap_debug & PMD_WIRING)
                   printf("pmap_change_wiring(pmap=0x%x, va=0x%x, wire=%d)\n",
                              pmap, va, wired);
   #endif
           /*
            * We are asked to unwire pages that were wired when
            * pmap_enter() was called and we ignored wiring.
            * (VM code appears to wire a stack page during fork.)
            */
           if (pmap != kernel_pmap) {
 #ifdef PMAP_DEBUG  #ifdef PMAP_DEBUG
     if (pmap_debug & PMD_WIRING)                  if (pmap_debug & PMD_WIRING)
         printf("pmap_change_wiring(0x%x, 0x%x, %x)\n",                          printf("  (user pmap -- ignored)\n");
                pmap, va, wired);  #endif
 #endif                  return;
     PMAP_LOCK();          }
     PMAP_DB_LOCK();  
     if (pmap == kernel_pmap) {          ptenum = VA_PTE_NUM(va);
           wiremask = 1 << ptenum;
   
           PMAP_LOCK();
   
         sme = get_segmap(va);          sme = get_segmap(va);
         if (sme == SEGINV)          if (sme == SEGINV)
             panic("pmap_change_wiring: invalid va=0x%x", va);                  panic("pmap_change_wiring: invalid va=0x%x", va);
         pmegp = pmeg_p(sme);          pmegp = pmeg_p(sme);
         if (wired)          if (wired)
             pmeg_wire(pmegp);                  pmegp->pmeg_wired |= wiremask;
         else          else
             pmeg_unwire(pmegp);                  pmegp->pmeg_wired &= ~wiremask;
     } else {          PMAP_UNLOCK();
         if (pmap_debug & PMD_WIRING)  
             printf("pmap_change_wiring: user pmap (ignored)\n");  
     }  
     PMAP_DB_UNLK();  
     PMAP_UNLOCK();  
 }  }
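
Wiring state is kept as one bit per page: a PMEG maps 16 pages, so
the whole mask fits in a 16-bit field.  A sketch of the bookkeeping
above, using names from this file:

	wiremask = 1 << VA_PTE_NUM(va);	/* VA_PTE_NUM(va) is 0..15 */
	pmegp->pmeg_wired |= wiremask;	/* wire one page */
	pmegp->pmeg_wired &= ~wiremask;	/* unwire it again */

With that representation, is_pmeg_wired(pmegp) reduces to a non-zero
test on the mask.
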
   
 /*  /*
  *      Copy the range specified by src_addr/len   *      Copy the range specified by src_addr/len
  *      from the source map to the range dst_addr/len   *      from the source map to the range dst_addr/len
Line 2332  pmap_change_wiring(pmap, va, wired)
Line 2707  pmap_change_wiring(pmap, va, wired)
  *   *
  *      This routine is only advisory and need not do anything.   *      This routine is only advisory and need not do anything.
  */   */
 void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)  void
   pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
         pmap_t          dst_pmap;          pmap_t          dst_pmap;
         pmap_t          src_pmap;          pmap_t          src_pmap;
         vm_offset_t     dst_addr;          vm_offset_t     dst_addr;
Line 2342  void pmap_copy(dst_pmap, src_pmap, dst_a
Line 2718  void pmap_copy(dst_pmap, src_pmap, dst_a
 }  }
   
 /*  /*
  *      pmap_copy_page copies the specified (machine independent)  
  *      page by mapping the page into virtual memory and using  
  *      bcopy to copy the page, one machine dependent page at a  
  *      time.  
  */  
 void pmap_copy_page(src, dst)  
          vm_offset_t    src, dst;  
 {  
     vm_offset_t pte;  
     int s;  
   
 #ifdef  PMAP_DEBUG  
     if (pmap_debug & PMD_COW)  
         printf("pmap_copy_page: %x -> %x\n", src, dst);  
 #endif  
     PMAP_LOCK();  
     PMAP_DB_LOCK();  
     pte = PG_VALID |PG_SYSTEM|PG_WRITE|PG_NC|PG_MMEM| PA_PGNUM(src);  
     set_pte(tmp_vpages[0], pte);  
     pte = PG_VALID |PG_SYSTEM|PG_WRITE|PG_NC|PG_MMEM| PA_PGNUM(dst);  
     set_pte(tmp_vpages[1], pte);  
     bcopy((char *) tmp_vpages[0], (char *) tmp_vpages[1], NBPG);  
     set_pte(tmp_vpages[0], PG_INVAL);  
      set_pte(tmp_vpages[1], PG_INVAL);  
     PMAP_DB_UNLK();  
     PMAP_UNLOCK();  
 }  
 /*  
  *      Routine:        pmap_extract   *      Routine:        pmap_extract
  *      Function:   *      Function:
  *              Extract the physical page address associated   *              Extract the physical page address associated
Line 2377  void pmap_copy_page(src, dst)
Line 2725  void pmap_copy_page(src, dst)
  */   */
 vm_offset_t  vm_offset_t
 pmap_extract(pmap, va)  pmap_extract(pmap, va)
      pmap_t     pmap;          pmap_t  pmap;
      vm_offset_t va;          vm_offset_t va;
 {  {
     unsigned char sme;          unsigned int sme;
     unsigned int seg;          unsigned int seg;
     vm_offset_t pte;          int pte = 0;
     int s;          int s;
   
     PMAP_LOCK();          PMAP_LOCK();
     PMAP_DB_LOCK();          if (pmap == kernel_pmap) {
     if (pmap == kernel_pmap) {                  sme = get_segmap(va);
         sme = get_segmap(va);                  if (sme != SEGINV)
         if (sme == SEGINV)                          pte = get_pte(va);
             panic("pmap: pmap_extract() failed on kernel va");          } else {
         pte = get_pte(va);                  seg = VA_SEGNUM(va);
         if (pte & PG_VALID)                  if (pmap->pm_segmap[seg] != SEGINV)
             goto valid;                          pte = get_pte_val(pmap, va);
         panic("pmap: pmap_extract() failed on invalid kernel va");          }
     }          PMAP_UNLOCK();
     seg = VA_SEGNUM(va);          if ((pte & PG_VALID) == 0)
     if (pmap->pm_segmap[seg] == SEGINV)                  panic("pmap_extract: va=0x%x", va);
         panic("pmap: pmap_extract() failed on user va");          return PG_PA(pte);
     if (get_pte_val(pmap, va,&pte)) {  
         if (pte & PG_VALID)  
             goto valid;  
     }  
     panic("pmap: pmap_extract() failed on invalid user va");  
  valid:  
     PMAP_DB_UNLK();  
     PMAP_UNLOCK();  
     return PG_PA(pte);  
 }  }
   
 /*  /*
  *      Routine:        pmap_pageable   *      Routine:        pmap_pageable
  *      Function:   *      Function:
Line 2423  pmap_extract(pmap, va)
Line 2763  pmap_extract(pmap, va)
  *              will specify that these pages are to be wired   *              will specify that these pages are to be wired
  *              down (or not) as appropriate.   *              down (or not) as appropriate.
  */   */
 void pmap_pageable(pmap, sva, eva, pageable)  void
   pmap_pageable(pmap, sva, eva, pageable)
         pmap_t          pmap;          pmap_t          pmap;
         vm_offset_t     sva, eva;          vm_offset_t     sva, eva;
         boolean_t       pageable;          boolean_t       pageable;
 {  {
 /* not implemented, hopefully not needed */          /* not implemented, hopefully not needed */
 }  }
   
   /*
    * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
    * XXX  this should almost certainly be done differently, and
    *      elsewhere, or even not at all
    */
 vm_offset_t  vm_offset_t
 pmap_phys_address(ppn)  pmap_phys_address(x)
         int ppn;          int x;
 {  {
     vm_offset_t pa;          return (x);
   
     pa = sun3_ptob(ppn);  
     return pa;  
 }  }
   
 /*  /*
Line 2447  pmap_phys_address(ppn)
Line 2790  pmap_phys_address(ppn)
  */   */
 void  void
 pmap_pinit(pmap)  pmap_pinit(pmap)
      pmap_t pmap;          pmap_t pmap;
 {  {
     pmap_common_init(pmap);          pmap_common_init(pmap);
     pmap_user_pmap_init(pmap);          pmap_user_pmap_init(pmap);
 }  }
   
 void pmap_protect_range_contextless(pmap, sva, eva,pte_proto, pmegp)  /*
      pmap_t pmap;   * Remove write permissions, all in one PMEG,
      vm_offset_t sva, eva;   * where that PMEG is currently in the MMU.
      vm_offset_t pte_proto;   * The current context is already correct.
      pmeg_t pmegp;   */
 {  void
     int sp, ep, i, s;  pmap_protect_range_mmu(pmap, sva, eva)
     unsigned char nflags;          pmap_t pmap;
     vm_offset_t pte,va;          vm_offset_t sva, eva;
   {
     sp = VA_PTE_NUM(sva);          int old_ctx, i;
     ep = VA_PTE_NUM(eva);          pmeg_t pmegp;
     va = sva;          vm_offset_t va;
     for (i = sp; i < ep; i++) {          int pte, sme;
         pte = get_pte_pmeg(pmegp->pmeg_index, i);          int nflags;
         if (pte & PG_VALID) {  
             if (pv_initialized)          /* Interrupt level handled by caller. */
                 save_modified_bits(pte);          CHECK_SPL();
             pte_proto |= (PG_MOD|PG_SYSTEM|PG_TYPE|PG_ACCESS|PG_FRAME) & pte;  
             nflags = pv_link(pmap, PG_PA(pte), va, PG_TO_PV_FLAGS(pte_proto));  #ifdef  DIAGNOSTIC
             if (nflags & PV_NC)          if (pmap != kernel_pmap) {
                 set_pte_pmeg(pmegp->pmeg_index, i, pte_proto|PG_NC);                  if (pmap->pm_ctxnum != get_context())
             else                          panic("pmap_protect_range_mmu: wrong context");
                 set_pte_pmeg(pmegp->pmeg_index, i, pte_proto);          }
         }  #endif
         va+=NBPG;  
     }          va = sun3_trunc_seg(sva);
 }          sme = get_segmap(va);
   #ifdef  DIAGNOSTIC
 void pmap_protect_range_mmu(pmap, sva, eva, pte_proto)          /* Make sure it is valid and known. */
      pmap_t pmap;          if (sme == SEGINV)
      vm_offset_t sva, eva;                  panic("pmap_protect_range_mmu: SEGINV");
      vm_offset_t pte_proto;          if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(sva)] != sme))
 {                  panic("pmap_protect_range_mmu: incorrect sme, va=0x%x", va);
     int saved_context;  #endif
     unsigned int sme,nflags;  
     pmeg_t pmegp;  
     vm_offset_t va,pte;  
   
     saved_context = get_context();  
     if (pmap != kernel_pmap)  
         set_context(pmap->pm_context->context_num);  
     sme = get_segmap(sva);  
     if (sme != SEGINV) {  
         pmegp = pmeg_p(sme);          pmegp = pmeg_p(sme);
   
           /* have pmeg, will travel */
   
 #ifdef  DIAGNOSTIC  #ifdef  DIAGNOSTIC
         /* Make sure it is in our software segmap (cache). */          /* Make sure we own the pmeg, right va, etc. */
         if (pmap->pm_segmap[VA_SEGNUM(sva)] != sme)          if ((pmegp->pmeg_va != va) ||
             panic("pmap_protect_range_mmu: MMU has bad pmeg %x", sme);                  (pmegp->pmeg_owner != pmap) ||
                   (pmegp->pmeg_version != pmap->pm_version))
           {
                   printf("pmap_protect_range_mmu: wrong pmeg:\n");
                   pmeg_print(pmegp);
                   panic("bye");
           }
           if (pmegp->pmeg_vpages <= 0)
                   panic("pmap_protect_range_mmu: no valid pages?");
 #endif  #endif
     } else {  
   #ifdef  HAVECACHE
           cache_flush_segment(va);
   #endif
   
           /* Remove write permission on PTEs in the given range. */
           for (va = sva; va < eva; va += NBPG) {
                   pte = get_pte(va);
                   if (pte & PG_VALID) {
                           save_modref_bits(pte);
                           pte &= ~(PG_WRITE | PG_MODREF);
                           set_pte(va, pte);
                   }
           }
   }
   
   /*
    * Remove write permissions, all in one PMEG,
    * where it is not currently in any context.
    */
   void
   pmap_protect_range_noctx(pmap, sva, eva)
           pmap_t pmap;
           vm_offset_t sva, eva;
   {
           int pte, sme, ptenum, segnum;
           vm_offset_t va;
           pmeg_t pmegp;
           int nflags;
   
           /* Interrupt level handled by caller. */
           CHECK_SPL();
   
   #ifdef  PMAP_DEBUG
           /* Kernel always in a context (actually, in all contexts). */
         if (pmap == kernel_pmap)          if (pmap == kernel_pmap)
             return;                  panic("pmap_protect_range_noctx: kernel_pmap");
         pmegp = pmeg_cache(pmap, sun3_trunc_seg(sva));          if (pmap->pm_segmap == NULL)
         if (!pmegp)                  panic("pmap_protect_range_noctx: null segmap");
             goto out;  #endif
 #ifdef  PMAP_DEBUG  
         if (pmap_debug & PMD_SEGMAP) {          segnum = VA_SEGNUM(sva);
             printf("pmap: set_segmap ctx=%d v=%x old=ff new=%x (rp1)\n",          sme = pmap->pm_segmap[segnum];
                    get_context(), sun3_trunc_seg(sva), pmegp->pmeg_index);          pmegp = pmeg_p(sme);
         }  
 #endif          /* Remove write permission on PTEs in the given range. */
         set_segmap(sva, pmegp->pmeg_index);          for (va = sva; va < eva; va += NBPG) {
     }                  ptenum = VA_PTE_NUM(va);
     /* have pmeg, will travel */                  pte = get_pte_pmeg(sme, ptenum);
                   if (pte & PG_VALID) {
     if (pmegp->pmeg_vpages <= 0)                          save_modref_bits(pte);
         return; /* no valid pages anyway */                          pte &= ~(PG_WRITE | PG_MODREF);
     va = sva;                          set_pte_pmeg(sme, ptenum, pte);
     while (va < eva) {                  }
         pte = get_pte(va);          }
         if (pte & PG_VALID) {  }
             if (pv_initialized)  
                 save_modified_bits(pte);  /*
              pte_proto = (pte_proto & (PG_VALID|PG_WRITE)) |   * Remove write permissions in the given range.
                 ((PG_MOD|PG_SYSTEM|PG_TYPE|PG_ACCESS|PG_FRAME) & pte);   * (guaranteed to be within one segment)
             nflags = pv_link(pmap, PG_PA(pte), va, PG_TO_PV_FLAGS(pte_proto));   * similar to pmap_remove_range()
             if (nflags & PV_NC)   */
                 set_pte(va, pte_proto | PG_NC);  static void
             else  pmap_protect_range(pmap, sva, eva)
                 set_pte(va, pte_proto);          pmap_t pmap;
         }          vm_offset_t sva, eva;
         va+= NBPG;  
     }  
  out:  
     set_context(saved_context);  
 }  
                                 /* within one pmeg */  
 void pmap_protect_range(pmap, sva, eva, pte_proto)  
      pmap_t     pmap;  
      vm_offset_t        sva, eva;  
      vm_offset_t pte_proto;  
 {  {
     pmeg_t pmegp;          int c, s, sme;
           int old_ctx;
           boolean_t in_ctx;
   
           PMAP_LOCK();
   
     if (pmap != kernel_pmap) {  
         if (pmap->pm_segmap == NULL) {  
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
             if (pmap_debug & PMD_SEGMAP) {          if ((pmap_debug & PMD_PROTECT) ||
                 printf("pmap_protect_range: null segmap\n");                  ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
                 Debugger(); /* XXX */                  printf("pmap_protect_range(%x, %x, %x)\n", pmap, sva, eva);
             }  #endif
   #ifdef  DIAGNOSTIC
           if (sun3_trunc_seg(sva) != sun3_trunc_seg(eva-NBPG))
                   panic("pmap_protect_range: bad range!");
 #endif  #endif
             return;  
           if (pmap == kernel_pmap) {
                   sme = get_segmap(sva);
                   if (sme != SEGINV)
                           pmap_protect_range_mmu(pmap, sva, eva);
           } else {
                   /* It is a user pmap. */
                   if (pmap->pm_segmap[VA_SEGNUM(sva)] != SEGINV) {
                           /* There is a PMEG, but maybe not active. */
                           old_ctx = CTXINVAL;
                           in_ctx = FALSE;
                           if (has_context(pmap)) {
                                   old_ctx = get_context();
                                   set_context(pmap->pm_ctxnum);
                                   sme = get_segmap(sva);
                                   if (sme != SEGINV)
                                           in_ctx = TRUE;
                           }
                           if (in_ctx == TRUE) {
                                   /*
                                    * The PMEG is in the current context.
                                    */
                                   pmap_protect_range_mmu(pmap, sva, eva);
                           } else {
                                   /*
                                    * There is a PMEG to deal with,
                                    * but it is not active.
                                    */
                                   pmap_protect_range_noctx(pmap, sva, eva);
                           }
                           if (old_ctx != CTXINVAL) {
                                   set_context(old_ctx);
                           }
                   }
         }          }
         if (get_pmeg_cache(pmap, VA_SEGNUM(sva)) == SEGINV)  
             return;  
     }  
   
     if ((pmap == kernel_pmap) || (pmap->pm_context))          PMAP_UNLOCK();
         pmap_protect_range_mmu(pmap, sva, eva,pte_proto);          return;
     else {  
         pmegp = pmeg_cache(pmap, sun3_trunc_seg(sva));  
         if (!pmegp) return;  
         pmap_protect_range_contextless(pmap, sva, eva, pte_proto, pmegp);  
     }  
 }  }
   
 /*  /*
  *      Set the physical protection on the   *      Reduce the permissions on the specified
  *      specified range of this map as requested.   *      range of this map as requested.
    *      (Make pages read-only.)
  */   */
 void  void
 pmap_protect(pmap, sva, eva, prot)  pmap_protect(pmap, sva, eva, prot)
Line 2580  pmap_protect(pmap, sva, eva, prot)
Line 2981  pmap_protect(pmap, sva, eva, prot)
         vm_offset_t     sva, eva;          vm_offset_t     sva, eva;
         vm_prot_t       prot;          vm_prot_t       prot;
 {  {
     vm_offset_t pte_proto, va, neva;          vm_offset_t va, neva;
     int s;  
   
     if (pmap == NULL) return;  
     if (pmap == kernel_pmap) {  
         if (sva < VM_MIN_KERNEL_ADDRESS)  
             sva = VM_MIN_KERNEL_ADDRESS;  
         if (eva > VM_MAX_KERNEL_ADDRESS)  
             eva = VM_MAX_KERNEL_ADDRESS;  
     }  
     else {  
         if (eva > VM_MAX_ADDRESS)  
             eva = VM_MAX_ADDRESS;  
     }  
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     if (pmap_debug & PMD_PROTECT)          if (pmap_debug & PMD_PROTECT)
         printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);                  printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
 #endif  #endif
     if ((prot & VM_PROT_READ) == VM_PROT_NONE) {  
         pmap_remove(pmap, sva, eva);          if (pmap == NULL)
         return;                  return;
     }  
     PMAP_LOCK();          /* If removing all permissions, just unmap. */
     PMAP_DB_LOCK();          if ((prot & VM_PROT_READ) == 0) {
     pte_proto = pmap_pte_prot(prot);                  pmap_remove(pmap, sva, eva);
     va = sva;                  return;
     while (va < eva) {          }
         neva = sun3_round_up_seg(va);  
         if (neva > eva)          /* If leaving writable, nothing to do. */
             neva = eva;          if (prot & VM_PROT_WRITE) {
         pmap_protect_range(pmap, va, neva, pte_proto);                  return;
         va = neva;          }
     }  
     PMAP_DB_UNLK();          if (pmap == kernel_pmap) {
     PMAP_UNLOCK();                  if (sva < VM_MIN_KERNEL_ADDRESS)
                           sva = VM_MIN_KERNEL_ADDRESS;
                   if (eva > DVMA_SPACE_END) {
   #ifdef  PMAP_DEBUG
                           printf("pmap_protect: eva=0x%x\n", eva);
                           Debugger();
   #endif
                           eva = DVMA_SPACE_END;
                   }
           }
           else {
                   if (eva > VM_MAX_ADDRESS)
                           eva = VM_MAX_ADDRESS;
           }
   
           va = sva;
           while (va < eva) {
                   neva = sun3_trunc_seg(va) + NBSG;
                   if (neva > eva)
                           neva = eva;
                   pmap_protect_range(pmap, va, neva);
                   va = neva;
           }
 }  }
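
For example, write-protecting a single page (a sketch):

	pmap_protect(pmap, va, va + NBPG, VM_PROT_READ);

VM_PROT_NONE would unmap the range instead, and any protection that
still includes VM_PROT_WRITE makes the call a no-op, per the two
early-return cases above.
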
   
 /*  /*
    * Count pages resident in this pmap.
    * XXX - Should be called: pmap_resident_count()
    * but that has to be a macro (see kern_sysctl.c)
    */
   segsz_t
   pmap_resident_pages(pmap)
           pmap_t pmap;
   {
           int i, sme, pages;
           pmeg_t pmeg;
   
           pages = 0;
           if (pmap->pm_segmap) {
                   for (i = 0; i < NUSEG; i++) {
                           sme = pmap->pm_segmap[i];
                           if (sme != SEGINV) {
                                   pmeg = pmeg_p(sme);
                                   pages += pmeg->pmeg_vpages;
                           }
                   }
           }
           return (pages);
   }
   
   
   /*
  *      Require that all active physical maps contain no   *      Require that all active physical maps contain no
  *      incorrect entries NOW.  [This update includes   *      incorrect entries NOW.  [This update includes
  *      forcing updates of any address map caching.]   *      forcing updates of any address map caching.]
Line 2625  pmap_protect(pmap, sva, eva, prot)
Line 3062  pmap_protect(pmap, sva, eva, prot)
   *      Generally used to ensure that a thread about   *      to run will see a semantically correct world.
  *      to run will see a semantically correct world.   *      to run will see a semantically correct world.
  */   */
 void pmap_update()  void
   pmap_update()
   {
   }
   
   /*
    *      pmap_copy_page copies the specified (machine independent)
    *      page by mapping the page into virtual memory and using
    *      bcopy to copy the page, one machine dependent page at a
    *      time.
    */
   void
   pmap_copy_page(src, dst)
           vm_offset_t     src, dst;
 {  {
           int pte;
           int s;
   
   #ifdef  PMAP_DEBUG
           if (pmap_debug & PMD_COW)
                   printf("pmap_copy_page: %x -> %x\n", src, dst);
   #endif
           PMAP_LOCK();
   
           if (tmp_vpages_inuse)
                   panic("pmap_copy_page: vpages inuse");
           tmp_vpages_inuse++;
   
           pte = PG_PERM | PA_PGNUM(src);
           set_pte(tmp_vpages[0], pte);
           pte = PG_PERM | PA_PGNUM(dst);
           set_pte(tmp_vpages[1], pte);
           bcopy((char *) tmp_vpages[0], (char *) tmp_vpages[1], NBPG);
           set_pte(tmp_vpages[0], PG_INVAL);
            set_pte(tmp_vpages[1], PG_INVAL);
   
           tmp_vpages_inuse--;
           PMAP_UNLOCK();
 }  }
   
 /*  /*
  *      pmap_zero_page zeros the specified (machine independent)   *      pmap_zero_page zeros the specified (machine independent)
  *      page by mapping the page into virtual memory and using   *      page by mapping the page into virtual memory and using
  *      bzero to clear its contents, one machine dependent page   *      bzero to clear its contents, one machine dependent page
  *      at a time.   *      at a time.
  */   */
 void pmap_zero_page(pa)  void
          vm_offset_t    pa;  pmap_zero_page(pa)
           vm_offset_t     pa;
   {
           int pte;
           int s;
   
   #ifdef  PMAP_DEBUG
           if (pmap_debug & PMD_COW)
                   printf("pmap_zero_page: %x\n", pa);
   #endif
           PMAP_LOCK();
   
           if (tmp_vpages_inuse)
                   panic("pmap_zero_page: vpages inuse");
           tmp_vpages_inuse++;
           pte = PG_PERM | PA_PGNUM(pa);
           set_pte(tmp_vpages[0], pte);
           bzero((char *) tmp_vpages[0], NBPG);
           set_pte(tmp_vpages[0], PG_INVAL);
           tmp_vpages_inuse--;
           PMAP_UNLOCK();
   }
   
   static int temp_seg_inuse;
   
   static int
   get_pte_pmeg(int pmeg_num, int page_num)
   {
           vm_offset_t va;
           int pte;
   
   #ifdef  PMAP_DEBUG
           if (temp_seg_inuse)
                   panic("get_pte_pmeg: temp_seg_inuse");
   #endif
           CHECK_SPL();
   
           temp_seg_inuse++;
           va = temp_seg_va;
           set_segmap(temp_seg_va, pmeg_num);
           va += NBPG*page_num;
           pte = get_pte(va);
           set_segmap(temp_seg_va, SEGINV);
           temp_seg_inuse--;
           return pte;
   }
   
   static void
   set_pte_pmeg(int pmeg_num, int page_num, int pte)
 {  {
     vm_offset_t pte;          vm_offset_t va;
     int s;  
   
 #ifdef  PMAP_DEBUG  #ifdef  PMAP_DEBUG
     if (pmap_debug & PMD_COW)          if (temp_seg_inuse)
         printf("pmap_zero_page: %x\n", pa);                  panic("set_pte_pmeg: temp_seg_inuse");
 #endif  #endif
     PMAP_LOCK();          CHECK_SPL();
     pte = PG_VALID |PG_SYSTEM|PG_WRITE|PG_NC|PG_MMEM| PA_PGNUM(pa);  
     set_pte(tmp_vpages[0], pte);          temp_seg_inuse++;
     bzero((char *) tmp_vpages[0], NBPG);          va = temp_seg_va;
     set_pte(tmp_vpages[0], PG_INVAL);          set_segmap(temp_seg_va, pmeg_num);
     PMAP_UNLOCK();          va += NBPG*page_num;
           set_pte(va, pte);
           set_segmap(temp_seg_va, SEGINV);
           temp_seg_inuse--;
 }  }
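
These two helpers let the pmap inspect or edit a PMEG that is not
loaded in any context, by borrowing the reserved segment at
temp_seg_va just long enough to run the ordinary get_pte()/set_pte()
against it; pmap_remove_range_noctx() above uses them this way.  A
usage sketch (hypothetical values):

	pte = get_pte_pmeg(0x42, 3);	/* PTE 3 of PMEG 0x42 */
	if (pte & PG_VALID)
		printf("pa=0x%x\n", PG_PA(pte));
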
   
 /*  /*
  * Local Variables:   * Local Variables:
  * tab-width: 8   * tab-width: 4
  * End:   * End:
  */   */
