[BACK] Return to uvm_glue.c CVS log — [TXT] [DIR] — up to [cvs.NetBSD.org] / src / sys / uvm

Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/uvm/uvm_glue.c between version 1.50 and 1.50.2.5

version 1.50, 2001/06/02 18:09:26 version 1.50.2.5, 2002/10/10 18:45:05
Line 66 
Line 66 
  * rights to redistribute these changes.   * rights to redistribute these changes.
  */   */
   
   #include <sys/cdefs.h>
   __KERNEL_RCSID(0, "$NetBSD$");
   
 #include "opt_kgdb.h"  #include "opt_kgdb.h"
   #include "opt_kstack.h"
 #include "opt_sysv.h"  #include "opt_sysv.h"
 #include "opt_uvmhist.h"  #include "opt_uvmhist.h"
   
Line 94 
Line 98 
   
 static void uvm_swapout __P((struct proc *));  static void uvm_swapout __P((struct proc *));
   
   #define UVM_NUAREA_MAX 16
   void *uvm_uareas;
   int uvm_nuarea;
   
 /*  /*
  * XXXCDC: do these really belong here?   * XXXCDC: do these really belong here?
  */   */
Line 199  uvm_chgkprot(addr, len, rw)
Line 207  uvm_chgkprot(addr, len, rw)
                         panic("chgkprot: invalid page");                          panic("chgkprot: invalid page");
                 pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);                  pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
         }          }
         pmap_update();          pmap_update(pmap_kernel());
 }  }
 #endif  #endif
   
 /*  /*
  * vslock: wire user memory for I/O   * uvm_vslock: wire user memory for I/O
  *   *
  * - called from physio and sys___sysctl   * - called from physio and sys___sysctl
  * - XXXCDC: consider nuking this (or making it a macro?)   * - XXXCDC: consider nuking this (or making it a macro?)
Line 224  uvm_vslock(p, addr, len, access_type)
Line 232  uvm_vslock(p, addr, len, access_type)
         map = &p->p_vmspace->vm_map;          map = &p->p_vmspace->vm_map;
         start = trunc_page((vaddr_t)addr);          start = trunc_page((vaddr_t)addr);
         end = round_page((vaddr_t)addr + len);          end = round_page((vaddr_t)addr + len);
         error = uvm_fault_wire(map, start, end, access_type);          error = uvm_fault_wire(map, start, end, VM_FAULT_WIRE, access_type);
         return error;          return error;
 }  }
   
 /*  /*
  * vslock: wire user memory for I/O   * uvm_vsunlock: unwire user memory wired by uvm_vslock()
  *   *
  * - called from physio and sys___sysctl   * - called from physio and sys___sysctl
  * - XXXCDC: consider nuking this (or making it a macro?)   * - XXXCDC: consider nuking this (or making it a macro?)
Line 274  uvm_fork(p1, p2, shared, stack, stacksiz
Line 282  uvm_fork(p1, p2, shared, stack, stacksiz
   
         if (shared == TRUE) {          if (shared == TRUE) {
                 p2->p_vmspace = NULL;                  p2->p_vmspace = NULL;
                 uvmspace_share(p1, p2);                 /* share vmspace */                  uvmspace_share(p1, p2);
         } else          } else
                 p2->p_vmspace = uvmspace_fork(p1->p_vmspace); /* fork vmspace */                  p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
   
         /*          /*
          * Wire down the U-area for the process, which contains the PCB           * Wire down the U-area for the process, which contains the PCB
Line 287  uvm_fork(p1, p2, shared, stack, stacksiz
Line 295  uvm_fork(p1, p2, shared, stack, stacksiz
          * Note the kernel stack gets read/write accesses right off           * Note the kernel stack gets read/write accesses right off
          * the bat.           * the bat.
          */           */
         error = uvm_fault_wire(kernel_map, (vaddr_t)up,          error = uvm_fault_wire(kernel_map, (vaddr_t)up, (vaddr_t)up + USPACE,
             (vaddr_t)up + USPACE, VM_PROT_READ | VM_PROT_WRITE);              VM_FAULT_WIRE, VM_PROT_READ | VM_PROT_WRITE);
         if (error)          if (error)
                 panic("uvm_fork: uvm_fault_wire failed: %d", error);                  panic("uvm_fork: uvm_fault_wire failed: %d", error);
   
   #ifdef KSTACK_CHECK_MAGIC
           /*
            * fill stack with magic number
            */
           kstack_setup_magic(p2);
   #endif
   
         /*          /*
          * p_stats currently points at a field in the user struct.  Copy           * p_stats currently points at a field in the user struct.  Copy
          * parts of p_stats, and zero out the rest.           * parts of p_stats, and zero out the rest.
Line 322  uvm_fork(p1, p2, shared, stack, stacksiz
Line 337  uvm_fork(p1, p2, shared, stack, stacksiz
  * - we must run in a separate thread because freeing the vmspace   * - we must run in a separate thread because freeing the vmspace
  *   of the dead process may block.   *   of the dead process may block.
  */   */
   
 void  void
 uvm_exit(p)  uvm_exit(p)
         struct proc *p;          struct proc *p;
Line 330  uvm_exit(p)
Line 346  uvm_exit(p)
   
         uvmspace_free(p->p_vmspace);          uvmspace_free(p->p_vmspace);
         p->p_flag &= ~P_INMEM;          p->p_flag &= ~P_INMEM;
         uvm_fault_unwire(kernel_map, va, va + USPACE);          uvm_uarea_free(va);
         uvm_km_free(kernel_map, va, USPACE);  
         p->p_addr = NULL;          p->p_addr = NULL;
 }  }
   
 /*  /*
    * uvm_uarea_alloc: allocate a u-area
    */
   
   vaddr_t
   uvm_uarea_alloc(void)
   {
           vaddr_t uaddr;
   
   #ifndef USPACE_ALIGN
   #define USPACE_ALIGN    0
   #endif
   
           uaddr = (vaddr_t)uvm_uareas;
           if (uaddr) {
                   uvm_uareas = *(void **)uvm_uareas;
                   uvm_nuarea--;
           } else {
                   uaddr = uvm_km_valloc_align(kernel_map, USPACE, USPACE_ALIGN);
           }
           return uaddr;
   }
   
   /*
    * uvm_uarea_free: free a u-area
    */
   
   void
   uvm_uarea_free(vaddr_t uaddr)
   {
   
           if (uvm_nuarea < UVM_NUAREA_MAX) {
                   *(void **)uaddr = uvm_uareas;
                   uvm_uareas = (void *)uaddr;
                   uvm_nuarea++;
           } else {
                   uvm_km_free(kernel_map, uaddr, USPACE);
           }
   }
   
   /*
  * uvm_init_limit: init per-process VM limits   * uvm_init_limit: init per-process VM limits
  *   *
  * - called for process 0 and then inherited by all others.   * - called for process 0 and then inherited by all others.
  */   */
   
 void  void
 uvm_init_limits(p)  uvm_init_limits(p)
         struct proc *p;          struct proc *p;
Line 376  uvm_swapin(p)
Line 432  uvm_swapin(p)
         struct proc *p;          struct proc *p;
 {  {
         vaddr_t addr;          vaddr_t addr;
         int s;          int s, error;
   
         addr = (vaddr_t)p->p_addr;          addr = (vaddr_t)p->p_addr;
         /* make P_INMEM true */          /* make P_INMEM true */
         uvm_fault_wire(kernel_map, addr, addr + USPACE,          error = uvm_fault_wire(kernel_map, addr, addr + USPACE, VM_FAULT_WIRE,
             VM_PROT_READ | VM_PROT_WRITE);              VM_PROT_READ | VM_PROT_WRITE);
           if (error) {
                   panic("uvm_swapin: rewiring stack failed: %d", error);
           }
   
         /*          /*
          * Some architectures need to be notified when the user area has           * Some architectures need to be notified when the user area has
Line 502  loop:
Line 561  loop:
  *   are swapped... otherwise the longest-sleeping or stopped process   *   are swapped... otherwise the longest-sleeping or stopped process
  *   is swapped, otherwise the longest resident process...   *   is swapped, otherwise the longest resident process...
  */   */
   
 void  void
 uvm_swapout_threads()  uvm_swapout_threads()
 {  {
Line 605  uvm_swapout(p)
Line 665  uvm_swapout(p)
                 remrunqueue(p);                  remrunqueue(p);
         SCHED_UNLOCK(s);          SCHED_UNLOCK(s);
         p->p_swtime = 0;          p->p_swtime = 0;
           p->p_stats->p_ru.ru_nswap++;
         ++uvmexp.swapouts;          ++uvmexp.swapouts;
   
         /*          /*
Line 615  uvm_swapout(p)
Line 676  uvm_swapout(p)
         pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));          pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
 }  }
   
   /*
    * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
    * a core file.
    */
   
    /*
     * Walk every entry of the process's VM map, clipping each to the
     * user-visible address range, classifying it (stack / not worth
     * dumping), and handing the resulting uvm_coredump_state to the
     * caller-supplied func.  Stops and returns the first non-zero error
     * from func; returns 0 once the whole map has been walked.
     */
    int
    uvm_coredump_walkmap(p, vp, cred, func, cookie)
            struct proc *p;
            struct vnode *vp;
            struct ucred *cred;
            int (*func)(struct proc *, struct vnode *, struct ucred *,
                struct uvm_coredump_state *);
            void *cookie;
    {
            struct uvm_coredump_state state;
            struct vmspace *vm = p->p_vmspace;
            struct vm_map *map = &vm->vm_map;
            struct vm_map_entry *entry;
            vaddr_t maxstack;
            int error;

            /* lowest address of the live (grown) part of the user stack */
            maxstack = trunc_page(USRSTACK - ctob(vm->vm_ssize));

            for (entry = map->header.next; entry != &map->header;
                 entry = entry->next) {
                    /* Should never happen for a user process. */
                    if (UVM_ET_ISSUBMAP(entry))
                            panic("uvm_coredump_walkmap: user process with "
                                "submap?");

                    state.cookie = cookie;
                    state.start = entry->start;
                    state.end = entry->end;
                    state.prot = entry->protection;
                    state.flags = 0;

                    /* skip entries entirely above the user address space */
                    if (state.start >= VM_MAXUSER_ADDRESS)
                            continue;

                    /* clip entries that straddle the user/kernel boundary */
                    if (state.end > VM_MAXUSER_ADDRESS)
                            state.end = VM_MAXUSER_ADDRESS;

                    /*
                     * Entries in the stack region: skip the not-yet-grown
                     * portion below maxstack, clip partially-grown ones,
                     * and tag what remains as stack.
                     */
                    if (state.start >= (vaddr_t)vm->vm_maxsaddr) {
                            if (state.end <= maxstack)
                                    continue;
                            if (state.start < maxstack)
                                    state.start = maxstack;
                            state.flags |= UVM_COREDUMP_STACK;
                    }

                    /* read-only mappings need no dumping; they can be refetched */
                    if ((entry->protection & VM_PROT_WRITE) == 0)
                            state.flags |= UVM_COREDUMP_NODUMP;

                    /* device mappings may fault or hang if read — don't dump */
                    if (entry->object.uvm_obj != NULL &&
                        entry->object.uvm_obj->pgops == &uvm_deviceops)
                            state.flags |= UVM_COREDUMP_NODUMP;

                    error = (*func)(p, vp, cred, &state);
                    if (error)
                            return (error);
            }

            return (0);
    }

Legend:
  Removed from v.1.50
  Changed lines
  Added in v.1.50.2.5

CVSweb <webmaster@jp.NetBSD.org>