
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/kern_lwp.c between version 1.63 and 1.64

version 1.63 (2007/03/21 18:26:00) -> version 1.64 (2007/05/17 14:51:39)
Line 151 (v.1.63) / Line 151 (v.1.64)
  *      a multiprocessor kernel, state transitions may cause the LWP's lock
  *      pointer to change.  On uniprocessor kernels, most scheduler and
  *      synchronisation objects such as sleep queues and LWPs are protected
- *      by only one mutex (sched_mutex).  In this case, LWPs' lock pointers
- *      will never change and will always reference sched_mutex.
+ *      by only one mutex (spc_mutex on single CPU).  In this case, LWPs' lock
+ *      pointers will never change and will always reference spc_mutex.
+ *      Please note that in a multiprocessor kernel each CPU has own spc_mutex.
+ *      (spc_mutex here refers to l->l_cpu->ci_schedstate.spc_mutex).
  *
  *      Manipulation of the general lock is not performed directly, but
  *      through calls to lwp_lock(), lwp_relock() and similar.
  *
  *      States and their associated locks:
  *
- *      LSIDL, LSZOMB
+ *      LSIDL, LSZOMB, LSONPROC:
  *
- *              Always covered by sched_mutex.
+ *              Always covered by spc_lwplock, which protects running LWPs.
+ *              This is a per-CPU lock.
  *
- *      LSONPROC, LSRUN:
+ *      LSRUN:
  *
- *              Always covered by sched_mutex, which protects the run queues
- *              and other miscellaneous items.  If the scheduler is changed
- *              to use per-CPU run queues, this may become a per-CPU mutex.
+ *              Always covered by spc_mutex, which protects the run queues.
+ *              This may be a per-CPU lock, depending on the scheduler.
  *
  *      LSSLEEP:
  *
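As background for the state/lock table above: callers never take these mutexes by name; they go through lwp_lock(), which acquires whatever lock l->l_mutex currently points at, so a state read stays stable for the critical section. A minimal sketch, assuming the usual <sys/lwp.h> declarations (the helper name is hypothetical):

#include <sys/param.h>
#include <sys/lwp.h>

/* Hypothetical helper, for illustration only. */
static bool
lwp_state_runnable_sketch(struct lwp *l)
{
        bool runnable;

        /* lwp_lock() takes whichever mutex l->l_mutex currently names. */
        lwp_lock(l);
        runnable = (l->l_stat == LSRUN || l->l_stat == LSONPROC);
        lwp_unlock(l);

        return runnable;
}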
Line 179 (v.1.63) / Line 181 (v.1.64)
  *              If the LWP was previously sleeping (l_wchan != NULL), then
  *              l_mutex references the sleep queue mutex.  If the LWP was
  *              runnable or on the CPU when halted, or has been removed from
- *              the sleep queue since halted, then the mutex is sched_mutex.
+ *              the sleep queue since halted, then the mutex is spc_lwplock.
  *
  *      The lock order is as follows:
  *
- *              sleepq_t::sq_mutex  |---> sched_mutex
- *              tschain_t::tc_mutex |
+ *              spc::spc_lwplock ->
+ *                  sleepq_t::sq_mutex ->
+ *                      tschain_t::tc_mutex ->
+ *                          spc::spc_mutex
  *
  *      Each process has an scheduler state mutex (proc::p_smutex), and a
  *      number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
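The rewritten lock order (spc_lwplock, then a sleep queue's sq_mutex, then a timeout chain's tc_mutex, then spc_mutex) means any path that needs more than one of these spin mutexes must take them outermost-first and release them in reverse. A purely illustrative sketch of that ordering; real code reaches these locks through lwp_lock() and the sleepq/turnstile interfaces rather than open-coding all four:

#include <sys/mutex.h>

/* Hypothetical function, only to make the documented order concrete. */
static void
lock_order_sketch(kmutex_t *lwplock, kmutex_t *sq_mutex,
    kmutex_t *tc_mutex, kmutex_t *spc_mutex)
{
        mutex_spin_enter(lwplock);      /* spc::spc_lwplock (outermost) */
        mutex_spin_enter(sq_mutex);     /* sleepq_t::sq_mutex */
        mutex_spin_enter(tc_mutex);     /* tschain_t::tc_mutex */
        mutex_spin_enter(spc_mutex);    /* spc::spc_mutex (innermost) */

        /* ... work that needs all four held ... */

        mutex_spin_exit(spc_mutex);     /* release innermost-first */
        mutex_spin_exit(tc_mutex);
        mutex_spin_exit(sq_mutex);
        mutex_spin_exit(lwplock);
}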
Line 213 (v.1.63) / Line 217 (v.1.64)  __KERNEL_RCSID(0, "$NetBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/cpu.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/syscallargs.h>
Line 591 (v.1.63) / Line 596 (v.1.64)  newlwp(struct lwp *l1, struct proc *p2,
         l2->l_priority = l1->l_priority;
         l2->l_usrpri = l1->l_usrpri;
         l2->l_inheritedprio = MAXPRI;
-        l2->l_mutex = &sched_mutex;
+        l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
         l2->l_cpu = l1->l_cpu;
         l2->l_flag = inmem ? LW_INMEM : 0;
         lwp_initspecific(l2);
+        sched_lwp_fork(l2);
 
         if (p2->p_flag & PK_SYSTEM) {
                 /*
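The key change in newlwp() is that the new LWP's lock pointer is now derived from its CPU rather than from the global sched_mutex. A sketch of what the assignment expands to; lwp_spc_mutex() is a hypothetical wrapper, not something in the tree:

#include <sys/cpu.h>
#include <sys/lwp.h>
#include <sys/mutex.h>

/* Hypothetical wrapper; the diff simply open-codes this expression. */
static inline kmutex_t *
lwp_spc_mutex(struct lwp *l)
{
        /* Each CPU carries its own scheduler state, including spc_mutex. */
        return l->l_cpu->ci_schedstate.spc_mutex;
}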
Line 650 (v.1.63) / Line 656 (v.1.64)  newlwp(struct lwp *l1, struct proc *p2,
 }
 
 /*
- * Quit the process.  This will call cpu_exit, which will call cpu_switch,
- * so this can only be used meaningfully if you're willing to switch away.
- * Calling with l!=curlwp would be weird.
+ * Called by MD code when a new LWP begins execution.  Must be called
+ * with the previous LWP locked (so at splsched), or if there is no
+ * previous LWP, at splsched.
+ */
+void
+lwp_startup(struct lwp *prev, struct lwp *new)
+{
+
+        curlwp = new;
+        if (prev != NULL) {
+                lwp_unlock(prev);
+        }
+        spl0();
+        pmap_activate(new);
+        LOCKDEBUG_BARRIER(NULL, 0);
+        KERNEL_LOCK(1, new);
+}
+
+/*
+ * Quit the process.
+ * this can only be used meaningfully if you're willing to switch away.
+ * Calling with l != curlwp would be weird.
  */
 void
 lwp_exit(struct lwp *l)
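lwp_startup() is the new MI hook run when a freshly created LWP first gains the CPU: it publishes curlwp, drops the lock on the LWP being switched away from, lowers the SPL, and activates the pmap before taking the kernel lock. Roughly, machine-dependent switch glue is expected to call it before entering the LWP's start routine; a sketch under that assumption, with illustrative names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>

/* Illustrative only: the real trampoline is per-architecture MD glue. */
static void
lwp_trampoline_sketch(struct lwp *prev, struct lwp *new,
    void (*func)(void *), void *arg)
{
        lwp_startup(prev, new);   /* unlock prev, spl0(), pmap_activate(new) */
        (*func)(arg);             /* enter the LWP's body */
        panic("lwp_trampoline_sketch: start routine returned");
}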
Line 774 (v.1.63) / Line 799 (v.1.64)  lwp_exit(struct lwp *l)
         KERNEL_UNLOCK_ALL(l, NULL);
 #endif
 
-        cpu_exit(l);
+        lwp_exit_switchaway(l);
 }
 
-/*
- * We are called from cpu_exit() once it is safe to schedule the dead LWP's
- * resources to be freed (i.e., once we've switched to the idle PCB for the
- * current CPU).
- */
 void
-lwp_exit2(struct lwp *l)
+lwp_exit_switchaway(struct lwp *l)
 {
-        /* XXXSMP re-enable preemption */
+        struct cpu_info *ci;
+        struct lwp *idlelwp;
+
+        /* Unlocked, but is for statistics only. */
+        uvmexp.swtch++;
+
+        (void)splsched();
+        l->l_flag &= ~LW_RUNNING;
+        ci = curcpu();
+        idlelwp = ci->ci_data.cpu_idlelwp;
+        idlelwp->l_stat = LSONPROC;
+        cpu_switchto(NULL, idlelwp);
 }
 
 /*
Line 809 (v.1.63) / Line 840 (v.1.64)  lwp_free(struct lwp *l, bool recycle, bo
                  * This needs to co-incide with coming off p_lwps.
                  */
                 timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
+                p->p_pctcpu += l->l_pctcpu;
                 LIST_REMOVE(l, l_sibling);
                 p->p_nlwps--;
                 p->p_nzlwps--;
Line 830 (v.1.63) / Line 862 (v.1.64)  lwp_free(struct lwp *l, bool recycle, bo
          * all locks to avoid deadlock against interrupt handlers on
          * the target CPU.
          */
-        if (l->l_cpu->ci_curlwp == l) {
+        if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
                 int count;
+                (void)count; /* XXXgcc */
                 KERNEL_UNLOCK_ALL(curlwp, &count);
-                while (l->l_cpu->ci_curlwp == l)
+                while ((l->l_flag & LW_RUNNING) != 0 ||
+                    l->l_cpu->ci_curlwp == l)
                         SPINLOCK_BACKOFF_HOOK;
                 KERNEL_LOCK(count, curlwp);
         }
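This is the consumer side of the LW_RUNNING flag introduced above: lwp_exit_switchaway() clears LW_RUNNING just before switching the exiting CPU to its idle LWP, and lwp_free() must not tear down the PCB and stack while that CPU might still be running on them. Restated as a standalone sketch (the helper name and the <sys/lock.h> location of SPINLOCK_BACKOFF_HOOK are assumptions):

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/lock.h>           /* SPINLOCK_BACKOFF_HOOK (assumed location) */
#include <sys/lwp.h>

/* Hypothetical helper restating the wait added to lwp_free(). */
static void
lwp_wait_offcpu_sketch(struct lwp *l)
{
        while ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l)
                SPINLOCK_BACKOFF_HOOK;  /* back off, then re-check */
}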
Line 849 (v.1.63) / Line 883 (v.1.64)  lwp_free(struct lwp *l, bool recycle, bo
 
         /*
          * Free the LWP's turnstile and the LWP structure itself unless the
-         * caller wants to recycle them.
+         * caller wants to recycle them.  Also, free the scheduler specific data.
          *
          * We can't return turnstile0 to the pool (it didn't come from it),
          * so if it comes up just drop it quietly and move on.
Line 857 (v.1.63) / Line 891 (v.1.64)  lwp_free(struct lwp *l, bool recycle, bo
          * We don't recycle the VM resources at this time.
          */
         KERNEL_LOCK(1, curlwp);         /* XXXSMP */
+
+        sched_lwp_exit(l);
+
         if (!recycle && l->l_ts != &turnstile0)
                 pool_cache_put(&turnstile_cache, l->l_ts);
 #ifndef __NO_CPU_LWP_FREE
Line 908 (v.1.63) / Line 945 (v.1.64)  proc_representative_lwp(struct proc *p,
                 onproc = running = sleeping = stopped = suspended = NULL;
                 signalled = NULL;
                 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
+                        if ((l->l_flag & LW_IDLE) != 0) {
+                                continue;
+                        }
                         if (l->l_lid == p->p_sigctx.ps_lwp)
                                 signalled = l;
                         switch (l->l_stat) {
Line 1044 (v.1.63) / Line 1084 (v.1.64)  lwp_locked(struct lwp *l, kmutex_t *mtx)
 {
         kmutex_t *cur = l->l_mutex;
 
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         return mutex_owned(cur) && (mtx == cur || mtx == NULL);
-#else
-        return mutex_owned(cur);
-#endif
 }
 
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
 /*
  * Lock an LWP.
  */
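With the uniprocessor special case gone, lwp_locked() is the one way to assert the locking protocol. A typical usage sketch, assuming KASSERT is visible through the usual kernel headers (the function name is hypothetical):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>

/* Hypothetical caller: assert the protocol on entry. */
static void
lwp_fiddle_sketch(struct lwp *l)
{
        /*
         * NULL means "any owned mutex currently covering l"; passing a
         * specific kmutex_t * additionally checks that it is that one.
         */
        KASSERT(lwp_locked(l, NULL));

        /* ... touch fields protected by l->l_mutex ... */
}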
Line 1083 (v.1.63) / Line 1118 (v.1.64)  lwp_lock_retry(struct lwp *l, kmutex_t *
         } while (__predict_false(l->l_mutex != old));
 #endif
 }
-#endif
 
 /*
  * Lend a new mutex to an LWP.  The old mutex must be held.
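lwp_lock_retry() exists because l->l_mutex can change underneath a waiter whenever the LWP changes state on another CPU. The idiom, also visible in lwp_trylock() later in this diff, is roughly as follows (a sketch, not the in-tree function):

#include <sys/lwp.h>
#include <sys/mutex.h>

/* Hypothetical rendition of the retry idiom used by lwp_lock(). */
static void
lwp_lock_sketch(struct lwp *l)
{
        kmutex_t *old;

        for (;;) {
                old = l->l_mutex;               /* snapshot the current lock */
                mutex_spin_enter(old);
                if (__predict_true(l->l_mutex == old))
                        return;                 /* still the right lock */
                mutex_spin_exit(old);           /* raced with a relock; retry */
        }
}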
Line 1094 (v.1.63) / Line 1128 (v.1.64)  lwp_setlock(struct lwp *l, kmutex_t *new
 
         KASSERT(mutex_owned(l->l_mutex));
 
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         mb_write();
         l->l_mutex = new;
-#else
-        (void)new;
-#endif
 }
 
 /*
Line 1114 (v.1.63) / Line 1144 (v.1.64)  lwp_unlock_to(struct lwp *l, kmutex_t *n
         KASSERT(mutex_owned(l->l_mutex));
 
         old = l->l_mutex;
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         mb_write();
         l->l_mutex = new;
-#else
-        (void)new;
-#endif
         mutex_spin_exit(old);
 }
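lwp_unlock_to() is the state-transition half of the scheme: the caller holds the LWP's current lock, repoints l_mutex at the lock that will cover the new state, and releases the old one with no window where the LWP is unlocked. A hedged example of handing an LWP over to a sleep queue's mutex; the function is illustrative, and sq_mutex stands for the relevant sleepq_t::sq_mutex, already held by the caller:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>
#include <sys/mutex.h>

/* Sketch only; real enqueueing goes through the sleepq interfaces. */
static void
lwp_move_to_sleepq_sketch(struct lwp *l, kmutex_t *sq_mutex)
{
        KASSERT(lwp_locked(l, NULL));           /* old lock held */
        KASSERT(mutex_owned(sq_mutex));         /* new lock held too */

        l->l_stat = LSSLEEP;
        lwp_unlock_to(l, sq_mutex);             /* l_mutex = sq_mutex; drop old */
}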
   
Line 1130 (v.1.63) / Line 1156 (v.1.64)  lwp_unlock_to(struct lwp *l, kmutex_t *n
 void
 lwp_relock(struct lwp *l, kmutex_t *new)
 {
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         kmutex_t *old;
-#endif
 
         KASSERT(mutex_owned(l->l_mutex));
 
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         old = l->l_mutex;
         if (old != new) {
                 mutex_spin_enter(new);
                 l->l_mutex = new;
                 mutex_spin_exit(old);
         }
-#else
-        (void)new;
-#endif
 }
 
 int
 lwp_trylock(struct lwp *l)
 {
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         kmutex_t *old;
 
         for (;;) {
Line 1161 (v.1.63) / Line 1180 (v.1.64)  lwp_trylock(struct lwp *l)
                         return 1;
                 mutex_spin_exit(old);
         }
-#else
-        return mutex_tryenter(l->l_mutex);
-#endif
 }
 
 /*
Line 1216 (v.1.63) / Line 1232 (v.1.64)  lwp_userret(struct lwp *l)
                         lwp_lock(l);
                         l->l_stat = LSSUSPENDED;
                         mutex_exit(&p->p_smutex);
-                        mi_switch(l, NULL);
+                        mi_switch(l);
                 }
 
                 /* Process is exiting. */

Legend:
  "-"  line removed from v.1.63
  "+"  line added in v.1.64
  unprefixed lines are unchanged context
