
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/kern/kern_mutex.c between version 1.14 and 1.15

version 1.14 (2007/05/17 14:51:40) to version 1.15 (2007/07/09 21:10:53)
Line 312 (v.1.14) / Line 312 (v.1.15): mutex_init(kmutex_t *mtx, kmutex_type_t

 
         memset(mtx, 0, sizeof(*mtx));
 
-        if (type == MUTEX_DRIVER)
+        switch (type) {
+        case MUTEX_ADAPTIVE:
+        case MUTEX_DEFAULT:
+                KASSERT(ipl == IPL_NONE);
+                break;
+        case MUTEX_DRIVER:
                 type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);
+                break;
+        default:
+                break;
+        }
 
         switch (type) {
         case MUTEX_NODEBUG:
-                KASSERT(ipl == IPL_NONE);
                 id = LOCKDEBUG_ALLOC(mtx, NULL);
-                MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
+                MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
                 break;
         case MUTEX_ADAPTIVE:
         case MUTEX_DEFAULT:
-                KASSERT(ipl == IPL_NONE);
                 id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
                 MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
                 break;
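
For illustration, a minimal user-space sketch (hypothetical names such as normalize_type; not the kernel code) of the type normalization the new first switch performs: a MUTEX_DRIVER request becomes an adaptive mutex at IPL_NONE and a spin mutex at any higher IPL, while MUTEX_ADAPTIVE/MUTEX_DEFAULT are asserted to be at IPL_NONE.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's kmutex_type_t and IPL values. */
enum mtx_type { MTX_ADAPTIVE, MTX_SPIN, MTX_DEFAULT, MTX_DRIVER, MTX_NODEBUG };
enum { IPL_NONE = 0, IPL_VM = 5 };

/* Mirrors the normalization step added to mutex_init() above. */
static enum mtx_type
normalize_type(enum mtx_type type, int ipl)
{
        switch (type) {
        case MTX_ADAPTIVE:
        case MTX_DEFAULT:
                /* The kernel KASSERTs ipl == IPL_NONE for these types. */
                break;
        case MTX_DRIVER:
                /* Drivers get adaptive vs. spin purely from the IPL. */
                type = (ipl == IPL_NONE ? MTX_ADAPTIVE : MTX_SPIN);
                break;
        default:
                break;
        }
        return type;
}

int
main(void)
{
        printf("MUTEX_DRIVER at IPL_NONE -> %s\n",
            normalize_type(MTX_DRIVER, IPL_NONE) == MTX_ADAPTIVE ? "adaptive" : "spin");
        printf("MUTEX_DRIVER at IPL_VM   -> %s\n",
            normalize_type(MTX_DRIVER, IPL_VM) == MTX_ADAPTIVE ? "adaptive" : "spin");
        return 0;
}
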
Line 362 (v.1.14) / Line 369 (v.1.15): mutex_destroy(kmutex_t *mtx)

  *
  *      Return true if an adaptive mutex owner is running on a CPU in the
  *      system.  If the target is waiting on the kernel big lock, then we
- *      return false immediately.  This is necessary to avoid deadlock
- *      against the big lock.
+ *      must release it.  This is necessary to avoid deadlock.
  *
  *      Note that we can't use the mutex owner field as an LWP pointer.  We
  *      don't have full control over the timing of our execution, and so the
  *      pointer could be completely invalid by the time we dereference it.
- *
- *      XXX This should be optimised further to reduce potential cache line
- *      ping-ponging and skewing of the spin time while busy waiting.
  */
 #ifdef MULTIPROCESSOR
 int
Line 384 (v.1.14) / Line 387 (v.1.15): mutex_onproc(uintptr_t owner, struct cpu

                 return 0;
         l = (struct lwp *)MUTEX_OWNER(owner);
 
+        /* See if the target is running on a CPU somewhere. */
         if ((ci = *cip) != NULL && ci->ci_curlwp == l)
-                return ci->ci_biglock_wanted != l;
-
-        for (CPU_INFO_FOREACH(cii, ci)) {
-                if (ci->ci_curlwp == l) {
-                        *cip = ci;
-                        return ci->ci_biglock_wanted != l;
-                }
-        }
+                goto run;
+        for (CPU_INFO_FOREACH(cii, ci))
+                if (ci->ci_curlwp == l)
+                        goto run;
 
+        /* No: it may be safe to block now. */
         *cip = NULL;
         return 0;
 
+ run:
+        /* Target is running; do we need to block? */
+        *cip = ci;
+        return ci->ci_biglock_wanted != l;
 }
-#endif
+#endif  /* MULTIPROCESSOR */
 
 /*
  * mutex_vector_enter:
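
As a rough model of how mutex_onproc() is used, the following user-space sketch (hypothetical names, single-threaded; not the kernel's mutex_vector_enter()) shows the adaptive-mutex decision the function supports: keep spinning while the owner is running on some CPU, otherwise sleep, on the assumption that a running owner will release the lock soon.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for kernel state. */
struct lwp { const char *name; };
struct cpu { struct lwp *curlwp; };

#define NCPU 2
static struct cpu cpus[NCPU];

/*
 * Simplified analogue of mutex_onproc(): report whether the owning
 * LWP is currently running on any CPU in the system.
 */
static bool
owner_onproc(struct lwp *owner)
{
        for (int i = 0; i < NCPU; i++)
                if (cpus[i].curlwp == owner)
                        return true;
        return false;
}

int
main(void)
{
        struct lwp owner = { "owner" };

        /* Owner is on a CPU: an adaptive mutex keeps busy-waiting. */
        cpus[1].curlwp = &owner;
        printf("owner on CPU  -> %s\n", owner_onproc(&owner) ? "spin" : "sleep");

        /* Owner was switched out: better to sleep on the turnstile. */
        cpus[1].curlwp = NULL;
        printf("owner off CPU -> %s\n", owner_onproc(&owner) ? "spin" : "sleep");
        return 0;
}
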
Line 704 (v.1.14) / Line 710 (v.1.15): mutex_vector_exit(kmutex_t *mtx)

         MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
         MUTEX_UNLOCKED(mtx);
 
+#ifdef LOCKDEBUG
+        /*
+         * Avoid having to take the turnstile chain lock every time
+         * around.  Raise the priority level to splhigh() in order
+         * to disable preemption and so make the following atomic.
+         */
+        {
+                int s = splhigh();
+                if (!MUTEX_HAS_WAITERS(mtx)) {
+                        MUTEX_RELEASE(mtx);
+                        splx(s);
+                        return;
+                }
+                splx(s);
+        }
+#endif
+
         /*
          * Get this lock's turnstile.  This gets the interlock on
          * the sleep queue.  Once we have that, we can clear the
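
The added block is a release fast path. Here is a minimal user-space sketch (hypothetical helper names; not the kernel code) of the pattern: raise the priority level so the waiters check and the release cannot be separated by preemption, release directly when nobody is waiting, and fall through to the turnstile slow path otherwise.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel primitives used above. */
static bool mtx_has_waiters;    /* models MUTEX_HAS_WAITERS() */
static int  cur_ipl;            /* models the current priority level */

static int  splhigh(void) { int s = cur_ipl; cur_ipl = 7; return s; }
static void splx(int s) { cur_ipl = s; }
static void mutex_release(void) { printf("fast path: released directly\n"); }
static void turnstile_slow_path(void) { printf("slow path: wake waiters\n"); }

/* Simplified analogue of the exit path shown in the diff. */
static void
mutex_exit_sketch(void)
{
        int s = splhigh();      /* no preemption between check and release */
        if (!mtx_has_waiters) {
                mutex_release();
                splx(s);
                return;
        }
        splx(s);
        turnstile_slow_path();
}

int
main(void)
{
        mtx_has_waiters = false;
        mutex_exit_sketch();

        mtx_has_waiters = true;
        mutex_exit_sketch();
        return 0;
}
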
