
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/lib/libpthread/pthread_mutex.c between version 1.64.4.2 and 1.65

version 1.64.4.2 (2020/04/08 14:07:15)  vs.  version 1.65 (2019/03/05 22:49:38)
Line 131 (v1.64.4.2) / Line 131 (v1.65): pthread_mutex_init(pthread_mutex_t *ptm,
                return __libc_mutex_init_stub(ptm, attr);
#endif

-       pthread__error(EINVAL, "Invalid mutex attribute",
-           attr == NULL || attr->ptma_magic == _PT_MUTEXATTR_MAGIC);
-
        if (attr == NULL) {
                type = PTHREAD_MUTEX_NORMAL;
                proto = PTHREAD_PRIO_NONE;
Line 200 (v1.64.4.2) / Line 197 (v1.65): pthread_mutex_lock(pthread_mutex_t *ptm)
        if (__predict_false(__uselibcstub))
                return __libc_mutex_lock_stub(ptm);

-       pthread__error(EINVAL, "Invalid mutex",
-           ptm->ptm_magic == _PT_MUTEX_MAGIC);
-
        self = pthread__self();
        val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
        if (__predict_true(val == NULL)) {
Line 220 (v1.64.4.2) / Line 214 (v1.65): pthread_mutex_timedlock(pthread_mutex_t*
        pthread_t self;
        void *val;

-       pthread__error(EINVAL, "Invalid mutex",
-           ptm->ptm_magic == _PT_MUTEX_MAGIC);
-
        self = pthread__self();
        val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
        if (__predict_true(val == NULL)) {
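Both fast paths above come down to a single compare-and-swap of ptm_owner from NULL to the calling thread; only when that CAS fails does the code drop into the slow path. A minimal sketch of the same idea, using C11 atomics in place of the libpthread-internal atomic_cas_ptr() and invented names (toy_mutex, toy_try_fast_lock) rather than the library's own:

/*
 * Illustrative sketch of the uncontended-acquire fast path.  C11 atomics
 * stand in for atomic_cas_ptr(); the types and names are placeholders.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct toy_mutex {
        _Atomic(void *) owner;          /* NULL while unlocked */
};

static bool
toy_try_fast_lock(struct toy_mutex *m, void *self)
{
        void *expected = NULL;

        /* CAS NULL -> self; success means the lock is now ours. */
        return atomic_compare_exchange_strong_explicit(&m->owner,
            &expected, self, memory_order_acquire, memory_order_relaxed);
        /* On failure a caller would fall through to a slow path. */
}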
Line 244 (v1.64.4.2) / Line 235 (v1.65): pthread__mutex_pause(void)

/*
 * Spin while the holder is running.  'lwpctl' gives us the true
- * status of the thread.
+ * status of the thread.  pt_blocking is set by libpthread in order
+ * to cut out system call and kernel spinlock overhead on remote CPUs
+ * (could represent many thousands of clock cycles).  pt_blocking also
+ * makes this thread yield if the target is calling sched_yield().
 */
NOINLINE static void *
pthread__mutex_spin(pthread_mutex_t *ptm, pthread_t owner)
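The comment above describes the spin policy whose loop body appears in the next two hunks: keep spinning only while the holder is observed running, with a capped exponential backoff between polls. A rough, hypothetical sketch of that pattern (toy_holder_running() and toy_cpu_pause() are stand-ins for the lwpctl/pt_blocking check and a CPU pause hint; they are not libpthread interfaces):

/*
 * Sketch of spin-while-the-holder-runs with capped exponential backoff.
 * The helpers below are stubs so the example is self-contained.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool
toy_holder_running(void *owner)
{
        (void)owner;
        return false;   /* stub: the real check reads the holder's lwpctl */
}

static void
toy_cpu_pause(void)
{
        /* stub: a pause/yield hint to the CPU would go here */
}

static void *
toy_spin_while_owner_runs(_Atomic(void *) *ownerp, void *owner)
{
        unsigned int count = 2, i;

        while (atomic_load_explicit(ownerp, memory_order_relaxed) == owner &&
            toy_holder_running(owner)) {
                if (count < 128)
                        count += count;         /* double the pause, capped */
                for (i = count; i != 0; i--)
                        toy_cpu_pause();
        }
        return atomic_load_explicit(ownerp, memory_order_acquire);
}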
Line 256 (v1.64.4.2) / Line 250 (v1.65): pthread__mutex_spin(pthread_mutex_t *ptm
                thread = (pthread_t)MUTEX_OWNER(owner);
                if (thread == NULL)
                        break;
-               if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE)
+               if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
+                   thread->pt_blocking)
                        break;
                if (count < 128)
                        count += count;
Line 267 (v1.64.4.2) / Line 262 (v1.65): pthread__mutex_spin(pthread_mutex_t *ptm
        return owner;
}

-NOINLINE static bool
+NOINLINE static void
pthread__mutex_setwaiters(pthread_t self, pthread_mutex_t *ptm)
{
-       void *owner, *next;
+       void *new, *owner;

        /*
         * Note that the mutex can become unlocked before we set
Line 286 (v1.64.4.2) / Line 281 (v1.65): pthread__mutex_setwaiters(pthread_t self
         * the value of ptm_owner/pt_mutexwait after we have entered
         * the waiters list (the CAS itself must be atomic).
         */
-       for (owner = ptm->ptm_owner;; owner = next) {
-               if (MUTEX_OWNER(owner) == 0) {
-                       pthread__mutex_wakeup(self, ptm);
-                       return true;
-               }
-               if (MUTEX_HAS_WAITERS(owner)) {
-                       return false;
-               }
-               next = atomic_cas_ptr(&ptm->ptm_owner, owner,
-                   (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT));
-       }
+again:
+       membar_consumer();
+       owner = ptm->ptm_owner;
+
+       if (MUTEX_OWNER(owner) == 0) {
+               pthread__mutex_wakeup(self, ptm);
+               return;
+       }
+       if (!MUTEX_HAS_WAITERS(owner)) {
+               new = (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT);
+               if (atomic_cas_ptr(&ptm->ptm_owner, owner, new) != owner) {
+                       goto again;
+               }
+       }
+
+       /*
+        * Note that pthread_mutex_unlock() can do a non-interlocked CAS.
+        * We cannot know if the presence of the waiters bit is stable
+        * while the holding thread is running.  There are many assumptions;
+        * see sys/kern/kern_mutex.c for details.  In short, we must spin if
+        * we see that the holder is running again.
+        */
+       membar_sync();
+       if (MUTEX_OWNER(owner) != (uintptr_t)self)
+               pthread__mutex_spin(ptm, owner);
+
+       if (membar_consumer(), !MUTEX_HAS_WAITERS(ptm->ptm_owner)) {
+               goto again;
+       }
}
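Both versions of pthread__mutex_setwaiters() above implement the same basic protocol: tag the owner word with a waiters bit using CAS, bailing out (and waking) if the mutex is seen unlocked. A simplified sketch of that protocol with C11 atomics; TOY_WAITERS and toy_set_waiters_bit() are illustrative names, not the library's MUTEX_* macros:

/*
 * Sketch of the waiters-bit protocol: the low bit of the owner word tells
 * the unlocking thread that someone is queued and must be woken.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_WAITERS     ((uintptr_t)1)

/*
 * Returns true once the bit is set (or already was) while the lock is
 * held; false if the mutex was observed unlocked, in which case the
 * caller should retry acquiring instead of blocking.
 */
static bool
toy_set_waiters_bit(_Atomic(uintptr_t) *ownerp)
{
        uintptr_t owner = atomic_load(ownerp);

        for (;;) {
                if ((owner & ~TOY_WAITERS) == 0)
                        return false;           /* became unlocked meanwhile */
                if (owner & TOY_WAITERS)
                        return true;            /* already flagged */
                if (atomic_compare_exchange_weak(ownerp, &owner,
                    owner | TOY_WAITERS))
                        return true;
                /* CAS failed: 'owner' was refreshed; loop and re-check. */
        }
}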
   
Line 307 (v1.64.4.2) / Line 320 (v1.65): pthread__mutex_lock_slow(pthread_mutex_t
        int serrno;
        int error;

+       pthread__error(EINVAL, "Invalid mutex",
+           ptm->ptm_magic == _PT_MUTEX_MAGIC);
+
        owner = ptm->ptm_owner;
        self = pthread__self();

Line 370 (v1.64.4.2) / Line 386 (v1.65): pthread__mutex_lock_slow(pthread_mutex_t
                        if (next == waiters)
                                break;
                }

                /* Set the waiters bit and block. */
-               membar_sync();
-               if (pthread__mutex_setwaiters(self, ptm)) {
-                       continue;
-               }
+               pthread__mutex_setwaiters(self, ptm);

                /*
                 * We may have been awoken by the current thread above,
Line 385 (v1.64.4.2) / Line 398 (v1.65): pthread__mutex_lock_slow(pthread_mutex_t
                 * being set to zero).  Otherwise it is unsafe to re-enter
                 * the thread onto the waiters list.
                 */
-               membar_sync();
                while (self->pt_mutexwait) {
+                       self->pt_blocking++;
                        error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
                            __UNCONST(ts), self->pt_unpark,
                            __UNVOLATILE(&ptm->ptm_waiters),
                            __UNVOLATILE(&ptm->ptm_waiters));
                        self->pt_unpark = 0;
+                       self->pt_blocking--;
+                       membar_sync();
                        if (__predict_true(error != -1)) {
                                continue;
                        }
Line 416 (v1.64.4.2) / Line 431 (v1.65): pthread_mutex_trylock(pthread_mutex_t *p
        if (__predict_false(__uselibcstub))
                return __libc_mutex_trylock_stub(ptm);

-       pthread__error(EINVAL, "Invalid mutex",
-           ptm->ptm_magic == _PT_MUTEX_MAGIC);
-
        self = pthread__self();
        val = atomic_cas_ptr(&ptm->ptm_owner, NULL, self);
        if (__predict_true(val == NULL)) {
Line 459 (v1.64.4.2) / Line 471 (v1.65): pthread_mutex_unlock(pthread_mutex_t *pt
        if (__predict_false(__uselibcstub))
                return __libc_mutex_unlock_stub(ptm);

-       pthread__error(EINVAL, "Invalid mutex",
-           ptm->ptm_magic == _PT_MUTEX_MAGIC);
-
+       /*
+        * Note this may be a non-interlocked CAS.  See lock_slow()
+        * above and sys/kern/kern_mutex.c for details.
+        */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
        membar_exit();
#endif
        self = pthread__self();
-       value = atomic_cas_ptr(&ptm->ptm_owner, self, NULL);
+       value = atomic_cas_ptr_ni(&ptm->ptm_owner, self, NULL);
        if (__predict_true(value == self)) {
                pthread__smt_wake();
                return 0;
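The unlock fast path above is the mirror image of the lock fast path: make the critical section visible (membar_exit() where the CAS does not already imply a barrier), then CAS the owner word from self back to NULL; any failure means waiter/flag bits are set or the caller is not the owner, and the slow path takes over. In rough C11 terms, and without modelling the non-interlocked atomic_cas_ptr_ni() variant used on the 1.65 side:

/*
 * Sketch of the release fast path: CAS(owner: self -> NULL) with release
 * ordering.  Placeholder names; not the library's implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool
toy_try_fast_unlock(_Atomic(void *) *ownerp, void *self)
{
        void *expected = self;

        return atomic_compare_exchange_strong_explicit(ownerp,
            &expected, NULL, memory_order_release, memory_order_relaxed);
        /* Failure: not the owner, or waiter bits set - slow path needed. */
}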
Line 478 (v1.64.4.2) / Line 491 (v1.65): pthread__mutex_unlock_slow(pthread_mutex_t *ptm)
pthread__mutex_unlock_slow(pthread_mutex_t *ptm)
{
        pthread_t self, owner, new;
-       int weown, error;
+       int weown, error, deferred;

+       pthread__error(EINVAL, "Invalid mutex",
+           ptm->ptm_magic == _PT_MUTEX_MAGIC);
+
        self = pthread__self();
        owner = ptm->ptm_owner;
        weown = (MUTEX_OWNER(owner) == (uintptr_t)self);
+       deferred = (int)((uintptr_t)owner & MUTEX_DEFERRED_BIT);
        error = 0;

        if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
Line 524 (v1.64.4.2) / Line 541 (v1.65): pthread__mutex_unlock_slow(pthread_mutex
                        pthread__mutex_wakeup(self, ptm);
                        return 0;
                }
-               error = 0;
        }

+       /*
+        * There were no waiters, but we may have deferred waking
+        * other threads until mutex unlock - we must wake them now.
+        */
+       if (!deferred)
+               return error;
+
        if (self->pt_nwaiters == 1) {
                /*
                 * If the calling thread is about to block, defer
Line 538 (v1.64.4.2) / Line 561 (v1.65): pthread__mutex_unlock_slow(pthread_mutex
                        (void)_lwp_unpark(self->pt_waiters[0],
                            __UNVOLATILE(&ptm->ptm_waiters));
                }
-       } else if (self->pt_nwaiters > 0) {
+       } else {
                (void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
                    __UNVOLATILE(&ptm->ptm_waiters));
        }
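The tail of pthread__mutex_unlock_slow() above drains wakeups that were deferred while the mutex was held: a single waiter is released with _lwp_unpark(), several with one _lwp_unpark_all() call. A toy sketch of that batching pattern, using invented wake_one()/wake_many() stubs rather than the real system calls:

/*
 * Sketch of batched deferred wakeups: waiter IDs accumulated while the
 * lock was held are released in one go at unlock time.
 */
#include <stddef.h>

typedef int toy_lwpid_t;                /* stand-in for lwpid_t */

static void wake_one(toy_lwpid_t who) { (void)who; /* stub */ }
static void wake_many(const toy_lwpid_t *who, size_t n) { (void)who; (void)n; /* stub */ }

static void
toy_flush_deferred_wakeups(toy_lwpid_t *waiters, size_t *nwaiters)
{
        if (*nwaiters == 1)
                wake_one(waiters[0]);           /* common, cheap case */
        else if (*nwaiters > 1)
                wake_many(waiters, *nwaiters);  /* one batched call */
        *nwaiters = 0;
}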
Line 559 (v1.64.4.2) / Line 582 (v1.65): pthread__mutex_wakeup(pthread_t self, pt
        pthread_t thread, next;
        ssize_t n, rv;

-       /* Take ownership of the current set of waiters. */
+       /*
+        * Take ownership of the current set of waiters.  No
+        * need for a memory barrier following this, all loads
+        * are dependent upon 'thread'.
+        */
        thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);
-       membar_datadep_consumer(); /* for alpha */
        pthread__smt_wake();

        for (;;) {
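pthread__mutex_wakeup() above detaches the entire waiter list with one atomic_swap_ptr() and then walks it privately; once the swap has happened no other thread can reach those entries through ptm_waiters. The same pattern in a self-contained C11 sketch (the node layout and names are illustrative, not the real representation):

/*
 * Sketch of "swap out the list head, then walk it privately".
 */
#include <stdatomic.h>
#include <stddef.h>

struct toy_waiter {
        struct toy_waiter *next;
        int id;                         /* stand-in for an LWP id */
};

static void
toy_wake_all(_Atomic(struct toy_waiter *) *headp,
    void (*wake)(struct toy_waiter *))
{
        struct toy_waiter *w, *next;

        /* Take ownership of the current set of waiters. */
        w = atomic_exchange_explicit(headp, NULL, memory_order_acq_rel);
        for (; w != NULL; w = next) {
                /* Read 'next' first: after wake(), 'w' may be reused. */
                next = w->next;
                wake(w);
        }
}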
Line 619 (v1.64.4.2) / Line 645 (v1.65): pthread__mutex_wakeup(pthread_t self, pt
int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
-#if 0
        if (__predict_false(__uselibcstub))
                return __libc_mutexattr_init_stub(attr);
-#endif

        attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
        attr->ptma_private = (void *)PTHREAD_MUTEX_DEFAULT;
Line 638 (v1.64.4.2) / Line 662 (v1.65): pthread_mutexattr_destroy(pthread_mutexa
        pthread__error(EINVAL, "Invalid mutex attribute",
            attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

-       attr->ptma_magic = _PT_MUTEXATTR_DEAD;
-
        return 0;
}
   
Line 736 (v1.64.4.2) / Line 758 (v1.65): pthread_mutexattr_getpshared(const pthre
    int * __restrict pshared)
{

-       pthread__error(EINVAL, "Invalid mutex attribute",
-               attr->ptma_magic == _PT_MUTEXATTR_MAGIC);
-
        *pshared = PTHREAD_PROCESS_PRIVATE;
        return 0;
}
Line 747 (v1.64.4.2) / Line 766 (v1.65): pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{

-       pthread__error(EINVAL, "Invalid mutex attribute",
-               attr->ptma_magic == _PT_MUTEXATTR_MAGIC);
-
        switch(pshared) {
        case PTHREAD_PROCESS_PRIVATE:
                return 0;
Line 789 (v1.64.4.2) / Line 805 (v1.65): pthread__mutex_deferwake(pthread_t self,
int
pthread_mutex_getprioceiling(const pthread_mutex_t *ptm, int *ceil)
{

-       pthread__error(EINVAL, "Invalid mutex",
-           ptm->ptm_magic == _PT_MUTEX_MAGIC);
-
        *ceil = ptm->ptm_ceiling;
        return 0;
}
Line 802 (v1.64.4.2) / Line 814 (v1.65): pthread_mutex_setprioceiling(pthread_mut
{
        int error;

-       pthread__error(EINVAL, "Invalid mutex",
-           ptm->ptm_magic == _PT_MUTEX_MAGIC);
-
        error = pthread_mutex_lock(ptm);
        if (error == 0) {
                *old_ceil = ptm->ptm_ceiling;

Legend:
  -  removed from v.1.64.4.2
  +  added in v.1.65
  unprefixed lines are unchanged context
