Annotation of src/lib/libpthread/pthread_rwlock.c, Revision 1.28.2.2
1.28.2.2! yamt 1: /* $NetBSD: pthread_rwlock.c,v 1.28.2.1 2008/05/18 12:30:40 yamt Exp $ */
1.2 thorpej 2:
3: /*-
1.27 ad 4: * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
1.2 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.27 ad 8: * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
1.2 thorpej 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
1.5 lukem 32: #include <sys/cdefs.h>
1.28.2.2! yamt 33: __RCSID("$NetBSD: pthread_rwlock.c,v 1.28.2.1 2008/05/18 12:30:40 yamt Exp $");
! 34:
! 35: #include <sys/types.h>
! 36: #include <sys/lwpctl.h>
1.5 lukem 37:
1.2 thorpej 38: #include <errno.h>
1.27 ad 39: #include <stddef.h>
1.2 thorpej 40:
41: #include "pthread.h"
42: #include "pthread_int.h"
43:
/*
 * Values of pthread_t::pt_rwlocked, used for direct lock handoff:
 * a sleeper marks itself _RW_WANT_READ or _RW_WANT_WRITE before
 * parking, and the releasing thread sets _RW_LOCKED when it hands
 * the lock over (see pthread_rwlock_unlock()).
 */
#define _RW_LOCKED 0
#define _RW_WANT_WRITE 1
#define _RW_WANT_READ 2

/* NOINLINE forces an out-of-line call where call overhead is wanted. */
#if __GNUC_PREREQ__(3, 0)
#define NOINLINE __attribute ((noinline))
#else
#define NOINLINE /* nothing */
#endif

/* Common slow paths shared by the plain and timed lock entry points. */
static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
static void pthread__rwlock_early(void *);

int _pthread_rwlock_held_np(pthread_rwlock_t *);
int _pthread_rwlock_rdheld_np(pthread_rwlock_t *);
int _pthread_rwlock_wrheld_np(pthread_rwlock_t *);

#ifndef lint
__weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np);
__weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np);
__weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np);
#endif

/* Internal names through which libc performs its own locking. */
__strong_alias(__libc_rwlock_init,pthread_rwlock_init)
__strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
__strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
__strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
__strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
__strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
__strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)
75:
1.27 ad 76: static inline uintptr_t
77: rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
78: {
79:
80: return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
81: (void *)n);
82: }
83:
1.2 thorpej 84: int
1.27 ad 85: pthread_rwlock_init(pthread_rwlock_t *ptr,
1.2 thorpej 86: const pthread_rwlockattr_t *attr)
87: {
1.27 ad 88:
89: if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
1.2 thorpej 90: return EINVAL;
1.27 ad 91: ptr->ptr_magic = _PT_RWLOCK_MAGIC;
92: PTQ_INIT(&ptr->ptr_rblocked);
93: PTQ_INIT(&ptr->ptr_wblocked);
94: ptr->ptr_nreaders = 0;
95: ptr->ptr_owner = NULL;
1.2 thorpej 96:
97: return 0;
98: }
99:
100:
101: int
1.27 ad 102: pthread_rwlock_destroy(pthread_rwlock_t *ptr)
1.2 thorpej 103: {
1.27 ad 104:
105: if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
106: (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
107: (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
108: (ptr->ptr_nreaders != 0) ||
109: (ptr->ptr_owner != NULL))
1.2 thorpej 110: return EINVAL;
1.27 ad 111: ptr->ptr_magic = _PT_RWLOCK_DEAD;
1.2 thorpej 112:
113: return 0;
114: }
115:
/*
 * We want function call overhead: a bare pthread__smt_pause() would be
 * too cheap a delay between lock-word probes, so keep this out of line
 * (NOINLINE) and pay for the call.
 */
NOINLINE static void
pthread__rwlock_pause(void)
{

	pthread__smt_pause();
}
! 123:
/*
 * Decide whether it is worth spinning for the lock and, if so, burn a
 * fixed number of pause cycles.  Returns non-zero when the caller
 * should re-check the lock word, zero when it should go to sleep.
 * Spinning only pays off while the lock is write-held with no waiter
 * bits set and the owning thread is on a CPU and not blocked in the
 * kernel (per its shared lwpctl block).
 */
NOINLINE static int
pthread__rwlock_spin(uintptr_t owner)
{
	pthread_t thread;
	unsigned int i;

	/* Only spin on a writer holding the lock with no waiters queued. */
	thread = (pthread_t)(owner & RW_THREAD);
	if (thread == NULL || (owner & ~RW_THREAD) != RW_WRITE_LOCKED)
		return 0;
	/* Owner must be running and not blocked, else sleeping is better. */
	if (thread->pt_lwpctl->lc_curcpu == LWPCTL_CPU_NONE ||
	    thread->pt_blocking)
		return 0;
	for (i = 128; i != 0; i--)
		pthread__rwlock_pause();
	return 1;
}
! 140:
/*
 * Slow path for read-lock acquisition, shared by pthread_rwlock_rdlock()
 * (ts == NULL) and pthread_rwlock_timedrdlock().  Returns 0 once a read
 * hold is acquired, EDEADLK if the calling thread already holds the lock
 * as a writer, or an error from pthread__park() (e.g. on timeout).
 */
static int
pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear (no writer holds or wants the
		 * lock), then try to acquire the lock by bumping the
		 * reader count.
		 */
		if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
			next = rw_cas(ptr, owner, owner + RW_READ_INCR);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		/* Read-after-write recursion would self-deadlock. */
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 */
		next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
		ptr->ptr_nreaders++;
		self->pt_rwlocked = _RW_WANT_READ;
		self->pt_sleepobj = &ptr->ptr_rblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_rblocked,
		    ts, 0, &ptr->ptr_rblocked);

		/*
		 * Did we get the lock?  The unlocking thread sets
		 * pt_rwlocked to _RW_LOCKED on direct handoff before
		 * waking us.
		 */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		/* Woken without handoff and without error: a bug. */
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
230:
231:
232: int
1.27 ad 233: pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
1.2 thorpej 234: {
1.27 ad 235: uintptr_t owner, next;
1.20 ad 236:
1.2 thorpej 237: #ifdef ERRORCHECK
1.27 ad 238: if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
1.2 thorpej 239: return EINVAL;
240: #endif
1.27 ad 241:
1.2 thorpej 242: /*
243: * Don't get a readlock if there is a writer or if there are waiting
244: * writers; i.e. prefer writers to readers. This strategy is dictated
245: * by SUSv3.
246: */
1.27 ad 247: for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
248: if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
249: return EBUSY;
250: next = rw_cas(ptr, owner, owner + RW_READ_INCR);
251: if (owner == next) {
252: /* Got it! */
253: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
254: membar_enter();
255: #endif
256: return 0;
257: }
1.2 thorpej 258: }
259: }
260:
/*
 * Slow path for write-lock acquisition, shared by pthread_rwlock_wrlock()
 * (ts == NULL) and pthread_rwlock_timedwrlock().  Returns 0 once the
 * write hold is acquired, EDEADLK if the calling thread already owns the
 * lock, or an error from pthread__park() (e.g. on timeout).
 */
static int
pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
{
	uintptr_t owner, next;
	pthread_mutex_t *interlock;
	pthread_t self;
	int error;

	self = pthread__self();

#ifdef ERRORCHECK
	if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
		return EINVAL;
#endif

	for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear (no readers and no writer), then
		 * try to acquire the lock by installing ourselves as
		 * the owner.
		 */
		if ((owner & RW_THREAD) == 0) {
			next = rw_cas(ptr, owner,
			    (uintptr_t)self | RW_WRITE_LOCKED);
			if (owner == next) {
				/* Got it! */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
				membar_enter();
#endif
				return 0;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			continue;
		}

		/* Write recursion would self-deadlock. */
		if ((owner & RW_THREAD) == (uintptr_t)self)
			return EDEADLK;

		/* If held write locked and no waiters, spin. */
		if (pthread__rwlock_spin(owner)) {
			while (pthread__rwlock_spin(owner)) {
				owner = (uintptr_t)ptr->ptr_owner;
			}
			next = owner;
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);

		/*
		 * Mark the rwlock as having waiters, including a
		 * waiting writer (keeps new readers out).  If the set
		 * fails, then we may not need to sleep and should spin
		 * again.
		 */
		next = rw_cas(ptr, owner,
		    owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
		if (owner != next) {
			pthread_mutex_unlock(interlock);
			continue;
		}

		/* The waiters bit is set - it's safe to sleep. */
		PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
		self->pt_rwlocked = _RW_WANT_WRITE;
		self->pt_sleepobj = &ptr->ptr_wblocked;
		self->pt_early = pthread__rwlock_early;
		error = pthread__park(self, interlock, &ptr->ptr_wblocked,
		    ts, 0, &ptr->ptr_wblocked);

		/*
		 * Did we get the lock?  The unlocking thread sets
		 * pt_rwlocked to _RW_LOCKED on direct handoff before
		 * waking us.
		 */
		if (self->pt_rwlocked == _RW_LOCKED) {
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
		if (error != 0)
			return error;

		/* Woken without handoff and without error: a bug. */
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "direct handoff failure");
	}
}
351:
352:
353: int
1.27 ad 354: pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
1.2 thorpej 355: {
1.27 ad 356: uintptr_t owner, next;
1.2 thorpej 357: pthread_t self;
1.27 ad 358:
1.2 thorpej 359: #ifdef ERRORCHECK
1.27 ad 360: if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
1.2 thorpej 361: return EINVAL;
362: #endif
1.27 ad 363:
1.2 thorpej 364: self = pthread__self();
1.27 ad 365:
366: for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
367: if (owner != 0)
368: return EBUSY;
369: next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
370: if (owner == next) {
371: /* Got it! */
372: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
373: membar_enter();
374: #endif
375: return 0;
376: }
1.2 thorpej 377: }
1.27 ad 378: }
1.2 thorpej 379:
/* Acquire a read hold, blocking with no timeout. */
int
pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_rdlock(ptr, NULL);
}
386:
387: int
1.27 ad 388: pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
389: const struct timespec *abs_timeout)
1.2 thorpej 390: {
1.12 chs 391:
1.10 nathanw 392: if (abs_timeout == NULL)
1.2 thorpej 393: return EINVAL;
1.10 nathanw 394: if ((abs_timeout->tv_nsec >= 1000000000) ||
395: (abs_timeout->tv_nsec < 0) ||
396: (abs_timeout->tv_sec < 0))
397: return EINVAL;
1.12 chs 398:
1.27 ad 399: return pthread__rwlock_rdlock(ptr, abs_timeout);
400: }
1.2 thorpej 401:
/* Acquire the write lock, blocking with no timeout. */
int
pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
{

	return pthread__rwlock_wrlock(ptr, NULL);
}
408:
409: int
1.27 ad 410: pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
411: const struct timespec *abs_timeout)
1.2 thorpej 412: {
1.12 chs 413:
1.10 nathanw 414: if (abs_timeout == NULL)
415: return EINVAL;
416: if ((abs_timeout->tv_nsec >= 1000000000) ||
417: (abs_timeout->tv_nsec < 0) ||
418: (abs_timeout->tv_sec < 0))
419: return EINVAL;
1.12 chs 420:
1.27 ad 421: return pthread__rwlock_wrlock(ptr, abs_timeout);
1.2 thorpej 422: }
423:
424:
/*
 * Release a read or write hold.  If the release would leave the lock
 * unowned while threads are sleeping on it, hand the lock off directly:
 * to the first queued writer, or failing that to all queued readers at
 * once.  Returns EPERM when the caller does not hold the lock.
 */
int
pthread_rwlock_unlock(pthread_rwlock_t *ptr)
{
	uintptr_t owner, decr, new, next;
	pthread_mutex_t *interlock;
	pthread_t self, thread;

#ifdef ERRORCHECK
	if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
		return EINVAL;
#endif

#ifndef PTHREAD__ATOMIC_IS_MEMBAR
	membar_exit();
#endif

	/*
	 * Since we used an add operation to set the required lock
	 * bits, we can use a subtract to clear them, which makes
	 * the read-release and write-release path similar.
	 */
	self = pthread__self();
	owner = (uintptr_t)ptr->ptr_owner;
	if ((owner & RW_WRITE_LOCKED) != 0) {
		/* Write-held: only the owning thread may release. */
		decr = (uintptr_t)self | RW_WRITE_LOCKED;
		if ((owner & RW_THREAD) != (uintptr_t)self) {
			return EPERM;
		}
	} else {
		/* Read-held: there must be at least one reader. */
		decr = RW_READ_INCR;
		if (owner == 0) {
			return EPERM;
		}
	}

	for (;; owner = next) {
		/*
		 * Compute what we expect the new value of the lock to be.
		 * Only proceed to do direct handoff if there are waiters,
		 * and if the lock would become unowned.
		 */
		new = (owner - decr);
		if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
			next = rw_cas(ptr, owner, new);
			if (owner == next) {
				/* Released! */
				return 0;
			}
			continue;
		}

		/*
		 * Grab the interlock.  Once we have that, we can adjust
		 * the waiter bits.  We must check to see if there are
		 * still waiters before proceeding.
		 */
		interlock = pthread__hashlock(ptr);
		pthread_mutex_lock(interlock);
		owner = (uintptr_t)ptr->ptr_owner;
		if ((owner & RW_HAS_WAITERS) == 0) {
			pthread_mutex_unlock(interlock);
			next = owner;
			continue;
		}

		/*
		 * Give the lock away.  SUSv3 dictates that we must give
		 * preference to writers.
		 */
		if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
			new = (uintptr_t)thread | RW_WRITE_LOCKED;

			/* Keep the waiter bits set for whoever remains. */
			if (PTQ_NEXT(thread, pt_sleep) != NULL)
				new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
			else if (ptr->ptr_nreaders != 0)
				new |= RW_HAS_WAITERS;

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the writer that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake the writer. */
			thread->pt_rwlocked = _RW_LOCKED;
			pthread__unpark(&ptr->ptr_wblocked, self,
			    interlock);
		} else {
			new = 0;
			PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
				/*
				 * May have already been handed the lock,
				 * since pthread__unpark_all() can release
				 * our interlock before awakening all
				 * threads.
				 */
				if (thread->pt_sleepobj == NULL)
					continue;
				new += RW_READ_INCR;
				thread->pt_rwlocked = _RW_LOCKED;
			}

			/*
			 * Set in the new value.  The lock becomes owned
			 * by the readers that we are about to wake.
			 */
			(void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);

			/* Wake up all sleeping readers. */
			ptr->ptr_nreaders = 0;
			pthread__unpark_all(&ptr->ptr_rblocked, self,
			    interlock);
		}
		pthread_mutex_unlock(interlock);

		return 0;
	}
}
543:
/*
 * Called when a timedlock awakens early to adjust the waiter bits.
 * The rwlock's interlock is held on entry, and the caller has been
 * removed from the waiters lists.
 */
static void
pthread__rwlock_early(void *obj)
{
	uintptr_t owner, set, new, next;
	pthread_rwlock_t *ptr;
	pthread_t self;
	u_int off;

	self = pthread__self();

	/*
	 * 'obj' is the sleep-queue head we were parked on; pt_rwlocked
	 * records which queue that was, so we can recover the enclosing
	 * rwlock from the queue member's offset.
	 */
	switch (self->pt_rwlocked) {
	case _RW_WANT_READ:
		off = offsetof(pthread_rwlock_t, ptr_rblocked);
		break;
	case _RW_WANT_WRITE:
		off = offsetof(pthread_rwlock_t, ptr_wblocked);
		break;
	default:
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "bad value of pt_rwlocked");
		off = 0;
		/* NOTREACHED */
		break;
	}

	/* LINTED mind your own business */
	ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
	owner = (uintptr_t)ptr->ptr_owner;

	if ((owner & RW_THREAD) == 0) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "lock not held");
	}

	/* Recompute the waiter bits from the sleepers that remain. */
	if (!PTQ_EMPTY(&ptr->ptr_wblocked))
		set = RW_HAS_WAITERS | RW_WRITE_WANTED;
	else if (ptr->ptr_nreaders != 0)
		set = RW_HAS_WAITERS;
	else
		set = 0;

	/* CAS loop: install the recomputed bits in the lock word. */
	for (;; owner = next) {
		new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
		next = rw_cas(ptr, owner, new);
		if (owner == next)
			break;
	}
}
597:
598: int
1.27 ad 599: _pthread_rwlock_held_np(pthread_rwlock_t *ptr)
1.2 thorpej 600: {
1.27 ad 601: uintptr_t owner = (uintptr_t)ptr->ptr_owner;
1.2 thorpej 602:
1.28 ad 603: if ((owner & RW_WRITE_LOCKED) != 0)
604: return (owner & RW_THREAD) == (uintptr_t)pthread__self();
1.27 ad 605: return (owner & RW_THREAD) != 0;
1.2 thorpej 606: }
607:
608: int
1.27 ad 609: _pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
1.2 thorpej 610: {
1.27 ad 611: uintptr_t owner = (uintptr_t)ptr->ptr_owner;
1.2 thorpej 612:
1.27 ad 613: return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
1.2 thorpej 614: }
1.21 ad 615:
1.23 ad 616: int
1.27 ad 617: _pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
1.23 ad 618: {
1.27 ad 619: uintptr_t owner = (uintptr_t)ptr->ptr_owner;
1.23 ad 620:
1.27 ad 621: return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
622: ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
1.23 ad 623: }
624:
625: int
1.27 ad 626: pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
1.23 ad 627: {
628:
1.27 ad 629: if (attr == NULL)
630: return EINVAL;
631: attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;
632:
633: return 0;
1.23 ad 634: }
635:
1.27 ad 636:
1.23 ad 637: int
1.27 ad 638: pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
1.23 ad 639: {
640:
1.27 ad 641: if ((attr == NULL) ||
642: (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
643: return EINVAL;
644: attr->ptra_magic = _PT_RWLOCKATTR_DEAD;
645:
646: return 0;
1.23 ad 647: }
CVSweb <webmaster@jp.NetBSD.org>