Annotation of src/lib/libpthread/pthread_rwlock.c, Revision 1.29
1.29 ! martin 1: /* $NetBSD: pthread_rwlock.c,v 1.28 2008/03/08 13:46:59 ad Exp $ */
1.2 thorpej 2:
3: /*-
1.27 ad 4: * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
1.2 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.27 ad 8: * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
1.2 thorpej 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
1.5 lukem 32: #include <sys/cdefs.h>
1.29 ! martin 33: __RCSID("$NetBSD: pthread_rwlock.c,v 1.28 2008/03/08 13:46:59 ad Exp $");
1.5 lukem 34:
1.2 thorpej 35: #include <errno.h>
1.27 ad 36: #include <stddef.h>
1.2 thorpej 37:
38: #include "pthread.h"
39: #include "pthread_int.h"
40:
1.27 ad 41: #define _RW_LOCKED 0
42: #define _RW_WANT_WRITE 1
43: #define _RW_WANT_READ 2
44:
45: static int pthread__rwlock_wrlock(pthread_rwlock_t *, const struct timespec *);
46: static int pthread__rwlock_rdlock(pthread_rwlock_t *, const struct timespec *);
47: static void pthread__rwlock_early(void *);
1.24 christos 48:
1.23 ad 49: int _pthread_rwlock_held_np(pthread_rwlock_t *);
50: int _pthread_rwlock_rdheld_np(pthread_rwlock_t *);
51: int _pthread_rwlock_wrheld_np(pthread_rwlock_t *);
52:
1.27 ad 53: #ifndef lint
54: __weak_alias(pthread_rwlock_held_np,_pthread_rwlock_held_np);
55: __weak_alias(pthread_rwlock_rdheld_np,_pthread_rwlock_rdheld_np);
56: __weak_alias(pthread_rwlock_wrheld_np,_pthread_rwlock_wrheld_np);
57: #endif
58:
1.2 thorpej 59: __strong_alias(__libc_rwlock_init,pthread_rwlock_init)
60: __strong_alias(__libc_rwlock_rdlock,pthread_rwlock_rdlock)
61: __strong_alias(__libc_rwlock_wrlock,pthread_rwlock_wrlock)
62: __strong_alias(__libc_rwlock_tryrdlock,pthread_rwlock_tryrdlock)
63: __strong_alias(__libc_rwlock_trywrlock,pthread_rwlock_trywrlock)
64: __strong_alias(__libc_rwlock_unlock,pthread_rwlock_unlock)
65: __strong_alias(__libc_rwlock_destroy,pthread_rwlock_destroy)
66:
1.27 ad 67: static inline uintptr_t
68: rw_cas(pthread_rwlock_t *ptr, uintptr_t o, uintptr_t n)
69: {
70:
71: return (uintptr_t)atomic_cas_ptr(&ptr->ptr_owner, (void *)o,
72: (void *)n);
73: }
74:
1.2 thorpej 75: int
1.27 ad 76: pthread_rwlock_init(pthread_rwlock_t *ptr,
1.2 thorpej 77: const pthread_rwlockattr_t *attr)
78: {
1.27 ad 79:
80: if (attr && (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
1.2 thorpej 81: return EINVAL;
1.27 ad 82: ptr->ptr_magic = _PT_RWLOCK_MAGIC;
83: pthread_lockinit(&ptr->ptr_interlock);
84: PTQ_INIT(&ptr->ptr_rblocked);
85: PTQ_INIT(&ptr->ptr_wblocked);
86: ptr->ptr_nreaders = 0;
87: ptr->ptr_owner = NULL;
1.2 thorpej 88:
89: return 0;
90: }
91:
92:
93: int
1.27 ad 94: pthread_rwlock_destroy(pthread_rwlock_t *ptr)
1.2 thorpej 95: {
1.27 ad 96:
97: if ((ptr->ptr_magic != _PT_RWLOCK_MAGIC) ||
98: (!PTQ_EMPTY(&ptr->ptr_rblocked)) ||
99: (!PTQ_EMPTY(&ptr->ptr_wblocked)) ||
100: (ptr->ptr_nreaders != 0) ||
101: (ptr->ptr_owner != NULL))
1.2 thorpej 102: return EINVAL;
1.27 ad 103: ptr->ptr_magic = _PT_RWLOCK_DEAD;
1.2 thorpej 104:
105: return 0;
106: }
107:
/*
 * pthread__rwlock_rdlock: acquire a read hold on the lock.
 *
 * Fast path: while no writer holds or wants the lock, CAS a reader
 * reference (RW_READ_INCR) onto the owner word.  Slow path: take the
 * interlock, set RW_HAS_WAITERS, queue on ptr_rblocked and park.
 * ts, if non-NULL, is an absolute timeout for the park; on timeout
 * the pthread__park() error is returned.  Returns 0 once the lock is
 * held, EDEADLK if the calling thread already write-holds it.
 */
1.27 ad 108: static int
109: pthread__rwlock_rdlock(pthread_rwlock_t *ptr, const struct timespec *ts)
1.2 thorpej 110: {
1.27 ad 111: uintptr_t owner, next;
1.2 thorpej 112: pthread_t self;
1.27 ad 113: int error;
114:
115: self = pthread__self();
116:
1.2 thorpej 117: #ifdef ERRORCHECK
1.27 ad 118: if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
1.2 thorpej 119: return EINVAL;
120: #endif
1.27 ad 121:
122: for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
123: /*
124: * Read the lock owner field. If the need-to-wait
125: * indicator is clear, then try to acquire the lock.
126: */
127: if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) == 0) {
128: next = rw_cas(ptr, owner, owner + RW_READ_INCR);
129: if (owner == next) {
130: /* Got it! */
131: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
132: membar_enter();
133: #endif
134: return 0;
135: }
136:
137: /*
138: * Didn't get it -- spin around again (we'll
139: * probably sleep on the next iteration).
140: */
141: continue;
142: }
143:
/* A thread already write-holding the lock must not read-lock it. */
144: if ((owner & RW_THREAD) == (uintptr_t)self)
145: return EDEADLK;
146:
147: /*
148: * Grab the interlock. Once we have that, we
149: * can adjust the waiter bits and sleep queue.
150: */
151: pthread__spinlock(self, &ptr->ptr_interlock);
152:
153: /*
154: * Mark the rwlock as having waiters. If the set fails,
155: * then we may not need to sleep and should spin again.
156: */
157: next = rw_cas(ptr, owner, owner | RW_HAS_WAITERS);
158: if (owner != next) {
159: pthread__spinunlock(self, &ptr->ptr_interlock);
160: continue;
161: }
162:
163: /* The waiters bit is set - it's safe to sleep. */
164: PTQ_INSERT_HEAD(&ptr->ptr_rblocked, self, pt_sleep);
165: ptr->ptr_nreaders++;
166: self->pt_rwlocked = _RW_WANT_READ;
167: self->pt_sleeponq = 1;
168: self->pt_sleepobj = &ptr->ptr_rblocked;
/* If the park times out, pthread__rwlock_early() repairs the waiter bits. */
169: self->pt_early = pthread__rwlock_early;
170: pthread__spinunlock(self, &ptr->ptr_interlock);
171:
172: error = pthread__park(self, &ptr->ptr_interlock,
173: &ptr->ptr_rblocked, ts, 0, &ptr->ptr_rblocked);
174:
/* Releaser hands the lock off directly and sets pt_rwlocked. */
175: /* Did we get the lock? */
176: if (self->pt_rwlocked == _RW_LOCKED) {
177: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
178: membar_enter();
1.2 thorpej 179: #endif
1.27 ad 180: return 0;
181: }
182: if (error != 0)
183: return error;
184:
/* Woken without the lock and without an error: protocol violation. */
185: pthread__errorfunc(__FILE__, __LINE__, __func__,
186: "direct handoff failure");
1.2 thorpej 187: }
188: }
189:
190:
191: int
1.27 ad 192: pthread_rwlock_tryrdlock(pthread_rwlock_t *ptr)
1.2 thorpej 193: {
1.27 ad 194: uintptr_t owner, next;
1.20 ad 195:
1.2 thorpej 196: #ifdef ERRORCHECK
1.27 ad 197: if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
1.2 thorpej 198: return EINVAL;
199: #endif
1.27 ad 200:
1.2 thorpej 201: /*
202: * Don't get a readlock if there is a writer or if there are waiting
203: * writers; i.e. prefer writers to readers. This strategy is dictated
204: * by SUSv3.
205: */
1.27 ad 206: for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
207: if ((owner & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0)
208: return EBUSY;
209: next = rw_cas(ptr, owner, owner + RW_READ_INCR);
210: if (owner == next) {
211: /* Got it! */
212: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
213: membar_enter();
214: #endif
215: return 0;
216: }
1.2 thorpej 217: }
218: }
219:
/*
 * pthread__rwlock_wrlock: acquire the write lock.
 *
 * Fast path: while the owner word shows no holder at all, CAS in
 * (self | RW_WRITE_LOCKED).  Slow path: take the interlock, set
 * RW_HAS_WAITERS | RW_WRITE_WANTED, queue FIFO on ptr_wblocked and
 * park.  ts, if non-NULL, is an absolute timeout for the park; on
 * timeout the pthread__park() error is returned.  Returns 0 once the
 * lock is held, EDEADLK on attempted recursion.
 */
1.27 ad 220: static int
221: pthread__rwlock_wrlock(pthread_rwlock_t *ptr, const struct timespec *ts)
1.2 thorpej 222: {
1.27 ad 223: uintptr_t owner, next;
1.2 thorpej 224: pthread_t self;
1.27 ad 225: int error;
226:
227: self = pthread__self();
1.13 chs 228:
1.2 thorpej 229: #ifdef ERRORCHECK
1.27 ad 230: if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
1.2 thorpej 231: return EINVAL;
232: #endif
1.27 ad 233:
234: for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
235: /*
236: * Read the lock owner field. If the need-to-wait
237: * indicator is clear, then try to acquire the lock.
238: */
239: if ((owner & RW_THREAD) == 0) {
240: next = rw_cas(ptr, owner,
241: (uintptr_t)self | RW_WRITE_LOCKED);
242: if (owner == next) {
243: /* Got it! */
244: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
245: membar_enter();
246: #endif
247: return 0;
248: }
249:
250: /*
251: * Didn't get it -- spin around again (we'll
252: * probably sleep on the next iteration).
253: */
254: continue;
255: }
256:
/* Already the write owner: recursive wrlock would self-deadlock. */
257: if ((owner & RW_THREAD) == (uintptr_t)self)
1.13 chs 258: return EDEADLK;
1.27 ad 259:
260: /*
261: * Grab the interlock. Once we have that, we
262: * can adjust the waiter bits and sleep queue.
263: */
264: pthread__spinlock(self, &ptr->ptr_interlock);
265:
266: /*
267: * Mark the rwlock as having waiters. If the set fails,
268: * then we may not need to sleep and should spin again.
269: */
270: next = rw_cas(ptr, owner,
271: owner | RW_HAS_WAITERS | RW_WRITE_WANTED);
272: if (owner != next) {
273: pthread__spinunlock(self, &ptr->ptr_interlock);
274: continue;
1.13 chs 275: }
1.27 ad 276:
/* Writers queue at the tail (FIFO); readers use INSERT_HEAD. */
277: /* The waiters bit is set - it's safe to sleep. */
278: PTQ_INSERT_TAIL(&ptr->ptr_wblocked, self, pt_sleep);
279: self->pt_rwlocked = _RW_WANT_WRITE;
280: self->pt_sleeponq = 1;
281: self->pt_sleepobj = &ptr->ptr_wblocked;
/* If the park times out, pthread__rwlock_early() repairs the waiter bits. */
282: self->pt_early = pthread__rwlock_early;
283: pthread__spinunlock(self, &ptr->ptr_interlock);
284:
285: error = pthread__park(self, &ptr->ptr_interlock,
286: &ptr->ptr_wblocked, ts, 0, &ptr->ptr_wblocked);
287:
288: /* Did we get the lock? */
289: if (self->pt_rwlocked == _RW_LOCKED) {
290: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
291: membar_enter();
1.13 chs 292: #endif
1.27 ad 293: return 0;
294: }
295: if (error != 0)
296: return error;
297:
/* Woken without the lock and without an error: protocol violation. */
298: pthread__errorfunc(__FILE__, __LINE__, __func__,
299: "direct handoff failure");
1.2 thorpej 300: }
301: }
302:
303:
304: int
1.27 ad 305: pthread_rwlock_trywrlock(pthread_rwlock_t *ptr)
1.2 thorpej 306: {
1.27 ad 307: uintptr_t owner, next;
1.2 thorpej 308: pthread_t self;
1.27 ad 309:
1.2 thorpej 310: #ifdef ERRORCHECK
1.27 ad 311: if (ptr->ptr_magic != _PT_RWLOCK_MAGIC)
1.2 thorpej 312: return EINVAL;
313: #endif
1.27 ad 314:
1.2 thorpej 315: self = pthread__self();
1.27 ad 316:
317: for (owner = (uintptr_t)ptr->ptr_owner;; owner = next) {
318: if (owner != 0)
319: return EBUSY;
320: next = rw_cas(ptr, owner, (uintptr_t)self | RW_WRITE_LOCKED);
321: if (owner == next) {
322: /* Got it! */
323: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
324: membar_enter();
325: #endif
326: return 0;
327: }
1.2 thorpej 328: }
1.27 ad 329: }
1.2 thorpej 330:
1.27 ad 331: int
332: pthread_rwlock_rdlock(pthread_rwlock_t *ptr)
333: {
1.2 thorpej 334:
1.27 ad 335: return pthread__rwlock_rdlock(ptr, NULL);
1.2 thorpej 336: }
337:
338: int
1.27 ad 339: pthread_rwlock_timedrdlock(pthread_rwlock_t *ptr,
340: const struct timespec *abs_timeout)
1.2 thorpej 341: {
1.12 chs 342:
1.10 nathanw 343: if (abs_timeout == NULL)
1.2 thorpej 344: return EINVAL;
1.10 nathanw 345: if ((abs_timeout->tv_nsec >= 1000000000) ||
346: (abs_timeout->tv_nsec < 0) ||
347: (abs_timeout->tv_sec < 0))
348: return EINVAL;
1.12 chs 349:
1.27 ad 350: return pthread__rwlock_rdlock(ptr, abs_timeout);
351: }
1.2 thorpej 352:
1.27 ad 353: int
354: pthread_rwlock_wrlock(pthread_rwlock_t *ptr)
355: {
1.2 thorpej 356:
1.27 ad 357: return pthread__rwlock_wrlock(ptr, NULL);
1.2 thorpej 358: }
359:
360: int
1.27 ad 361: pthread_rwlock_timedwrlock(pthread_rwlock_t *ptr,
362: const struct timespec *abs_timeout)
1.2 thorpej 363: {
1.12 chs 364:
1.10 nathanw 365: if (abs_timeout == NULL)
366: return EINVAL;
367: if ((abs_timeout->tv_nsec >= 1000000000) ||
368: (abs_timeout->tv_nsec < 0) ||
369: (abs_timeout->tv_sec < 0))
370: return EINVAL;
1.12 chs 371:
1.27 ad 372: return pthread__rwlock_wrlock(ptr, abs_timeout);
1.2 thorpej 373: }
374:
375:
/*
 * pthread_rwlock_unlock: release one hold on the lock.
 *
 * Both read- and write-release are expressed as a subtraction from
 * the owner word.  If the lock would become unowned and waiters are
 * recorded, ownership is handed off directly under the interlock: to
 * the longest-waiting writer if any (SUSv3 writer preference),
 * otherwise to every blocked reader at once.  Returns 0 on success,
 * EPERM when the caller does not hold the lock.
 */
376: int
1.27 ad 377: pthread_rwlock_unlock(pthread_rwlock_t *ptr)
1.2 thorpej 378: {
1.27 ad 379: uintptr_t owner, decr, new, next;
380: pthread_t self, thread;
381:
1.2 thorpej 382: #ifdef ERRORCHECK
1.27 ad 383: if ((ptr == NULL) || (ptr->ptr_magic != _PT_RWLOCK_MAGIC))
1.2 thorpej 384: return EINVAL;
385: #endif
1.27 ad 386:
387: #ifndef PTHREAD__ATOMIC_IS_MEMBAR
388: membar_exit();
389: #endif
390:
391: /*
392: * Since we used an add operation to set the required lock
393: * bits, we can use a subtract to clear them, which makes
394: * the read-release and write-release path similar.
395: */
1.2 thorpej 396: self = pthread__self();
1.27 ad 397: owner = (uintptr_t)ptr->ptr_owner;
398: if ((owner & RW_WRITE_LOCKED) != 0) {
399: decr = (uintptr_t)self | RW_WRITE_LOCKED;
/* Write-locked by someone else: the caller may not release it. */
400: if ((owner & RW_THREAD) != (uintptr_t)self) {
401: return EPERM;
402: }
403: } else {
404: decr = RW_READ_INCR;
/* Not locked at all: nothing for the caller to release. */
405: if (owner == 0) {
1.2 thorpej 406: return EPERM;
407: }
1.27 ad 408: }
409:
410: for (;; owner = next) {
411: /*
412: * Compute what we expect the new value of the lock to be.
413: * Only proceed to do direct handoff if there are waiters,
414: * and if the lock would become unowned.
415: */
416: new = (owner - decr);
417: if ((new & (RW_THREAD | RW_HAS_WAITERS)) != RW_HAS_WAITERS) {
418: next = rw_cas(ptr, owner, new);
419: if (owner == next) {
420: /* Released! */
421: return 0;
422: }
423: continue;
424: }
425:
426: /*
427: * Grab the interlock. Once we have that, we can adjust
428: * the waiter bits. We must check to see if there are
429: * still waiters before proceeding.
430: */
431: pthread__spinlock(self, &ptr->ptr_interlock);
432: owner = (uintptr_t)ptr->ptr_owner;
433: if ((owner & RW_HAS_WAITERS) == 0) {
434: pthread__spinunlock(self, &ptr->ptr_interlock);
435: next = owner;
436: continue;
1.2 thorpej 437: }
1.27 ad 438:
439: /*
440: * Give the lock away. SUSv3 dictates that we must give
441: * preference to writers.
442: */
443: if ((thread = PTQ_FIRST(&ptr->ptr_wblocked)) != NULL) {
444: new = (uintptr_t)thread | RW_WRITE_LOCKED;
445:
/* Preserve the waiter bits for whoever is still queued behind. */
446: if (PTQ_NEXT(thread, pt_sleep) != NULL)
447: new |= RW_HAS_WAITERS | RW_WRITE_WANTED;
448: else if (ptr->ptr_nreaders != 0)
449: new |= RW_HAS_WAITERS;
450:
451: /*
452: * Set in the new value. The lock becomes owned
453: * by the writer that we are about to wake.
454: */
455: (void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);
456:
457: /* Wake the writer. */
458: PTQ_REMOVE(&ptr->ptr_wblocked, thread, pt_sleep);
459: thread->pt_rwlocked = _RW_LOCKED;
460: pthread__unpark(self, &ptr->ptr_interlock,
461: &ptr->ptr_wblocked, thread);
462: } else {
463: new = 0;
464: PTQ_FOREACH(thread, &ptr->ptr_rblocked, pt_sleep) {
465: /*
466: * May have already been handed the lock,
467: * since pthread__unpark_all() can release
468: * our interlock before awakening all
469: * threads.
470: */
471: if (thread->pt_sleepobj == NULL)
472: continue;
473: new += RW_READ_INCR;
474: thread->pt_rwlocked = _RW_LOCKED;
475: }
476:
477: /*
478: * Set in the new value. The lock becomes owned
479: * by the readers that we are about to wake.
480: */
481: (void)atomic_swap_ptr(&ptr->ptr_owner, (void *)new);
482:
483: /* Wake up all sleeping readers. */
484: ptr->ptr_nreaders = 0;
485: pthread__unpark_all(self, &ptr->ptr_interlock,
486: &ptr->ptr_rblocked);
1.2 thorpej 487: }
1.27 ad 488:
489: return 0;
1.2 thorpej 490: }
1.27 ad 491: }
492:
493: /*
494: * Called when a timedlock awakens early to adjust the waiter bits.
495: * The rwlock's interlock is held on entry, and the caller has been
496: * removed from the waiters lists.
497: */
498: static void
499: pthread__rwlock_early(void *obj)
500: {
501: uintptr_t owner, set, new, next;
502: pthread_rwlock_t *ptr;
503: pthread_t self;
504: u_int off;
1.2 thorpej 505:
1.27 ad 506: self = pthread__self();
507:
/*
 * obj points at the sleep queue we were blocked on; pick the matching
 * member offset so we can recover the enclosing rwlock below.
 */
508: switch (self->pt_rwlocked) {
509: case _RW_WANT_READ:
510: off = offsetof(pthread_rwlock_t, ptr_rblocked);
511: break;
512: case _RW_WANT_WRITE:
513: off = offsetof(pthread_rwlock_t, ptr_wblocked);
514: break;
515: default:
516: pthread__errorfunc(__FILE__, __LINE__, __func__,
517: "bad value of pt_rwlocked");
518: off = 0;
519: /* NOTREACHED */
520: break;
521: }
522:
/* Back up from the queue head to the containing pthread_rwlock_t. */
523: /* LINTED mind your own business */
524: ptr = (pthread_rwlock_t *)((uint8_t *)obj - off);
525: owner = (uintptr_t)ptr->ptr_owner;
526:
527: if ((owner & RW_THREAD) == 0) {
528: pthread__errorfunc(__FILE__, __LINE__, __func__,
529: "lock not held");
530: }
531:
/* Recompute the waiter bits from the threads still queued. */
532: if (!PTQ_EMPTY(&ptr->ptr_wblocked))
533: set = RW_HAS_WAITERS | RW_WRITE_WANTED;
534: else if (ptr->ptr_nreaders != 0)
535: set = RW_HAS_WAITERS;
1.14 ad 536: else
1.27 ad 537: set = 0;
1.6 cl 538:
/* CAS the corrected waiter bits into the owner word until it sticks. */
1.27 ad 539: for (;; owner = next) {
540: new = (owner & ~(RW_HAS_WAITERS | RW_WRITE_WANTED)) | set;
541: next = rw_cas(ptr, owner, new);
542: if (owner == next)
543: break;
544: }
545: }
546:
547: int
1.27 ad 548: _pthread_rwlock_held_np(pthread_rwlock_t *ptr)
1.2 thorpej 549: {
1.27 ad 550: uintptr_t owner = (uintptr_t)ptr->ptr_owner;
1.2 thorpej 551:
1.28 ad 552: if ((owner & RW_WRITE_LOCKED) != 0)
553: return (owner & RW_THREAD) == (uintptr_t)pthread__self();
1.27 ad 554: return (owner & RW_THREAD) != 0;
1.2 thorpej 555: }
556:
557: int
1.27 ad 558: _pthread_rwlock_rdheld_np(pthread_rwlock_t *ptr)
1.2 thorpej 559: {
1.27 ad 560: uintptr_t owner = (uintptr_t)ptr->ptr_owner;
1.2 thorpej 561:
1.27 ad 562: return (owner & RW_THREAD) != 0 && (owner & RW_WRITE_LOCKED) == 0;
1.2 thorpej 563: }
1.21 ad 564:
1.23 ad 565: int
1.27 ad 566: _pthread_rwlock_wrheld_np(pthread_rwlock_t *ptr)
1.23 ad 567: {
1.27 ad 568: uintptr_t owner = (uintptr_t)ptr->ptr_owner;
1.23 ad 569:
1.27 ad 570: return (owner & (RW_THREAD | RW_WRITE_LOCKED)) ==
571: ((uintptr_t)pthread__self() | RW_WRITE_LOCKED);
1.23 ad 572: }
573:
574: int
1.27 ad 575: pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
1.23 ad 576: {
577:
1.27 ad 578: if (attr == NULL)
579: return EINVAL;
580: attr->ptra_magic = _PT_RWLOCKATTR_MAGIC;
581:
582: return 0;
1.23 ad 583: }
584:
1.27 ad 585:
1.23 ad 586: int
1.27 ad 587: pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
1.23 ad 588: {
589:
1.27 ad 590: if ((attr == NULL) ||
591: (attr->ptra_magic != _PT_RWLOCKATTR_MAGIC))
592: return EINVAL;
593: attr->ptra_magic = _PT_RWLOCKATTR_DEAD;
594:
595: return 0;
1.23 ad 596: }
CVSweb <webmaster@jp.NetBSD.org>