Annotation of src/lib/libpthread/pthread.c, Revision 1.1.2.38
1.1.2.38! nathanw 1: /* $NetBSD: pthread.c,v 1.1.2.37 2002/12/30 22:24:34 thorpej Exp $ */
1.1.2.3 nathanw 2:
3: /*-
4: * Copyright (c) 2001 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Nathan J. Williams.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
1.1.2.1 nathanw 38:
39: #include <assert.h>
40: #include <err.h>
41: #include <errno.h>
1.1.2.14 nathanw 42: #include <lwp.h>
1.1.2.1 nathanw 43: #include <signal.h>
44: #include <stdlib.h>
1.1.2.11 nathanw 45: #include <string.h>
1.1.2.1 nathanw 46: #include <ucontext.h>
1.1.2.19 nathanw 47: #include <sys/cdefs.h>
1.1.2.1 nathanw 48:
1.1.2.30 thorpej 49: #include <sched.h>
1.1.2.1 nathanw 50: #include "pthread.h"
51: #include "pthread_int.h"
52:
53:
#undef PTHREAD_MAIN_DEBUG

#ifdef PTHREAD_MAIN_DEBUG
#define SDPRINTF(x) DPRINTF(x)
#else
#define SDPRINTF(x)
#endif

/* Entry point for newly created threads; see pthread_create(). */
static void pthread__create_tramp(void *(*start)(void *), void *arg);

/* Nonzero once pthread__start() has run (set by the first pthread_create). */
int pthread__started;

/* List of all threads in the system; guarded by allqueue_lock. */
pthread_spin_t allqueue_lock;
struct pthread_queue_t allqueue;
static int nthreads;	/* live-thread count; guarded by allqueue_lock */

/* Dead threads available for recycling, and idle threads to re-enqueue. */
pthread_spin_t deadqueue_lock;
struct pthread_queue_t deadqueue;
struct pthread_queue_t reidlequeue;


/* Source of unique thread numbers; see pthread__initthread(). */
static int nextthread;
static pthread_spin_t nextthread_lock;
static pthread_attr_t pthread_default_attr;

/* Scheduler queues, defined in the scheduling code. */
extern struct pthread_queue_t runqueue;
extern struct pthread_queue_t idlequeue;
extern pthread_spin_t runqueue_lock;
/*
 * Table of entry points handed to libc (via __libc_pthread_ops in
 * pthread_init()) so that libc can call into the thread library once
 * the process has gone threaded.  Order must match pthread_ops_t.
 */
pthread_ops_t pthread_ops = {
	pthread_mutex_init,
	pthread_mutex_lock,
	pthread_mutex_trylock,
	pthread_mutex_unlock,
	pthread_mutex_destroy,
	pthread_mutexattr_init,
	pthread_mutexattr_destroy,

	pthread_cond_init,
	pthread_cond_signal,
	pthread_cond_broadcast,
	pthread_cond_wait,
	pthread_cond_timedwait,
	pthread_cond_destroy,
	pthread_condattr_init,
	pthread_condattr_destroy,

	pthread_key_create,
	pthread_setspecific,
	pthread_getspecific,
	pthread_key_delete,

	pthread_once,

	pthread_self,

	pthread_sigmask,

	pthread__errno
};
114:
1.1.2.38! nathanw 115:
/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes is an internal example).
 *
 * NOTE: the ordering below is significant: the lock primitives must be
 * initialized before anything that takes a lock, and __isthreaded is
 * flipped only after everything else is ready.
 */
void pthread_init(void)
{
	pthread_t first;
	extern int __isthreaded;
	extern pthread_ops_t *__libc_pthread_ops;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&allqueue);
	PTQ_INIT(&deadqueue);
	PTQ_INIT(&reidlequeue);
	PTQ_INIT(&runqueue);
	PTQ_INIT(&idlequeue);

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	/* set == NULL, so this only fetches the current process mask. */
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__alarm_init();
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init();
#endif

	/* Tell libc that we're here and it should role-play accordingly. */
	__libc_pthread_ops = &pthread_ops;
	__isthreaded = 1;

}
159:
1.1.2.38! nathanw 160:
1.1.2.8 nathanw 161: static void
162: pthread__start(void)
163: {
164: pthread_t self, idle;
165: int i, ret;
166:
167: self = pthread__self(); /* should be the "main()" thread */
168:
1.1.2.34 nathanw 169:
1.1.2.18 nathanw 170: /* Create idle threads */
1.1.2.4 nathanw 171: for (i = 0; i < NIDLETHREADS; i++) {
172: ret = pthread__stackalloc(&idle);
173: if (ret != 0)
174: err(1, "Couldn't allocate stack for idle thread!");
1.1.2.21 nathanw 175: pthread__initthread(self, idle);
1.1.2.17 nathanw 176: sigfillset(&idle->pt_sigmask);
1.1.2.36 nathanw 177: idle->pt_type = PT_THREAD_IDLE;
1.1.2.4 nathanw 178: PTQ_INSERT_HEAD(&allqueue, idle, pt_allq);
1.1.2.8 nathanw 179: pthread__sched_idle(self, idle);
1.1.2.4 nathanw 180: }
1.1.2.1 nathanw 181:
1.1.2.9 nathanw 182: nthreads = 1;
1.1.2.1 nathanw 183: /* Start up the SA subsystem */
184: pthread__sa_start();
1.1.2.34 nathanw 185: SDPRINTF(("(pthread__start %p) Started.\n", self));
1.1.2.1 nathanw 186: }
187:
1.1.2.38! nathanw 188:
1.1.2.1 nathanw 189: /* General-purpose thread data structure sanitization. */
190: void
1.1.2.21 nathanw 191: pthread__initthread(pthread_t self, pthread_t t)
1.1.2.1 nathanw 192: {
1.1.2.21 nathanw 193: int id;
194:
195: pthread_spinlock(self, &nextthread_lock);
196: id = nextthread;
197: nextthread++;
198: pthread_spinunlock(self, &nextthread_lock);
199: t->pt_num = id;
200:
1.1.2.1 nathanw 201: t->pt_magic = PT_MAGIC;
202: t->pt_type = PT_THREAD_NORMAL;
203: t->pt_state = PT_STATE_RUNNABLE;
1.1.2.14 nathanw 204: pthread_lockinit(&t->pt_statelock);
1.1.2.1 nathanw 205: t->pt_spinlocks = 0;
206: t->pt_next = NULL;
207: t->pt_exitval = NULL;
208: t->pt_flags = 0;
1.1.2.13 nathanw 209: t->pt_cancel = 0;
210: t->pt_errno = 0;
1.1.2.1 nathanw 211: t->pt_parent = NULL;
212: t->pt_heldlock = NULL;
1.1.2.7 nathanw 213: t->pt_switchto = NULL;
214: t->pt_sleepuc = NULL;
1.1.2.1 nathanw 215: sigemptyset(&t->pt_siglist);
216: sigemptyset(&t->pt_sigmask);
1.1.2.14 nathanw 217: pthread_lockinit(&t->pt_siglock);
1.1.2.2 nathanw 218: PTQ_INIT(&t->pt_joiners);
1.1.2.14 nathanw 219: pthread_lockinit(&t->pt_join_lock);
1.1.2.13 nathanw 220: PTQ_INIT(&t->pt_cleanup_stack);
1.1.2.8 nathanw 221: memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
1.1.2.1 nathanw 222: #ifdef PTHREAD__DEBUG
223: t->blocks = 0;
224: t->preempts = 0;
225: t->rescheds = 0;
226: #endif
227: }
228:
1.1.2.38! nathanw 229:
1.1.2.1 nathanw 230: int
1.1.2.32 nathanw 231: pthread_create(pthread_t *thread, const pthread_attr_t *attr,
1.1.2.1 nathanw 232: void *(*startfunc)(void *), void *arg)
233: {
234: pthread_t self, newthread;
235: pthread_attr_t nattr;
236: int ret;
237:
238: PTHREADD_ADD(PTHREADD_CREATE);
239: assert(thread != NULL);
240:
1.1.2.27 nathanw 241: /*
242: * It's okay to check this without a lock because there can
243: * only be one thread before it becomes true.
1.1.2.1 nathanw 244: */
1.1.2.28 nathanw 245: if (pthread__started == 0) {
1.1.2.1 nathanw 246: pthread__start();
1.1.2.28 nathanw 247: pthread__started = 1;
1.1.2.1 nathanw 248: }
249:
250: if (attr == NULL)
251: nattr = pthread_default_attr;
1.1.2.2 nathanw 252: else if (((attr != NULL) && (attr->pta_magic == PT_ATTR_MAGIC)))
1.1.2.1 nathanw 253: nattr = *attr;
254: else
255: return EINVAL;
1.1.2.32 nathanw 256:
1.1.2.1 nathanw 257:
258: self = pthread__self();
259:
1.1.2.20 nathanw 260: pthread_spinlock(self, &deadqueue_lock);
261: if (!PTQ_EMPTY(&deadqueue)) {
262: newthread= PTQ_FIRST(&deadqueue);
263: PTQ_REMOVE(&deadqueue, newthread, pt_allq);
1.1.2.32 nathanw 264: pthread_spinunlock(self, &deadqueue_lock);
1.1.2.20 nathanw 265: } else {
266: pthread_spinunlock(self, &deadqueue_lock);
267: /* Set up a stack and allocate space for a pthread_st. */
268: ret = pthread__stackalloc(&newthread);
269: if (ret != 0)
270: return ret;
271: }
1.1.2.32 nathanw 272:
1.1.2.1 nathanw 273: /* 2. Set up state. */
1.1.2.21 nathanw 274: pthread__initthread(self, newthread);
1.1.2.2 nathanw 275: newthread->pt_flags = nattr.pta_flags;
1.1.2.1 nathanw 276: newthread->pt_sigmask = self->pt_sigmask;
1.1.2.32 nathanw 277:
1.1.2.27 nathanw 278: /*
279: * 3. Set up context.
280: *
281: * The pt_uc pointer points to a location safely below the
1.1.2.1 nathanw 282: * stack start; this is arranged by pthread__stackalloc().
283: */
1.1.2.29 nathanw 284: _INITCONTEXT_U(newthread->pt_uc);
1.1.2.1 nathanw 285: newthread->pt_uc->uc_stack = newthread->pt_stack;
286: newthread->pt_uc->uc_link = NULL;
287: makecontext(newthread->pt_uc, pthread__create_tramp, 2,
288: startfunc, arg);
289:
1.1.2.9 nathanw 290: /* 4. Add to list of all threads. */
1.1.2.1 nathanw 291: pthread_spinlock(self, &allqueue_lock);
1.1.2.2 nathanw 292: PTQ_INSERT_HEAD(&allqueue, newthread, pt_allq);
1.1.2.9 nathanw 293: nthreads++;
1.1.2.1 nathanw 294: pthread_spinunlock(self, &allqueue_lock);
1.1.2.32 nathanw 295:
1.1.2.34 nathanw 296: SDPRINTF(("(pthread_create %p) Created new thread %p.\n", self, newthread));
1.1.2.9 nathanw 297: /* 5. Put on run queue. */
1.1.2.1 nathanw 298: pthread__sched(self, newthread);
1.1.2.32 nathanw 299:
1.1.2.1 nathanw 300: *thread = newthread;
301:
302: return 0;
303: }
304:
1.1.2.38! nathanw 305:
/*
 * First code run by every new thread: call the user's start routine
 * and hand its return value to pthread_exit().  Never returns.
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{

	pthread_exit(start(arg));

	/* NOTREACHED */
}
317:
318:
/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	pthread_spinlock(self, &deadqueue_lock);
	PTQ_INSERT_TAIL(&reidlequeue, self, pt_runq);
	/* Mark ourselves re-idled; queue insertion and flag are atomic
	 * with respect to other threads because of deadqueue_lock. */
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	sa_yield();
}
348:
349:
/*
 * Terminate the calling thread with exit value `retval': run the
 * cancellation cleanup stack, destroy thread-specific data, then either
 * recycle immediately (detached) or become a zombie and wake any
 * joiners.  If this was the last thread, the whole process exits.
 * Never returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self, joiner;
	struct pt_clean_t *cleanup;
	int nt;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting.\n", self));

	/* Disable cancellability. */
	self->pt_flags |= PT_FLAG_CS_DISABLED;

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		/* Detached: nobody will join us; recycle directly. */
		pthread_spinunlock(self, &self->pt_join_lock);

		pthread_spinlock(self, &allqueue_lock);
		PTQ_REMOVE(&allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &deadqueue_lock);
		PTQ_INSERT_HEAD(&deadqueue, self, pt_allq);
		/* pthread__block() releases deadqueue_lock for us. */
		pthread__block(self, &deadqueue_lock);
	} else {
		/* Joinable: become a zombie and let a joiner reap us. */
		pthread_spinlock(self, &allqueue_lock);
		nthreads--;
		nt = nthreads;
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_spinunlock(self, &allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/* Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		PTQ_FOREACH(joiner, &self->pt_joiners, pt_sleep)
			pthread__sched(self, joiner);
		/* Still holding pt_join_lock from above; block releases it. */
		pthread__block(self, &self->pt_join_lock);
	}

	/* NOTREACHED */
	assert(0);
	exit(1);
}
417:
418:
/*
 * Wait for `thread' to terminate and collect its exit value in *valptr
 * (if non-NULL).  Returns 0 on success, ESRCH for an unknown or
 * already-reaped thread, EINVAL for a detached or corrupt handle, and
 * EDEADLK for a self-join.  The winning joiner moves the dead thread
 * from allqueue to deadqueue for recycling.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	/* Reject stack garbage before dereferencing it. */
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}

	if ((thread->pt_state != PT_STATE_ZOMBIE) &&
	    (thread->pt_state != PT_STATE_DEAD)) {
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 * Target is still running: sleep on its joiner queue.
		 */
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			/* We were cancelled while preparing to sleep. */
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		/* pthread__block() releases pt_join_lock; re-take after. */
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	if ((thread->pt_state == PT_STATE_DEAD) ||
	    (thread->pt_flags & PT_FLAG_DETACHED)) {
		/* Someone beat us to the join, or called pthread_detach(). */
		pthread_spinunlock(self, &thread->pt_join_lock);
		return ESRCH;
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	pthread_spinunlock(self, &thread->pt_join_lock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &allqueue_lock);
	PTQ_REMOVE(&allqueue, thread, pt_allq);
	pthread_spinunlock(self, &allqueue_lock);

	pthread_spinlock(self, &deadqueue_lock);
	PTQ_INSERT_HEAD(&deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &deadqueue_lock);

	return 0;
}
493:
1.1.2.38! nathanw 494:
1.1.2.1 nathanw 495: int
496: pthread_equal(pthread_t t1, pthread_t t2)
497: {
498:
499: /* Nothing special here. */
500: return (t1 == t2);
501: }
502:
1.1.2.38! nathanw 503:
1.1.2.1 nathanw 504: int
505: pthread_detach(pthread_t thread)
506: {
507: pthread_t self, joiner;
508:
1.1.2.31 nathanw 509: self = pthread__self();
510:
511: if (pthread__find(self, thread) != 0)
512: return ESRCH;
513:
514: if (thread->pt_magic != PT_MAGIC)
1.1.2.1 nathanw 515: return EINVAL;
516:
517: pthread_spinlock(self, &thread->pt_join_lock);
1.1.2.32 nathanw 518:
1.1.2.1 nathanw 519: if (thread->pt_flags & PT_FLAG_DETACHED) {
520: pthread_spinunlock(self, &thread->pt_join_lock);
521: return EINVAL;
522: }
523:
524: thread->pt_flags |= PT_FLAG_DETACHED;
525:
526: /* Any joiners have to be punted now. */
1.1.2.32 nathanw 527: PTQ_FOREACH(joiner, &thread->pt_joiners, pt_sleep)
1.1.2.1 nathanw 528: pthread__sched(self, joiner);
529:
530: pthread_spinunlock(self, &thread->pt_join_lock);
531:
532: return 0;
533: }
534:
1.1.2.38! nathanw 535:
1.1.2.1 nathanw 536: int
537: pthread_attr_init(pthread_attr_t *attr)
538: {
539:
1.1.2.2 nathanw 540: attr->pta_magic = PT_ATTR_MAGIC;
541: attr->pta_flags = 0;
1.1.2.1 nathanw 542:
543: return 0;
544: }
545:
546:
547: int
548: pthread_attr_destroy(pthread_attr_t *attr)
549: {
550:
551: return 0;
552: }
553:
554:
555: int
556: pthread_attr_getdetachstate(pthread_attr_t *attr, int *detachstate)
557: {
558:
1.1.2.2 nathanw 559: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 560: return EINVAL;
561:
1.1.2.2 nathanw 562: *detachstate = (attr->pta_flags & PT_FLAG_DETACHED);
1.1.2.1 nathanw 563:
564: return 0;
565: }
566:
567:
568: int
569: pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
570: {
1.1.2.2 nathanw 571: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 572: return EINVAL;
1.1.2.32 nathanw 573:
1.1.2.1 nathanw 574: switch (detachstate) {
575: case PTHREAD_CREATE_JOINABLE:
1.1.2.2 nathanw 576: attr->pta_flags &= ~PT_FLAG_DETACHED;
1.1.2.1 nathanw 577: break;
578: case PTHREAD_CREATE_DETACHED:
1.1.2.2 nathanw 579: attr->pta_flags |= PT_FLAG_DETACHED;
1.1.2.1 nathanw 580: break;
581: default:
582: return EINVAL;
583: }
584:
585: return 0;
586: }
587:
588:
589: int
1.1.2.32 nathanw 590: pthread_attr_setschedparam(pthread_attr_t *attr,
1.1.2.1 nathanw 591: const struct sched_param *param)
592: {
593:
1.1.2.2 nathanw 594: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 595: return EINVAL;
1.1.2.32 nathanw 596:
1.1.2.1 nathanw 597: if (param == NULL)
598: return EINVAL;
599:
600: if (param->sched_priority != 0)
601: return EINVAL;
602:
603: return 0;
604: }
605:
606:
607: int
608: pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param *param)
609: {
610:
1.1.2.2 nathanw 611: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 612: return EINVAL;
1.1.2.32 nathanw 613:
1.1.2.1 nathanw 614: if (param == NULL)
615: return EINVAL;
1.1.2.32 nathanw 616:
1.1.2.1 nathanw 617: param->sched_priority = 0;
618:
619: return 0;
620: }
621:
1.1.2.38! nathanw 622:
/*
 * Return the calling thread's handle.
 *
 * XXX There should be a way for applications to use the efficient
 * inline version, but there are opacity/namespace issues.
 */
pthread_t
pthread_self(void)
{

	return pthread__self();
}
633:
634:
/*
 * Request cancellation of `thread'.  If the target has cancellation
 * enabled, mark it cancelled and, if it is blocked (in the kernel or on
 * an internal sleep queue), wake it so it notices.  A disabled target
 * only gets PT_FLAG_CS_PENDING recorded for later.
 *
 * NOTE(review): pt_flags is read and written here without holding
 * pt_statelock — looks racy against the target changing its own flags;
 * confirm against the locking rules in pthread_int.h.
 */
int
pthread_cancel(pthread_t thread)
{
	pthread_t self;
	int flags;

	/* Only threads in a live state can be cancelled. */
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE ||
	    thread->pt_state == PT_STATE_BLOCKED_SYS))
		return ESRCH;

	self = pthread__self();
	flags = thread->pt_flags;

	flags |= PT_FLAG_CS_PENDING;
	if ((flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_state == PT_STATE_BLOCKED_SYS) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up and the
			 * caller will check for the cancellation.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	}

	thread->pt_flags = flags;

	return 0;
}
688:
689:
690: int
691: pthread_setcancelstate(int state, int *oldstate)
692: {
693: pthread_t self;
694: int flags;
1.1.2.32 nathanw 695:
1.1.2.13 nathanw 696: self = pthread__self();
697: flags = self->pt_flags;
698:
699: if (oldstate != NULL) {
700: if (flags & PT_FLAG_CS_DISABLED)
701: *oldstate = PTHREAD_CANCEL_DISABLE;
702: else
703: *oldstate = PTHREAD_CANCEL_ENABLE;
704: }
705:
706: if (state == PTHREAD_CANCEL_DISABLE)
707: flags |= PT_FLAG_CS_DISABLED;
708: else if (state == PTHREAD_CANCEL_ENABLE) {
709: flags &= ~PT_FLAG_CS_DISABLED;
710: /*
711: * If a cancellation was requested while cancellation
712: * was disabled, note that fact for future
713: * cancellation tests.
714: */
715: if (flags & PT_FLAG_CS_PENDING) {
716: self->pt_cancel = 1;
717: /* This is not a deferred cancellation point. */
718: if (flags & PT_FLAG_CS_ASYNC)
1.1.2.14 nathanw 719: pthread_exit(PTHREAD_CANCELED);
1.1.2.13 nathanw 720: }
721: } else
722: return EINVAL;
1.1.2.32 nathanw 723:
1.1.2.13 nathanw 724: self->pt_flags = flags;
725:
726: return 0;
727: }
728:
729:
730: int
731: pthread_setcanceltype(int type, int *oldtype)
732: {
733: pthread_t self;
734: int flags;
1.1.2.32 nathanw 735:
1.1.2.13 nathanw 736: self = pthread__self();
737: flags = self->pt_flags;
738:
739: if (oldtype != NULL) {
740: if (flags & PT_FLAG_CS_ASYNC)
741: *oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
742: else
743: *oldtype = PTHREAD_CANCEL_DEFERRED;
744: }
745:
746: if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
747: flags |= PT_FLAG_CS_ASYNC;
748: if (self->pt_cancel)
1.1.2.14 nathanw 749: pthread_exit(PTHREAD_CANCELED);
1.1.2.13 nathanw 750: } else if (type == PTHREAD_CANCEL_DEFERRED)
751: flags &= ~PT_FLAG_CS_ASYNC;
752: else
753: return EINVAL;
754:
755: self->pt_flags = flags;
756:
757: return 0;
758: }
759:
760:
761: void
762: pthread_testcancel()
763: {
764: pthread_t self;
1.1.2.32 nathanw 765:
1.1.2.13 nathanw 766: self = pthread__self();
1.1.2.14 nathanw 767: if (self->pt_cancel)
768: pthread_exit(PTHREAD_CANCELED);
1.1.2.31 nathanw 769: }
770:
1.1.2.38! nathanw 771:
1.1.2.31 nathanw 772: /*
773: * POSIX requires that certain functions return an error rather than
774: * invoking undefined behavior even when handed completely bogus
775: * pthread_t values, e.g. stack garbage or (pthread_t)666. This
776: * utility routine searches the list of threads for the pthread_t
777: * value without dereferencing it.
778: */
779: int
780: pthread__find(pthread_t self, pthread_t id)
781: {
782: pthread_t target;
783:
784: pthread_spinlock(self, &allqueue_lock);
785: PTQ_FOREACH(target, &allqueue, pt_allq)
786: if (target == id)
787: break;
788: pthread_spinunlock(self, &allqueue_lock);
789:
790: if (target == NULL)
791: return ESRCH;
792:
793: return 0;
1.1.2.13 nathanw 794: }
795:
796:
797: void
798: pthread__testcancel(pthread_t self)
799: {
800:
801: if (self->pt_cancel)
1.1.2.14 nathanw 802: pthread_exit(PTHREAD_CANCELED);
1.1.2.13 nathanw 803: }
804:
1.1.2.38! nathanw 805:
1.1.2.13 nathanw 806: void
807: pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
808: {
809: pthread_t self;
810: struct pt_clean_t *entry;
1.1.2.32 nathanw 811:
1.1.2.13 nathanw 812: self = pthread__self();
813: entry = store;
814: entry->ptc_cleanup = cleanup;
815: entry->ptc_arg = arg;
816: PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
817: }
818:
1.1.2.38! nathanw 819:
1.1.2.13 nathanw 820: void
821: pthread__cleanup_pop(int ex, void *store)
822: {
823: pthread_t self;
824: struct pt_clean_t *entry;
1.1.2.32 nathanw 825:
1.1.2.13 nathanw 826: self = pthread__self();
827: entry = store;
1.1.2.32 nathanw 828:
1.1.2.13 nathanw 829: PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
830: if (ex)
831: (*entry->ptc_cleanup)(entry->ptc_arg);
832: }
833:
834:
835: int *
836: pthread__errno(void)
837: {
838: pthread_t self;
1.1.2.32 nathanw 839:
1.1.2.13 nathanw 840: self = pthread__self();
841:
842: return &(self->pt_errno);
1.1.2.1 nathanw 843: }
CVSweb <webmaster@jp.NetBSD.org>