Annotation of src/lib/libpthread/pthread.c, Revision 1.1.2.36
1.1.2.36! nathanw 1: /* $NetBSD: pthread.c,v 1.1.2.35 2002/12/18 22:51:47 nathanw Exp $ */
1.1.2.3 nathanw 2:
3: /*-
4: * Copyright (c) 2001 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Nathan J. Williams.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
1.1.2.1 nathanw 38:
39: #include <assert.h>
40: #include <err.h>
41: #include <errno.h>
1.1.2.14 nathanw 42: #include <lwp.h>
1.1.2.1 nathanw 43: #include <signal.h>
44: #include <stdlib.h>
1.1.2.11 nathanw 45: #include <string.h>
1.1.2.1 nathanw 46: #include <ucontext.h>
1.1.2.19 nathanw 47: #include <sys/cdefs.h>
1.1.2.1 nathanw 48:
1.1.2.30 thorpej 49: #include <sched.h>
1.1.2.1 nathanw 50: #include "pthread.h"
51: #include "pthread_int.h"
52:
53:
1.1.2.34 nathanw 54: #undef PTHREAD_MAIN_DEBUG
55:
56: #ifdef PTHREAD_MAIN_DEBUG
57: #define SDPRINTF(x) DPRINTF(x)
58: #else
59: #define SDPRINTF(x)
60: #endif
61:
1.1.2.1 nathanw 62: static void pthread__create_tramp(void *(*start)(void *), void *arg);
63:
64: static pthread_attr_t pthread_default_attr;
65:
1.1.2.13 nathanw 66: pthread_spin_t allqueue_lock;
67: struct pthread_queue_t allqueue;
1.1.2.9 nathanw 68: static int nthreads;
1.1.2.1 nathanw 69:
1.1.2.13 nathanw 70: pthread_spin_t deadqueue_lock;
71: struct pthread_queue_t deadqueue;
72: struct pthread_queue_t reidlequeue;
1.1.2.1 nathanw 73:
74:
1.1.2.13 nathanw 75: extern struct pthread_queue_t runqueue;
76: extern struct pthread_queue_t idlequeue;
77: extern pthread_spin_t runqueue_lock;
1.1.2.1 nathanw 78:
1.1.2.28 nathanw 79: int pthread__started;
1.1.2.19 nathanw 80:
1.1.2.21 nathanw 81: static int nextthread;
82: pthread_spin_t nextthread_lock;
83:
/*
 * Table of entry points handed to libc (assigned to __libc_pthread_ops
 * in pthread_init()) so libc can route its internal locking and
 * thread-state queries through this library once threading starts.
 * Entry order is fixed by the pthread_ops_t layout -- do not reorder.
 */
pthread_ops_t pthread_ops = {
	pthread_mutex_init,
	pthread_mutex_lock,
	pthread_mutex_trylock,
	pthread_mutex_unlock,
	pthread_mutex_destroy,
	pthread_mutexattr_init,
	pthread_mutexattr_destroy,

	pthread_cond_init,
	pthread_cond_signal,
	pthread_cond_broadcast,
	pthread_cond_wait,
	pthread_cond_timedwait,
	pthread_cond_destroy,
	pthread_condattr_init,
	pthread_condattr_destroy,

	pthread_key_create,
	pthread_setspecific,
	pthread_getspecific,
	pthread_key_delete,

	pthread_once,

	pthread_self,

	pthread_sigmask,

	pthread__errno
};
115:
/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes is an internal example).
 */
void pthread_init(void)
{
	pthread_t first;
	extern int __isthreaded;
	extern pthread_ops_t *__libc_pthread_ops;

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&allqueue);
	PTQ_INIT(&deadqueue);
	PTQ_INIT(&reidlequeue);
	PTQ_INIT(&runqueue);
	PTQ_INIT(&idlequeue);

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first, first);
	first->pt_state = PT_STATE_RUNNING;
	/* Snapshot the current process signal mask into the first thread
	 * (how == 0 with a NULL set only queries the mask). */
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__alarm_init();
	pthread__signal_init();
	PTHREAD_MD_INIT
#ifdef PTHREAD__DEBUG
	pthread__debug_init();
#endif

	/* Tell libc that we're here and it should role-play accordingly. */
	__libc_pthread_ops = &pthread_ops;
	__isthreaded = 1;

}
156:
157: static void
158: pthread__start(void)
159: {
160: pthread_t self, idle;
161: int i, ret;
162:
163: self = pthread__self(); /* should be the "main()" thread */
164:
1.1.2.34 nathanw 165:
1.1.2.18 nathanw 166: /* Create idle threads */
1.1.2.4 nathanw 167: for (i = 0; i < NIDLETHREADS; i++) {
168: ret = pthread__stackalloc(&idle);
169: if (ret != 0)
170: err(1, "Couldn't allocate stack for idle thread!");
1.1.2.21 nathanw 171: pthread__initthread(self, idle);
1.1.2.17 nathanw 172: sigfillset(&idle->pt_sigmask);
1.1.2.36! nathanw 173: idle->pt_type = PT_THREAD_IDLE;
1.1.2.4 nathanw 174: PTQ_INSERT_HEAD(&allqueue, idle, pt_allq);
1.1.2.8 nathanw 175: pthread__sched_idle(self, idle);
1.1.2.4 nathanw 176: }
1.1.2.1 nathanw 177:
1.1.2.9 nathanw 178: nthreads = 1;
1.1.2.1 nathanw 179: /* Start up the SA subsystem */
180: pthread__sa_start();
1.1.2.34 nathanw 181: SDPRINTF(("(pthread__start %p) Started.\n", self));
1.1.2.1 nathanw 182: }
183:
/* General-purpose thread data structure sanitization. */
/*
 * Resets every field of 't' to a pristine "new thread" state.  Called
 * both for freshly allocated threads and for recycled ones pulled off
 * the dead queue, so every field must be explicitly reinitialized here.
 * 'self' is the calling thread (needed by the spinlock primitives).
 */
void
pthread__initthread(pthread_t self, pthread_t t)
{
	int id;

	/* Assign a unique serial number under the counter lock. */
	pthread_spinlock(self, &nextthread_lock);
	id = nextthread;
	nextthread++;
	pthread_spinunlock(self, &nextthread_lock);
	t->pt_num = id;

	t->pt_magic = PT_MAGIC;
	t->pt_type = PT_THREAD_NORMAL;
	t->pt_state = PT_STATE_RUNNABLE;
	pthread_lockinit(&t->pt_statelock);
	t->pt_spinlocks = 0;
	t->pt_next = NULL;
	t->pt_exitval = NULL;
	t->pt_flags = 0;
	t->pt_cancel = 0;
	t->pt_errno = 0;
	t->pt_parent = NULL;
	t->pt_heldlock = NULL;
	t->pt_switchto = NULL;
	t->pt_sleepuc = NULL;
	/* Signal state: no pending signals, nothing blocked. */
	sigemptyset(&t->pt_siglist);
	sigemptyset(&t->pt_sigmask);
	pthread_lockinit(&t->pt_siglock);
	/* Join and cancellation-cleanup bookkeeping. */
	PTQ_INIT(&t->pt_joiners);
	pthread_lockinit(&t->pt_join_lock);
	PTQ_INIT(&t->pt_cleanup_stack);
	/* Clear thread-specific-data slots (recycled threads may have
	 * stale values here). */
	memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
#ifdef PTHREAD__DEBUG
	t->blocks = 0;
	t->preempts = 0;
	t->rescheds = 0;
#endif
}
223:
224: int
1.1.2.32 nathanw 225: pthread_create(pthread_t *thread, const pthread_attr_t *attr,
1.1.2.1 nathanw 226: void *(*startfunc)(void *), void *arg)
227: {
228: pthread_t self, newthread;
229: pthread_attr_t nattr;
230: int ret;
231:
232: PTHREADD_ADD(PTHREADD_CREATE);
233: assert(thread != NULL);
234:
1.1.2.27 nathanw 235: /*
236: * It's okay to check this without a lock because there can
237: * only be one thread before it becomes true.
1.1.2.1 nathanw 238: */
1.1.2.28 nathanw 239: if (pthread__started == 0) {
1.1.2.1 nathanw 240: pthread__start();
1.1.2.28 nathanw 241: pthread__started = 1;
1.1.2.1 nathanw 242: }
243:
244: if (attr == NULL)
245: nattr = pthread_default_attr;
1.1.2.2 nathanw 246: else if (((attr != NULL) && (attr->pta_magic == PT_ATTR_MAGIC)))
1.1.2.1 nathanw 247: nattr = *attr;
248: else
249: return EINVAL;
1.1.2.32 nathanw 250:
1.1.2.1 nathanw 251:
252: self = pthread__self();
253:
1.1.2.20 nathanw 254: pthread_spinlock(self, &deadqueue_lock);
255: if (!PTQ_EMPTY(&deadqueue)) {
256: newthread= PTQ_FIRST(&deadqueue);
257: PTQ_REMOVE(&deadqueue, newthread, pt_allq);
1.1.2.32 nathanw 258: pthread_spinunlock(self, &deadqueue_lock);
1.1.2.20 nathanw 259: } else {
260: pthread_spinunlock(self, &deadqueue_lock);
261: /* Set up a stack and allocate space for a pthread_st. */
262: ret = pthread__stackalloc(&newthread);
263: if (ret != 0)
264: return ret;
265: }
1.1.2.32 nathanw 266:
1.1.2.1 nathanw 267: /* 2. Set up state. */
1.1.2.21 nathanw 268: pthread__initthread(self, newthread);
1.1.2.2 nathanw 269: newthread->pt_flags = nattr.pta_flags;
1.1.2.1 nathanw 270: newthread->pt_sigmask = self->pt_sigmask;
1.1.2.32 nathanw 271:
1.1.2.27 nathanw 272: /*
273: * 3. Set up context.
274: *
275: * The pt_uc pointer points to a location safely below the
1.1.2.1 nathanw 276: * stack start; this is arranged by pthread__stackalloc().
277: */
1.1.2.29 nathanw 278: _INITCONTEXT_U(newthread->pt_uc);
1.1.2.1 nathanw 279: newthread->pt_uc->uc_stack = newthread->pt_stack;
280: newthread->pt_uc->uc_link = NULL;
281: makecontext(newthread->pt_uc, pthread__create_tramp, 2,
282: startfunc, arg);
283:
1.1.2.9 nathanw 284: /* 4. Add to list of all threads. */
1.1.2.1 nathanw 285: pthread_spinlock(self, &allqueue_lock);
1.1.2.2 nathanw 286: PTQ_INSERT_HEAD(&allqueue, newthread, pt_allq);
1.1.2.9 nathanw 287: nthreads++;
1.1.2.1 nathanw 288: pthread_spinunlock(self, &allqueue_lock);
1.1.2.32 nathanw 289:
1.1.2.34 nathanw 290: SDPRINTF(("(pthread_create %p) Created new thread %p.\n", self, newthread));
1.1.2.9 nathanw 291: /* 5. Put on run queue. */
1.1.2.1 nathanw 292: pthread__sched(self, newthread);
1.1.2.32 nathanw 293:
1.1.2.1 nathanw 294: *thread = newthread;
295:
296: return 0;
297: }
298:
/*
 * Trampoline that every new thread starts in: run the user's start
 * routine and convert its return value into the thread's exit value.
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{

	pthread_exit((*start)(arg));

	/* NOTREACHED */
}
310:
311:
/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();

	/*
	 * The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	/* NOTE(review): deadqueue_lock is used to guard reidlequeue here,
	 * not a dedicated lock -- presumably intentional lock sharing;
	 * confirm against the queue's other users. */
	pthread_spinlock(self, &deadqueue_lock);
	PTQ_INSERT_TAIL(&reidlequeue, self, pt_runq);
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	sa_yield();
}
341:
342:
/*
 * Terminate the calling thread with exit value 'retval'.
 *
 * Runs pending cancellation-cleanup handlers (LIFO), destroys
 * thread-specific data, then either recycles the thread immediately
 * (detached) or leaves a zombie for pthread_join() to reap.  If this
 * was the last thread, the whole process exits.  Never returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self, joiner;
	struct pt_clean_t *cleanup;
	int nt;

	self = pthread__self();
	SDPRINTF(("(pthread_exit %p) Exiting.\n", self));

	/* Disable cancellability. */
	self->pt_flags |= PT_FLAG_CS_DISABLED;

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		/* Detached: nobody will join us, so go straight to the
		 * dead queue for recycling. */
		pthread_spinunlock(self, &self->pt_join_lock);

		pthread_spinlock(self, &allqueue_lock);
		PTQ_REMOVE(&allqueue, self, pt_allq);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &deadqueue_lock);
		PTQ_INSERT_HEAD(&deadqueue, self, pt_allq);
		/* pthread__block() switches away and releases the lock;
		 * control never comes back here. */
		pthread__block(self, &deadqueue_lock);
	} else {
		/* Joinable: become a zombie and hand ourselves to a joiner.
		 * Note pt_join_lock is still held; pthread__block() releases
		 * it after we are off the processor. */
		pthread_spinlock(self, &allqueue_lock);
		nthreads--;
		nt = nthreads;
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_spinunlock(self, &allqueue_lock);
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/* Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		PTQ_FOREACH(joiner, &self->pt_joiners, pt_sleep)
			pthread__sched(self, joiner);
		pthread__block(self, &self->pt_join_lock);
	}


	/* NOTREACHED */
	assert(0);
	exit(1);
}
411:
412:
/*
 * Wait for 'thread' to terminate and optionally collect its exit value
 * through 'valptr'.  Returns 0 on success, ESRCH for a bogus or
 * already-reaped handle, EINVAL for a detached thread, EDEADLK for a
 * self-join.  Exactly one joiner wins; losers get ESRCH.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;

	self = pthread__self();
	SDPRINTF(("(pthread_join %p) Joining %p.\n", self, thread));

	/* Validate the handle without dereferencing possible garbage. */
	if (pthread__find(self, thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}

	if ((thread->pt_state != PT_STATE_ZOMBIE) &&
	    (thread->pt_state != PT_STATE_DEAD)) {
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			/* We were cancelled; act on it instead of sleeping. */
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		/* Publish where we will be sleeping so pthread_cancel()
		 * can find us and pull us off the queue. */
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		self->pt_sleepobj = thread;
		self->pt_sleepq = &thread->pt_joiners;
		self->pt_sleeplock = &thread->pt_join_lock;
		pthread_spinunlock(self, &self->pt_statelock);

		/* Park on the joiner queue; pthread__block() releases
		 * pt_join_lock once we are safely off the processor. */
		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	if ((thread->pt_state == PT_STATE_DEAD) ||
	    (thread->pt_flags & PT_FLAG_DETACHED)) {
		/* Someone beat us to the join, or called pthread_detach(). */
		pthread_spinunlock(self, &thread->pt_join_lock);
		return ESRCH;
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	pthread_spinunlock(self, &thread->pt_join_lock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	SDPRINTF(("(pthread_join %p) Joined %p.\n", self, thread));

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &allqueue_lock);
	PTQ_REMOVE(&allqueue, thread, pt_allq);
	pthread_spinunlock(self, &allqueue_lock);

	pthread_spinlock(self, &deadqueue_lock);
	PTQ_INSERT_HEAD(&deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &deadqueue_lock);

	return 0;
}
487:
int
pthread_equal(pthread_t t1, pthread_t t2)
{

	/* Nothing special here: a pthread_t is a pointer to the thread
	 * structure, so pointer equality is thread identity. */
	return (t1 == t2);
}
495:
496: int
497: pthread_detach(pthread_t thread)
498: {
499: pthread_t self, joiner;
500:
1.1.2.31 nathanw 501: self = pthread__self();
502:
503: if (pthread__find(self, thread) != 0)
504: return ESRCH;
505:
506: if (thread->pt_magic != PT_MAGIC)
1.1.2.1 nathanw 507: return EINVAL;
508:
509: pthread_spinlock(self, &thread->pt_join_lock);
1.1.2.32 nathanw 510:
1.1.2.1 nathanw 511: if (thread->pt_flags & PT_FLAG_DETACHED) {
512: pthread_spinunlock(self, &thread->pt_join_lock);
513: return EINVAL;
514: }
515:
516: thread->pt_flags |= PT_FLAG_DETACHED;
517:
518: /* Any joiners have to be punted now. */
1.1.2.32 nathanw 519: PTQ_FOREACH(joiner, &thread->pt_joiners, pt_sleep)
1.1.2.1 nathanw 520: pthread__sched(self, joiner);
521:
522: pthread_spinunlock(self, &thread->pt_join_lock);
523:
524: return 0;
525: }
526:
527: int
528: pthread_attr_init(pthread_attr_t *attr)
529: {
530:
1.1.2.2 nathanw 531: attr->pta_magic = PT_ATTR_MAGIC;
532: attr->pta_flags = 0;
1.1.2.1 nathanw 533:
534: return 0;
535: }
536:
537:
int
pthread_attr_destroy(pthread_attr_t *attr)
{

	/* A pthread_attr_t holds no dynamically allocated state, so
	 * there is nothing to release. */
	return 0;
}
544:
545:
546: int
547: pthread_attr_getdetachstate(pthread_attr_t *attr, int *detachstate)
548: {
549:
1.1.2.2 nathanw 550: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 551: return EINVAL;
552:
1.1.2.2 nathanw 553: *detachstate = (attr->pta_flags & PT_FLAG_DETACHED);
1.1.2.1 nathanw 554:
555: return 0;
556: }
557:
558:
559: int
560: pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
561: {
1.1.2.2 nathanw 562: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 563: return EINVAL;
1.1.2.32 nathanw 564:
1.1.2.1 nathanw 565: switch (detachstate) {
566: case PTHREAD_CREATE_JOINABLE:
1.1.2.2 nathanw 567: attr->pta_flags &= ~PT_FLAG_DETACHED;
1.1.2.1 nathanw 568: break;
569: case PTHREAD_CREATE_DETACHED:
1.1.2.2 nathanw 570: attr->pta_flags |= PT_FLAG_DETACHED;
1.1.2.1 nathanw 571: break;
572: default:
573: return EINVAL;
574: }
575:
576: return 0;
577: }
578:
579:
580: int
1.1.2.32 nathanw 581: pthread_attr_setschedparam(pthread_attr_t *attr,
1.1.2.1 nathanw 582: const struct sched_param *param)
583: {
584:
1.1.2.2 nathanw 585: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 586: return EINVAL;
1.1.2.32 nathanw 587:
1.1.2.1 nathanw 588: if (param == NULL)
589: return EINVAL;
590:
591: if (param->sched_priority != 0)
592: return EINVAL;
593:
594: return 0;
595: }
596:
597:
598: int
599: pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param *param)
600: {
601:
1.1.2.2 nathanw 602: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 603: return EINVAL;
1.1.2.32 nathanw 604:
1.1.2.1 nathanw 605: if (param == NULL)
606: return EINVAL;
1.1.2.32 nathanw 607:
1.1.2.1 nathanw 608: param->sched_priority = 0;
609:
610: return 0;
611: }
612:
/*
 * XXX There should be a way for applications to use the efficient
 * inline version, but there are opacity/namespace issues.
 */

/* Out-of-line, ABI-visible wrapper around the internal inline lookup. */
pthread_t
pthread_self(void)
{
	return pthread__self();
}
623:
624:
/*
 * Request cancellation of 'thread'.  If the target has cancellation
 * enabled, mark it cancelled and, depending on where it is blocked,
 * wake it so it notices; otherwise just record the request as pending.
 * Returns ESRCH if the target is not in a live state, else 0.
 */
int
pthread_cancel(pthread_t thread)
{
	pthread_t self;
	int flags;

	/* NOTE(review): pt_state is probed here without pt_statelock;
	 * presumably a stale read only risks a spurious ESRCH -- confirm. */
	if (!(thread->pt_state == PT_STATE_RUNNING ||
	    thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE ||
	    thread->pt_state == PT_STATE_BLOCKED_SYS))
		return ESRCH;

	self = pthread__self();
	flags = thread->pt_flags;

	flags |= PT_FLAG_CS_PENDING;
	if ((flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_state == PT_STATE_BLOCKED_SYS) {
			/*
			 * It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/*
			 * We're blocked somewhere (pthread__block()
			 * was called. Cause it to wake up and the
			 * caller will check for the cancellation.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/*
			 * Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	}

	/* NOTE(review): pt_flags is read, modified, and written back
	 * without holding a lock; a concurrent flag update on the target
	 * could be lost -- confirm this is acceptable here. */
	thread->pt_flags = flags;

	return 0;
}
678:
679:
680:
681: int
682: pthread_setcancelstate(int state, int *oldstate)
683: {
684: pthread_t self;
685: int flags;
1.1.2.32 nathanw 686:
1.1.2.13 nathanw 687: self = pthread__self();
688: flags = self->pt_flags;
689:
690: if (oldstate != NULL) {
691: if (flags & PT_FLAG_CS_DISABLED)
692: *oldstate = PTHREAD_CANCEL_DISABLE;
693: else
694: *oldstate = PTHREAD_CANCEL_ENABLE;
695: }
696:
697: if (state == PTHREAD_CANCEL_DISABLE)
698: flags |= PT_FLAG_CS_DISABLED;
699: else if (state == PTHREAD_CANCEL_ENABLE) {
700: flags &= ~PT_FLAG_CS_DISABLED;
701: /*
702: * If a cancellation was requested while cancellation
703: * was disabled, note that fact for future
704: * cancellation tests.
705: */
706: if (flags & PT_FLAG_CS_PENDING) {
707: self->pt_cancel = 1;
708: /* This is not a deferred cancellation point. */
709: if (flags & PT_FLAG_CS_ASYNC)
1.1.2.14 nathanw 710: pthread_exit(PTHREAD_CANCELED);
1.1.2.13 nathanw 711: }
712: } else
713: return EINVAL;
1.1.2.32 nathanw 714:
1.1.2.13 nathanw 715: self->pt_flags = flags;
716:
717: return 0;
718: }
719:
720:
721: int
722: pthread_setcanceltype(int type, int *oldtype)
723: {
724: pthread_t self;
725: int flags;
1.1.2.32 nathanw 726:
1.1.2.13 nathanw 727: self = pthread__self();
728: flags = self->pt_flags;
729:
730: if (oldtype != NULL) {
731: if (flags & PT_FLAG_CS_ASYNC)
732: *oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
733: else
734: *oldtype = PTHREAD_CANCEL_DEFERRED;
735: }
736:
737: if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
738: flags |= PT_FLAG_CS_ASYNC;
739: if (self->pt_cancel)
1.1.2.14 nathanw 740: pthread_exit(PTHREAD_CANCELED);
1.1.2.13 nathanw 741: } else if (type == PTHREAD_CANCEL_DEFERRED)
742: flags &= ~PT_FLAG_CS_ASYNC;
743: else
744: return EINVAL;
745:
746: self->pt_flags = flags;
747:
748: return 0;
749: }
750:
751:
/*
 * Deferred cancellation point for application code.  Delegates to the
 * internal pthread__testcancel() helper so the pending-cancel check
 * lives in exactly one place (the two previously duplicated it).
 */
void
pthread_testcancel(void)
{

	pthread__testcancel(pthread__self());
}
761:
762: /*
763: * POSIX requires that certain functions return an error rather than
764: * invoking undefined behavior even when handed completely bogus
765: * pthread_t values, e.g. stack garbage or (pthread_t)666. This
766: * utility routine searches the list of threads for the pthread_t
767: * value without dereferencing it.
768: */
769: int
770: pthread__find(pthread_t self, pthread_t id)
771: {
772: pthread_t target;
773:
774: pthread_spinlock(self, &allqueue_lock);
775: PTQ_FOREACH(target, &allqueue, pt_allq)
776: if (target == id)
777: break;
778: pthread_spinunlock(self, &allqueue_lock);
779:
780: if (target == NULL)
781: return ESRCH;
782:
783: return 0;
1.1.2.13 nathanw 784: }
785:
786:
/* Internal deferred cancellation point: exit if a cancel is pending
 * on 'self'. */
void
pthread__testcancel(pthread_t self)
{

	if (self->pt_cancel)
		pthread_exit(PTHREAD_CANCELED);
}
794:
795: void
796: pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
797: {
798: pthread_t self;
799: struct pt_clean_t *entry;
1.1.2.32 nathanw 800:
1.1.2.13 nathanw 801: self = pthread__self();
802: entry = store;
803: entry->ptc_cleanup = cleanup;
804: entry->ptc_arg = arg;
805: PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
1.1.2.32 nathanw 806:
1.1.2.13 nathanw 807: }
808:
809: void
810: pthread__cleanup_pop(int ex, void *store)
811: {
812: pthread_t self;
813: struct pt_clean_t *entry;
1.1.2.32 nathanw 814:
1.1.2.13 nathanw 815: self = pthread__self();
816: entry = store;
1.1.2.32 nathanw 817:
1.1.2.13 nathanw 818: PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
819: if (ex)
820: (*entry->ptc_cleanup)(entry->ptc_arg);
1.1.2.32 nathanw 821:
1.1.2.13 nathanw 822: }
823:
824:
/* Hand libc the address of the calling thread's private errno cell
 * (installed via the pthread_ops table). */
int *
pthread__errno(void)
{
	pthread_t self;

	self = pthread__self();

	return &(self->pt_errno);
}
CVSweb <webmaster@jp.NetBSD.org>