Annotation of src/lib/libpthread/pthread.c, Revision 1.1.2.19
1.1.2.19! nathanw 1: /* $NetBSD: pthread.c,v 1.1.2.18 2002/03/01 01:20:09 nathanw Exp $ */
1.1.2.3 nathanw 2:
3: /*-
4: * Copyright (c) 2001 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Nathan J. Williams.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
1.1.2.1 nathanw 38:
39: #include <assert.h>
40: #include <err.h>
41: #include <errno.h>
1.1.2.14 nathanw 42: #include <lwp.h>
1.1.2.1 nathanw 43: #include <signal.h>
44: #include <stdlib.h>
1.1.2.11 nathanw 45: #include <string.h>
1.1.2.1 nathanw 46: #include <ucontext.h>
1.1.2.19! nathanw 47: #include <sys/cdefs.h>
1.1.2.1 nathanw 48:
49: #include "sched.h"
50: #include "pthread.h"
51: #include "pthread_int.h"
52:
53:
54: static void pthread__create_tramp(void *(*start)(void *), void *arg);
55:
56: static pthread_attr_t pthread_default_attr;
57:
1.1.2.13 nathanw 58: pthread_spin_t allqueue_lock;
59: struct pthread_queue_t allqueue;
1.1.2.9 nathanw 60: static int nthreads;
1.1.2.1 nathanw 61:
1.1.2.13 nathanw 62: pthread_spin_t deadqueue_lock;
63: struct pthread_queue_t deadqueue;
64: struct pthread_queue_t reidlequeue;
1.1.2.1 nathanw 65:
66:
1.1.2.13 nathanw 67: extern struct pthread_queue_t runqueue;
68: extern struct pthread_queue_t idlequeue;
69: extern pthread_spin_t runqueue_lock;
1.1.2.1 nathanw 70:
71: static int started;
1.1.2.19! nathanw 72:
! 73: /* Aliases for use by libc */
! 74: __weak_alias(_libc_pthread_exit, pthread_exit)
! 75: __weak_alias(_libc_pthread__erno, pthread__errno)
1.1.2.1 nathanw 76:
/* This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes is an internal example).
 */
void pthread_init(void)
{
	pthread_t first;
	extern int __isthreaded;	/* libc's "threads are active" flag */

#ifdef PTHREAD__DEBUG
	pthread__debug_init();
#endif
	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&allqueue);
	PTQ_INIT(&deadqueue);
	PTQ_INIT(&reidlequeue);
	PTQ_INIT(&runqueue);
	PTQ_INIT(&idlequeue);

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first);
	/* how=0 with a NULL set only queries: snapshot the process's
	 * current signal mask as the initial thread's mask. */
	sigprocmask(0, NULL, &first->pt_sigmask);
	PTQ_INSERT_HEAD(&allqueue, first, pt_allq);

	/* Start subsystems */
	pthread__alarm_init();

	/* Tell libc that we're here and it should role-play accordingly. */
	__isthreaded = 1;

}
111:
112: static void
113: pthread__start(void)
114: {
115: pthread_t self, idle;
116: int i, ret;
117:
118: self = pthread__self(); /* should be the "main()" thread */
119:
1.1.2.18 nathanw 120: /* Create idle threads */
1.1.2.4 nathanw 121: for (i = 0; i < NIDLETHREADS; i++) {
122: ret = pthread__stackalloc(&idle);
123: if (ret != 0)
124: err(1, "Couldn't allocate stack for idle thread!");
125: pthread__initthread(idle);
1.1.2.17 nathanw 126: sigfillset(&idle->pt_sigmask);
1.1.2.4 nathanw 127: PTQ_INSERT_HEAD(&allqueue, idle, pt_allq);
1.1.2.8 nathanw 128: pthread__sched_idle(self, idle);
1.1.2.4 nathanw 129: }
1.1.2.1 nathanw 130:
1.1.2.9 nathanw 131: nthreads = 1;
1.1.2.1 nathanw 132: /* Start up the SA subsystem */
133: pthread__sa_start();
134: }
135:
136: /* General-purpose thread data structure sanitization. */
137: void
138: pthread__initthread(pthread_t t)
139: {
140: t->pt_magic = PT_MAGIC;
141: t->pt_type = PT_THREAD_NORMAL;
142: t->pt_state = PT_STATE_RUNNABLE;
1.1.2.14 nathanw 143: pthread_lockinit(&t->pt_statelock);
1.1.2.1 nathanw 144: t->pt_spinlocks = 0;
145: t->pt_next = NULL;
146: t->pt_exitval = NULL;
147: t->pt_flags = 0;
1.1.2.13 nathanw 148: t->pt_cancel = 0;
149: t->pt_errno = 0;
1.1.2.1 nathanw 150: t->pt_parent = NULL;
151: t->pt_heldlock = NULL;
1.1.2.7 nathanw 152: t->pt_switchto = NULL;
153: t->pt_sleepuc = NULL;
1.1.2.1 nathanw 154: sigemptyset(&t->pt_siglist);
155: sigemptyset(&t->pt_sigmask);
1.1.2.14 nathanw 156: pthread_lockinit(&t->pt_siglock);
1.1.2.2 nathanw 157: PTQ_INIT(&t->pt_joiners);
1.1.2.14 nathanw 158: pthread_lockinit(&t->pt_join_lock);
1.1.2.13 nathanw 159: PTQ_INIT(&t->pt_cleanup_stack);
1.1.2.8 nathanw 160: memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
1.1.2.1 nathanw 161: #ifdef PTHREAD__DEBUG
162: t->blocks = 0;
163: t->preempts = 0;
164: t->rescheds = 0;
165: #endif
166: }
167:
168: int
169: pthread_create(pthread_t *thread, const pthread_attr_t *attr,
170: void *(*startfunc)(void *), void *arg)
171: {
172: pthread_t self, newthread;
173: pthread_attr_t nattr;
174: int ret;
175:
176: PTHREADD_ADD(PTHREADD_CREATE);
177: assert(thread != NULL);
178:
179: /* It's okay to check this without a lock because there can
1.1.2.8 nathanw 180: * only be one thread before it becomes true.
1.1.2.1 nathanw 181: */
182: if (started == 0) {
183: started = 1;
184: pthread__start();
185: }
186:
187: if (attr == NULL)
188: nattr = pthread_default_attr;
1.1.2.2 nathanw 189: else if (((attr != NULL) && (attr->pta_magic == PT_ATTR_MAGIC)))
1.1.2.1 nathanw 190: nattr = *attr;
191: else
192: return EINVAL;
193:
194:
195: self = pthread__self();
196:
197: /* 1. Set up a stack and allocate space for a pthread_st. */
198: ret = pthread__stackalloc(&newthread);
199: if (ret != 0)
200: return ret;
201:
202: /* 2. Set up state. */
203: pthread__initthread(newthread);
1.1.2.2 nathanw 204: newthread->pt_flags = nattr.pta_flags;
1.1.2.1 nathanw 205: newthread->pt_sigmask = self->pt_sigmask;
206:
207: /* 3. Set up context. */
208: /* The pt_uc pointer points to a location safely below the
209: * stack start; this is arranged by pthread__stackalloc().
210: */
1.1.2.12 nathanw 211: getcontext(newthread->pt_uc);
1.1.2.1 nathanw 212: newthread->pt_uc->uc_stack = newthread->pt_stack;
213: newthread->pt_uc->uc_link = NULL;
214: makecontext(newthread->pt_uc, pthread__create_tramp, 2,
215: startfunc, arg);
216:
1.1.2.9 nathanw 217: /* 4. Add to list of all threads. */
1.1.2.1 nathanw 218: pthread_spinlock(self, &allqueue_lock);
1.1.2.2 nathanw 219: PTQ_INSERT_HEAD(&allqueue, newthread, pt_allq);
1.1.2.9 nathanw 220: nthreads++;
1.1.2.1 nathanw 221: pthread_spinunlock(self, &allqueue_lock);
1.1.2.9 nathanw 222:
223: /* 5. Put on run queue. */
1.1.2.1 nathanw 224: pthread__sched(self, newthread);
225:
226: *thread = newthread;
227:
228: return 0;
229: }
230:
/*
 * Entry point for every new thread (installed via makecontext() in
 * pthread_create()).  Runs the user's start routine and hands whatever
 * it returns to pthread_exit(), which does not return.
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{

	pthread_exit((*start)(arg));

	/* NOTREACHED */
}
242:
243:
/*
 * Other threads will switch to the idle thread so that they
 * can dispose of any awkward locks or recycle upcall state.
 */
void
pthread__idle(void)
{
	pthread_t self;

	PTHREADD_ADD(PTHREADD_IDLE);
	self = pthread__self();

	/* The drill here is that we want to yield the processor,
	 * but for the thread itself to be recovered, we need to be on
	 * a list somewhere for the thread system to know about us.
	 */
	/* NOTE: reidlequeue is protected by deadqueue_lock here, not a
	 * lock of its own. */
	pthread_spinlock(self, &deadqueue_lock);
	PTQ_INSERT_TAIL(&reidlequeue, self, pt_runq);
	self->pt_flags |= PT_FLAG_IDLED;
	pthread_spinunlock(self, &deadqueue_lock);

	/*
	 * If we get to run this, then no preemption has happened
	 * (because the upcall handler will not continue an idle thread with
	 * PT_FLAG_IDLED set), and so we can yield the processor safely.
	 */
	sa_yield();
}
272:
273:
/*
 * Terminate the calling thread with exit value retval.
 *
 * Runs pending cancellation cleanup handlers, destroys thread-specific
 * data, then either parks the thread on the deadqueue (detached) or
 * becomes a zombie awaiting pthread_join().  If this was the last
 * application thread, the process exits.  Never returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self, joiner;
	struct pt_clean_t *cleanup;
	int nt;

	self = pthread__self();

	/* Disable cancellability. */
	self->pt_flags |= PT_FLAG_CS_DISABLED;

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	pthread_spinlock(self, &self->pt_join_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		/* Detached: nobody will join us, so we recycle ourselves. */
		pthread_spinunlock(self, &self->pt_join_lock);

		pthread_spinlock(self, &allqueue_lock);
		PTQ_REMOVE(&allqueue, self, pt_allq);
		nthreads--;
		/* Snapshot the count while the lock is held; nthreads may
		 * change the instant we release it. */
		nt = nthreads;
		pthread_spinunlock(self, &allqueue_lock);

		self->pt_state = PT_STATE_DEAD;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}

		/* Yeah, yeah, doing work while we're dead is tacky. */
		pthread_spinlock(self, &deadqueue_lock);
		PTQ_INSERT_HEAD(&deadqueue, self, pt_allq);
		/* Block forever; pthread__block() releases the lock. */
		pthread__block(self, &deadqueue_lock);
	} else {
		/* Joinable: become a zombie and wait to be reaped. */
		pthread_spinlock(self, &allqueue_lock);
		nthreads--;
		nt = nthreads;
		pthread_spinunlock(self, &allqueue_lock);
		self->pt_state = PT_STATE_ZOMBIE;
		if (nt == 0) {
			/* Whoah, we're the last one. Time to go. */
			exit(0);
		}
		/* Wake up all the potential joiners. Only one can win.
		 * (Can you say "Thundering Herd"? I knew you could.)
		 */
		PTQ_FOREACH(joiner, &self->pt_joiners, pt_sleep)
			pthread__sched(self, joiner);
		pthread__block(self, &self->pt_join_lock);
	}


	/* NOTREACHED */
	assert(0);
	exit(1);
}
341:
342:
/*
 * Wait for "thread" to terminate; store its exit value in *valptr if
 * valptr is non-NULL, then move the corpse to the deadqueue for
 * recycling.
 *
 * Returns 0 on success, EINVAL for a bad or detached thread, EDEADLK
 * for a self-join, ESRCH if another joiner (or pthread_detach()) got
 * there first.  Acts as a cancellation point while waiting.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;

	if ((thread == NULL) || (thread->pt_magic != PT_MAGIC))
		return EINVAL;

	self = pthread__self();

	if (thread == self)
		return EDEADLK;

	pthread_spinlock(self, &thread->pt_join_lock);

	if (thread->pt_flags & PT_FLAG_DETACHED) {
		pthread_spinunlock(self, &thread->pt_join_lock);
		return EINVAL;
	}

	if ((thread->pt_state != PT_STATE_ZOMBIE) &&
	    (thread->pt_state != PT_STATE_DEAD)) {
		/*
		 * "I'm not dead yet!"
		 * "You will be soon enough."
		 */
		/* Honor a pending cancellation before sleeping; drop both
		 * locks first since pthread_exit() does not return. */
		pthread_spinlock(self, &self->pt_statelock);
		if (self->pt_cancel) {
			pthread_spinunlock(self, &self->pt_statelock);
			pthread_spinunlock(self, &thread->pt_join_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
		self->pt_state = PT_STATE_BLOCKED_QUEUE;
		pthread_spinunlock(self, &self->pt_statelock);

		/* Sleep on the target's joiner queue; pthread__block()
		 * releases pt_join_lock, so re-take it on wakeup. */
		PTQ_INSERT_TAIL(&thread->pt_joiners, self, pt_sleep);
		pthread__block(self, &thread->pt_join_lock);
		pthread_spinlock(self, &thread->pt_join_lock);
	}

	if ((thread->pt_state == PT_STATE_DEAD) ||
	    (thread->pt_flags & PT_FLAG_DETACHED)) {
		/* Someone beat us to the join, or called pthread_detach(). */
		pthread_spinunlock(self, &thread->pt_join_lock);
		return ESRCH;
	}

	/* All ours. */
	thread->pt_state = PT_STATE_DEAD;
	pthread_spinunlock(self, &thread->pt_join_lock);

	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	/* Cleanup time. Move the dead thread from allqueue to the deadqueue */
	pthread_spinlock(self, &allqueue_lock);
	PTQ_REMOVE(&allqueue, thread, pt_allq);
	pthread_spinunlock(self, &allqueue_lock);

	pthread_spinlock(self, &deadqueue_lock);
	PTQ_INSERT_HEAD(&deadqueue, thread, pt_allq);
	pthread_spinunlock(self, &deadqueue_lock);

	return 0;
}
408:
409: int
410: pthread_equal(pthread_t t1, pthread_t t2)
411: {
412:
413: /* Nothing special here. */
414: return (t1 == t2);
415: }
416:
417: int
418: pthread_detach(pthread_t thread)
419: {
420: pthread_t self, joiner;
421:
422: if ((thread == NULL) || (thread->pt_magic != PT_MAGIC))
423: return EINVAL;
424:
425: self = pthread__self();
426: pthread_spinlock(self, &thread->pt_join_lock);
427:
428: if (thread->pt_flags & PT_FLAG_DETACHED) {
429: pthread_spinunlock(self, &thread->pt_join_lock);
430: return EINVAL;
431: }
432:
433: thread->pt_flags |= PT_FLAG_DETACHED;
434:
435: /* Any joiners have to be punted now. */
1.1.2.2 nathanw 436: PTQ_FOREACH(joiner, &thread->pt_joiners, pt_sleep)
1.1.2.1 nathanw 437: pthread__sched(self, joiner);
438:
439: pthread_spinunlock(self, &thread->pt_join_lock);
440:
441: return 0;
442: }
443:
/*
 * Yield the processor to another runnable thread.
 * Currently a stub that always reports success without yielding.
 */
int
sched_yield(void)
{
	/* XXX implement me */
	return 0;
}
450:
451:
452:
453: int
454: pthread_attr_init(pthread_attr_t *attr)
455: {
456:
1.1.2.2 nathanw 457: attr->pta_magic = PT_ATTR_MAGIC;
458: attr->pta_flags = 0;
1.1.2.1 nathanw 459:
460: return 0;
461: }
462:
463:
/*
 * Destroy an attribute object.  Attributes hold no dynamically
 * allocated state, so there is nothing to release; always returns 0.
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{

	return 0;
}
470:
471:
472: int
473: pthread_attr_getdetachstate(pthread_attr_t *attr, int *detachstate)
474: {
475:
1.1.2.2 nathanw 476: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 477: return EINVAL;
478:
1.1.2.2 nathanw 479: *detachstate = (attr->pta_flags & PT_FLAG_DETACHED);
1.1.2.1 nathanw 480:
481: return 0;
482: }
483:
484:
485: int
486: pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
487: {
1.1.2.2 nathanw 488: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 489: return EINVAL;
490:
491: switch (detachstate) {
492: case PTHREAD_CREATE_JOINABLE:
1.1.2.2 nathanw 493: attr->pta_flags &= ~PT_FLAG_DETACHED;
1.1.2.1 nathanw 494: break;
495: case PTHREAD_CREATE_DETACHED:
1.1.2.2 nathanw 496: attr->pta_flags |= PT_FLAG_DETACHED;
1.1.2.1 nathanw 497: break;
498: default:
499: return EINVAL;
500: }
501:
502: return 0;
503: }
504:
505:
506: int
507: pthread_attr_setschedparam(pthread_attr_t *attr,
508: const struct sched_param *param)
509: {
510:
1.1.2.2 nathanw 511: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 512: return EINVAL;
513:
514: if (param == NULL)
515: return EINVAL;
516:
517: if (param->sched_priority != 0)
518: return EINVAL;
519:
520: return 0;
521: }
522:
523:
524: int
525: pthread_attr_getschedparam(pthread_attr_t *attr, struct sched_param *param)
526: {
527:
1.1.2.2 nathanw 528: if ((attr == NULL) || (attr->pta_magic != PT_ATTR_MAGIC))
1.1.2.1 nathanw 529: return EINVAL;
530:
531: if (param == NULL)
532: return EINVAL;
533:
534: param->sched_priority = 0;
535:
536: return 0;
537: }
538:
/* XXX There should be a way for applications to use the efficient
 * inline version, but there are opacity/namespace issues.
 */
542:
/* Return the calling thread's handle (wraps the internal inline form). */
pthread_t
pthread_self(void)
{
	return pthread__self();
}
548:
549:
/*
 * Request cancellation of "thread".
 *
 * Sets the pending-cancel flag; if the target has cancellation enabled,
 * also sets pt_cancel and, depending on the target's state, wakes it so
 * it can notice the cancellation.  Returns 0, or ESRCH if the target is
 * not in a cancellable state.
 *
 * NOTE(review): pt_state and pt_flags are read here before
 * pt_statelock is taken, and pt_flags is written back unlocked at the
 * end — presumably tolerable races in this design; verify.
 */
int
pthread_cancel(pthread_t thread)
{
	pthread_t self;
	int flags;

	if (!(thread->pt_state == PT_STATE_RUNNABLE ||
	    thread->pt_state == PT_STATE_BLOCKED_QUEUE ||
	    thread->pt_state == PT_STATE_BLOCKED_SYS))
		return ESRCH;

	self = pthread__self();
	flags = thread->pt_flags;

	flags |= PT_FLAG_CS_PENDING;
	if ((flags & PT_FLAG_CS_DISABLED) == 0) {
		thread->pt_cancel = 1;
		pthread_spinlock(self, &thread->pt_statelock);
		if (thread->pt_state == PT_STATE_BLOCKED_SYS) {
			/* It's sleeping in the kernel. If we can wake
			 * it up, it will notice the cancellation when
			 * it returns. If it doesn't wake up when we
			 * make this call, then it's blocked
			 * uninterruptably in the kernel, and there's
			 * not much to be done about it.
			 */
			_lwp_wakeup(thread->pt_blockedlwp);
		} else if (thread->pt_state == PT_STATE_BLOCKED_QUEUE) {
			/* We're blocked somewhere (pthread__block()
			 * was called). Cause it to wake up and the
			 * caller will check for the cancellation.
			 */
			pthread_spinlock(self, thread->pt_sleeplock);
			PTQ_REMOVE(thread->pt_sleepq, thread,
			    pt_sleep);
			pthread_spinunlock(self, thread->pt_sleeplock);
			pthread__sched(self, thread);
		} else {
			/* Nothing. The target thread is running and will
			 * notice at the next deferred cancellation point.
			 */
		}
		pthread_spinunlock(self, &thread->pt_statelock);
	}

	thread->pt_flags = flags;

	return 0;
}
599:
600:
601:
/*
 * Enable or disable cancellation for the calling thread.
 *
 * state:    PTHREAD_CANCEL_ENABLE or PTHREAD_CANCEL_DISABLE.
 * oldstate: if non-NULL, receives the previous state.
 * Returns 0, or EINVAL for an unrecognized state.
 *
 * Re-enabling with a cancel pending in async mode exits immediately
 * via pthread_exit(); in that case the updated flags are deliberately
 * never written back, since the thread is gone.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int flags;

	self = pthread__self();
	flags = self->pt_flags;

	if (oldstate != NULL) {
		if (flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE)
		flags |= PT_FLAG_CS_DISABLED;
	else if (state == PTHREAD_CANCEL_ENABLE) {
		flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (flags & PT_FLAG_CS_ASYNC)
				pthread_exit(PTHREAD_CANCELED);
		}
	} else
		return EINVAL;

	self->pt_flags = flags;

	return 0;
}
640:
641:
/*
 * Set the calling thread's cancellation type.
 *
 * type:    PTHREAD_CANCEL_ASYNCHRONOUS or PTHREAD_CANCEL_DEFERRED.
 * oldtype: if non-NULL, receives the previous type.
 * Returns 0, or EINVAL for an unrecognized type.
 *
 * Switching to asynchronous mode with a cancel already pending exits
 * immediately via pthread_exit(); the flag write-back is then skipped,
 * which is harmless since the thread terminates.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int flags;

	self = pthread__self();
	flags = self->pt_flags;

	if (oldtype != NULL) {
		if (flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel)
			pthread_exit(PTHREAD_CANCELED);
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		flags &= ~PT_FLAG_CS_ASYNC;
	else
		return EINVAL;

	self->pt_flags = flags;

	return 0;
}
671:
672:
673: void
674: pthread_testcancel()
675: {
676: pthread_t self;
677:
678: self = pthread__self();
1.1.2.14 nathanw 679: if (self->pt_cancel)
680: pthread_exit(PTHREAD_CANCELED);
1.1.2.13 nathanw 681: }
682:
683:
684: void
685: pthread__testcancel(pthread_t self)
686: {
687:
688: if (self->pt_cancel)
1.1.2.14 nathanw 689: pthread_exit(PTHREAD_CANCELED);
1.1.2.13 nathanw 690: }
691:
692: void
693: pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
694: {
695: pthread_t self;
696: struct pt_clean_t *entry;
697:
698: self = pthread__self();
699: entry = store;
700: entry->ptc_cleanup = cleanup;
701: entry->ptc_arg = arg;
702: PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
703:
704: }
705:
706: void
707: pthread__cleanup_pop(int ex, void *store)
708: {
709: pthread_t self;
710: struct pt_clean_t *entry;
711:
712: self = pthread__self();
713: entry = store;
714:
715: PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
716: if (ex)
717: (*entry->ptc_cleanup)(entry->ptc_arg);
718:
719: }
720:
721:
722: int *
723: pthread__errno(void)
724: {
725: pthread_t self;
726:
727: self = pthread__self();
728:
729: return &(self->pt_errno);
1.1.2.1 nathanw 730: }
CVSweb <webmaster@jp.NetBSD.org>