Annotation of src/lib/libpthread/pthread.c, Revision 1.87
1.87 ! ad 1: /* $NetBSD: pthread.c,v 1.86 2007/11/07 00:55:22 ad Exp $ */
1.2 thorpej 2:
3: /*-
1.62 ad 4: * Copyright (c) 2001, 2002, 2003, 2006, 2007 The NetBSD Foundation, Inc.
1.2 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.49 ad 8: * by Nathan J. Williams and Andrew Doran.
1.2 thorpej 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
1.14 lukem 39: #include <sys/cdefs.h>
1.87 ! ad 40: __RCSID("$NetBSD: pthread.c,v 1.86 2007/11/07 00:55:22 ad Exp $");
1.76 ad 41:
42: #define __EXPOSE_STACK 1
43:
44: #include <sys/param.h>
45: #include <sys/mman.h>
46: #include <sys/sysctl.h>
1.14 lukem 47:
1.2 thorpej 48: #include <err.h>
49: #include <errno.h>
50: #include <lwp.h>
51: #include <signal.h>
1.9 nathanw 52: #include <stdio.h>
1.2 thorpej 53: #include <stdlib.h>
54: #include <string.h>
1.24 nathanw 55: #include <syslog.h>
1.2 thorpej 56: #include <ucontext.h>
1.9 nathanw 57: #include <unistd.h>
1.76 ad 58: #include <sched.h>
1.2 thorpej 59:
60: #include "pthread.h"
61: #include "pthread_int.h"
62:
1.84 ad 63: pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER;
64: RB_HEAD(__pthread__alltree, __pthread_st) pthread__alltree;
65:
66: #ifndef lint
67: static int pthread__cmp(struct __pthread_st *, struct __pthread_st *);
68: RB_PROTOTYPE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
69: #endif
70:
1.69 ad 71: static void pthread__create_tramp(void *(*)(void *), void *);
72: static void pthread__initthread(pthread_t);
1.77 ad 73: static void pthread__scrubthread(pthread_t, char *, int);
1.76 ad 74: static int pthread__stackid_setup(void *, size_t, pthread_t *);
75: static int pthread__stackalloc(pthread_t *);
76: static void pthread__initmain(pthread_t *);
1.2 thorpej 77:
1.87 ! ad 78: void pthread__init(void);
! 79:
1.2 thorpej 80: int pthread__started;
81:
1.82 ad 82: pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER;
1.69 ad 83: pthread_queue_t pthread__deadqueue;
1.85 ad 84: pthread_queue_t pthread__allqueue;
1.2 thorpej 85:
86: static pthread_attr_t pthread_default_attr;
87:
1.24 nathanw 88: enum {
89: DIAGASSERT_ABORT = 1<<0,
90: DIAGASSERT_STDERR = 1<<1,
91: DIAGASSERT_SYSLOG = 1<<2
92: };
1.17 nathanw 93:
1.24 nathanw 94: static int pthread__diagassert = DIAGASSERT_ABORT | DIAGASSERT_STDERR;
1.17 nathanw 95:
1.74 ad 96: int pthread__concurrency;
97: int pthread__nspins;
1.52 ad 98: int pthread__unpark_max = PTHREAD__UNPARK_MAX;
1.73 ad 99: int pthread__osrev;
1.33 cl 100:
1.76 ad 101: /*
102: * We have to initialize the pthread_stack* variables here because
103: * mutexes are used before pthread_init() and thus pthread__initmain()
104: * are called. Since mutexes only save the stack pointer and not a
105: * pointer to the thread data, it is safe to change the mapping from
106: * stack pointer to thread data afterwards.
107: */
108: #define _STACKSIZE_LG 18
109: int pthread__stacksize_lg = _STACKSIZE_LG;
110: size_t pthread__stacksize = 1 << _STACKSIZE_LG;
111: vaddr_t pthread__stackmask = (1 << _STACKSIZE_LG) - 1;
1.84 ad 112: vaddr_t pthread__threadmask = (vaddr_t)~((1 << _STACKSIZE_LG) - 1);
1.76 ad 113: #undef _STACKSIZE_LG
114:
1.48 drochner 115: int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);
116:
1.2 thorpej 117: __strong_alias(__libc_thr_self,pthread_self)
1.5 thorpej 118: __strong_alias(__libc_thr_create,pthread_create)
119: __strong_alias(__libc_thr_exit,pthread_exit)
1.2 thorpej 120: __strong_alias(__libc_thr_errno,pthread__errno)
1.23 nathanw 121: __strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
1.87 ! ad 122: __strong_alias(__libc_thr_init,pthread__init)
1.2 thorpej 123:
124: /*
125: * Static library kludge. Place a reference to a symbol any library
126: * file which does not already have a reference here.
127: */
128: extern int pthread__cancel_stub_binder;
129:
130: void *pthread__static_lib_binder[] = {
131: &pthread__cancel_stub_binder,
132: pthread_cond_init,
133: pthread_mutex_init,
134: pthread_rwlock_init,
135: pthread_barrier_init,
136: pthread_key_create,
1.29 nathanw 137: pthread_setspecific,
1.2 thorpej 138: };
139:
140: /*
141: * This needs to be started by the library loading code, before main()
142: * gets to run, for various things that use the state of the initial thread
143: * to work properly (thread-specific data is an application-visible example;
144: * spinlock counts for mutexes is an internal example).
145: */
146: void
1.87 ! ad 147: pthread__init(void)
1.2 thorpej 148: {
149: pthread_t first;
1.24 nathanw 150: char *p;
1.77 ad 151: int i, mib[2];
1.33 cl 152: size_t len;
1.2 thorpej 153: extern int __isthreaded;
154:
1.33 cl 155: mib[0] = CTL_HW;
156: mib[1] = HW_NCPU;
157:
1.77 ad 158: len = sizeof(pthread__concurrency);
159: if (sysctl(mib, 2, &pthread__concurrency, &len, NULL, 0) == -1)
1.66 ad 160: err(1, "sysctl(hw.ncpu");
1.33 cl 161:
1.72 ad 162: mib[0] = CTL_KERN;
163: mib[1] = KERN_OSREV;
164:
165: len = sizeof(pthread__osrev);
166: if (sysctl(mib, 2, &pthread__osrev, &len, NULL, 0) == -1)
167: err(1, "sysctl(hw.osrevision");
168:
1.2 thorpej 169: /* Initialize locks first; they're needed elsewhere. */
1.77 ad 170: pthread__lockprim_init();
1.33 cl 171:
1.77 ad 172: /* Fetch parameters. */
1.59 ad 173: i = (int)_lwp_unpark_all(NULL, 0, NULL);
174: if (i == -1)
175: err(1, "_lwp_unpark_all");
1.52 ad 176: if (i < pthread__unpark_max)
177: pthread__unpark_max = i;
1.2 thorpej 178:
179: /* Basic data structure setup */
180: pthread_attr_init(&pthread_default_attr);
1.85 ad 181: PTQ_INIT(&pthread__allqueue);
1.2 thorpej 182: PTQ_INIT(&pthread__deadqueue);
1.84 ad 183: RB_INIT(&pthread__alltree);
1.76 ad 184:
1.2 thorpej 185: /* Create the thread structure corresponding to main() */
186: pthread__initmain(&first);
1.69 ad 187: pthread__initthread(first);
1.77 ad 188: pthread__scrubthread(first, NULL, 0);
1.49 ad 189:
190: first->pt_lid = _lwp_self();
1.85 ad 191: PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);
1.84 ad 192: RB_INSERT(__pthread__alltree, &pthread__alltree, first);
1.2 thorpej 193:
194: /* Start subsystems */
195: PTHREAD_MD_INIT
1.77 ad 196: pthread__debug_init();
1.2 thorpej 197:
1.87 ! ad 198: for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
1.24 nathanw 199: switch (*p) {
200: case 'a':
201: pthread__diagassert |= DIAGASSERT_ABORT;
202: break;
203: case 'A':
204: pthread__diagassert &= ~DIAGASSERT_ABORT;
205: break;
206: case 'e':
207: pthread__diagassert |= DIAGASSERT_STDERR;
208: break;
209: case 'E':
210: pthread__diagassert &= ~DIAGASSERT_STDERR;
211: break;
212: case 'l':
213: pthread__diagassert |= DIAGASSERT_SYSLOG;
214: break;
215: case 'L':
216: pthread__diagassert &= ~DIAGASSERT_SYSLOG;
217: break;
218: }
1.17 nathanw 219: }
220:
1.24 nathanw 221:
1.2 thorpej 222: /* Tell libc that we're here and it should role-play accordingly. */
223: __isthreaded = 1;
224: }
225:
1.16 nathanw 226: static void
227: pthread__child_callback(void)
228: {
229: /*
230: * Clean up data structures that a forked child process might
231: * trip over. Note that if threads have been created (causing
232: * this handler to be registered) the standards say that the
233: * child will trigger undefined behavior if it makes any
234: * pthread_* calls (or any other calls that aren't
235: * async-signal-safe), so we don't really have to clean up
236: * much. Anything that permits some pthread_* calls to work is
237: * merely being polite.
238: */
239: pthread__started = 0;
240: }
1.2 thorpej 241:
1.45 chs 242: static void
1.2 thorpej 243: pthread__start(void)
244: {
1.18 nathanw 245:
246: /*
247: * Per-process timers are cleared by fork(); despite the
248: * various restrictions on fork() and threads, it's legal to
249: * fork() before creating any threads.
250: */
1.16 nathanw 251: pthread_atfork(NULL, NULL, pthread__child_callback);
1.2 thorpej 252: }
253:
254:
255: /* General-purpose thread data structure sanitization. */
1.69 ad 256: /* ARGSUSED */
257: static void
258: pthread__initthread(pthread_t t)
1.2 thorpej 259: {
260:
1.87 ! ad 261: t->pt_self = t;
1.2 thorpej 262: t->pt_magic = PT_MAGIC;
1.72 ad 263: t->pt_willpark = 0;
264: t->pt_unpark = 0;
265: t->pt_sleeponq = 0;
1.81 ad 266: t->pt_nwaiters = 0;
1.72 ad 267: t->pt_sleepobj = NULL;
268: t->pt_signalled = 0;
1.77 ad 269: t->pt_havespecific = 0;
1.81 ad 270: t->pt_early = NULL;
1.49 ad 271:
1.87 ! ad 272: memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
1.82 ad 273: pthread_mutex_init(&t->pt_lock, NULL);
1.2 thorpej 274: PTQ_INIT(&t->pt_cleanup_stack);
1.71 ad 275: PTQ_INIT(&t->pt_joiners);
1.2 thorpej 276: memset(&t->pt_specific, 0, sizeof(int) * PTHREAD_KEYS_MAX);
277: }
278:
1.77 ad 279: static void
280: pthread__scrubthread(pthread_t t, char *name, int flags)
281: {
282:
283: t->pt_state = PT_STATE_RUNNING;
284: t->pt_exitval = NULL;
285: t->pt_flags = flags;
286: t->pt_cancel = 0;
287: t->pt_errno = 0;
288: t->pt_name = name;
289: t->pt_lid = 0;
290: }
291:
1.2 thorpej 292:
293: int
294: pthread_create(pthread_t *thread, const pthread_attr_t *attr,
295: void *(*startfunc)(void *), void *arg)
296: {
1.78 ad 297: pthread_t newthread;
1.2 thorpej 298: pthread_attr_t nattr;
1.11 thorpej 299: struct pthread_attr_private *p;
1.56 christos 300: char * volatile name;
1.75 ad 301: unsigned long flag;
302: int ret;
1.2 thorpej 303:
304: PTHREADD_ADD(PTHREADD_CREATE);
305:
306: /*
307: * It's okay to check this without a lock because there can
308: * only be one thread before it becomes true.
309: */
310: if (pthread__started == 0) {
311: pthread__start();
312: pthread__started = 1;
313: }
314:
315: if (attr == NULL)
316: nattr = pthread_default_attr;
1.7 drochner 317: else if (attr->pta_magic == PT_ATTR_MAGIC)
1.2 thorpej 318: nattr = *attr;
319: else
320: return EINVAL;
321:
1.11 thorpej 322: /* Fetch misc. attributes from the attr structure. */
1.12 nathanw 323: name = NULL;
324: if ((p = nattr.pta_private) != NULL)
325: if (p->ptap_name[0] != '\0')
1.11 thorpej 326: if ((name = strdup(p->ptap_name)) == NULL)
327: return ENOMEM;
1.2 thorpej 328:
1.70 ad 329: newthread = NULL;
1.2 thorpej 330:
1.77 ad 331: /*
332: * Try to reclaim a dead thread.
333: */
1.70 ad 334: if (!PTQ_EMPTY(&pthread__deadqueue)) {
1.82 ad 335: pthread_mutex_lock(&pthread__deadqueue_lock);
1.70 ad 336: newthread = PTQ_FIRST(&pthread__deadqueue);
337: if (newthread != NULL) {
1.77 ad 338: PTQ_REMOVE(&pthread__deadqueue, newthread, pt_deadq);
1.82 ad 339: pthread_mutex_unlock(&pthread__deadqueue_lock);
1.70 ad 340: if ((newthread->pt_flags & PT_FLAG_DETACHED) != 0) {
341: /* Still running? */
342: if (_lwp_kill(newthread->pt_lid, 0) == 0 ||
343: errno != ESRCH) {
1.82 ad 344: pthread_mutex_lock(
1.77 ad 345: &pthread__deadqueue_lock);
1.70 ad 346: PTQ_INSERT_TAIL(&pthread__deadqueue,
1.77 ad 347: newthread, pt_deadq);
1.82 ad 348: pthread_mutex_unlock(
1.77 ad 349: &pthread__deadqueue_lock);
1.70 ad 350: newthread = NULL;
351: }
1.69 ad 352: }
353: } else
1.82 ad 354: pthread_mutex_unlock(&pthread__deadqueue_lock);
1.60 yamt 355: }
1.70 ad 356:
1.77 ad 357: /*
358: * If necessary set up a stack, allocate space for a pthread_st,
359: * and initialize it.
360: */
1.60 yamt 361: if (newthread == NULL) {
1.2 thorpej 362: ret = pthread__stackalloc(&newthread);
1.31 christos 363: if (ret != 0) {
364: if (name)
365: free(name);
1.2 thorpej 366: return ret;
1.31 christos 367: }
1.75 ad 368:
1.77 ad 369: /* This is used only when creating the thread. */
1.75 ad 370: _INITCONTEXT_U(&newthread->pt_uc);
371: #ifdef PTHREAD_MACHINE_HAS_ID_REGISTER
372: pthread__uc_id(&newthread->pt_uc) = newthread;
373: #endif
374: newthread->pt_uc.uc_stack = newthread->pt_stack;
375: newthread->pt_uc.uc_link = NULL;
1.77 ad 376:
377: /* Add to list of all threads. */
1.84 ad 378: pthread_rwlock_wrlock(&pthread__alltree_lock);
1.85 ad 379: PTQ_INSERT_TAIL(&pthread__allqueue, newthread, pt_allq);
1.84 ad 380: RB_INSERT(__pthread__alltree, &pthread__alltree, newthread);
381: pthread_rwlock_unlock(&pthread__alltree_lock);
1.77 ad 382:
383: /* Will be reset by the thread upon exit. */
384: pthread__initthread(newthread);
1.2 thorpej 385: }
386:
1.77 ad 387: /*
388: * Create the new LWP.
389: */
390: pthread__scrubthread(newthread, name, nattr.pta_flags);
1.75 ad 391: makecontext(&newthread->pt_uc, pthread__create_tramp, 2,
1.2 thorpej 392: startfunc, arg);
393:
1.52 ad 394: flag = 0;
395: if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0)
396: flag |= LWP_SUSPENDED;
397: if ((newthread->pt_flags & PT_FLAG_DETACHED) != 0)
398: flag |= LWP_DETACHED;
1.75 ad 399: ret = _lwp_create(&newthread->pt_uc, flag, &newthread->pt_lid);
1.49 ad 400: if (ret != 0) {
401: free(name);
1.77 ad 402: newthread->pt_state = PT_STATE_DEAD;
1.82 ad 403: pthread_mutex_lock(&pthread__deadqueue_lock);
1.77 ad 404: PTQ_INSERT_HEAD(&pthread__deadqueue, newthread, pt_deadq);
1.82 ad 405: pthread_mutex_unlock(&pthread__deadqueue_lock);
1.49 ad 406: return ret;
407: }
408:
1.2 thorpej 409: *thread = newthread;
410:
411: return 0;
412: }
413:
414:
/*
 * Entry point for every thread created via _lwp_create(): publish the
 * LWP name if one was set, run the user's start routine, then exit the
 * thread with its return value.  Never returns.
 */
static void
pthread__create_tramp(void *(*start)(void *), void *arg)
{
	pthread_t self;
	void *retval;

	/*
	 * Throw away some stack in a feeble attempt to reduce cache
	 * thrash.  May help for SMT processors.  XXX We should not
	 * be allocating stacks on fixed 2MB boundaries.  Needs a
	 * thread register or decent thread local storage.  Note
	 * that pt_lid may not be set by this point, but we don't
	 * care.
	 */
	self = pthread__self();
	(void)alloca(((unsigned)self->pt_lid & 7) << 8);

	/* Unlocked peek first; take pt_lock only if a name is present. */
	if (self->pt_name != NULL) {
		pthread_mutex_lock(&self->pt_lock);
		/* Re-check under the lock: the name may have changed. */
		if (self->pt_name != NULL)
			(void)_lwp_setname(_lwp_self(), self->pt_name);
		pthread_mutex_unlock(&self->pt_lock);
	}

	retval = (*start)(arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}
446:
447: int
448: pthread_suspend_np(pthread_t thread)
449: {
1.44 chs 450: pthread_t self;
451:
452: self = pthread__self();
1.30 christos 453: if (self == thread) {
454: return EDEADLK;
455: }
1.44 chs 456: #ifdef ERRORCHECK
1.78 ad 457: if (pthread__find(thread) != 0)
1.44 chs 458: return ESRCH;
459: #endif
1.79 ad 460: if (_lwp_suspend(thread->pt_lid) == 0)
461: return 0;
462: return errno;
1.30 christos 463: }
464:
465: int
466: pthread_resume_np(pthread_t thread)
467: {
1.78 ad 468:
1.44 chs 469: #ifdef ERRORCHECK
1.78 ad 470: if (pthread__find(thread) != 0)
1.44 chs 471: return ESRCH;
472: #endif
1.79 ad 473: if (_lwp_continue(thread->pt_lid) == 0)
474: return 0;
475: return errno;
1.2 thorpej 476: }
477:
/*
 * Terminate the calling thread: run cancellation cleanup handlers and
 * TSD destructors, record the exit value, then either self-recycle
 * (detached) or become a zombie for pthread_join() to reap.  Never
 * returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;
	char *name;

	self = pthread__self();

	/* Disable cancellability. */
	pthread_mutex_lock(&self->pt_lock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;
	pthread_mutex_unlock(&self->pt_lock);

	/* Call any cancellation cleanup handlers */
	while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
		PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
		(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
	}

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	self->pt_exitval = retval;

	pthread_mutex_lock(&self->pt_lock);
	if (self->pt_flags & PT_FLAG_DETACHED) {
		/*
		 * Detached: nobody will join us, so free the name here
		 * and put the structure on the dead queue for reuse.
		 * The dead queue insert happens while pt_lock is still
		 * held so the structure is fully quiescent when seen.
		 */
		self->pt_state = PT_STATE_DEAD;
		name = self->pt_name;
		self->pt_name = NULL;
		pthread_mutex_lock(&pthread__deadqueue_lock);
		PTQ_INSERT_TAIL(&pthread__deadqueue, self, pt_deadq);
		pthread_mutex_unlock(&pthread__deadqueue_lock);
		pthread_mutex_unlock(&self->pt_lock);
		if (name != NULL)
			free(name);
		_lwp_exit();
	} else {
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_mutex_unlock(&self->pt_lock);
		/* Note: name will be freed by the joiner. */
		_lwp_exit();
	}

	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
528:
529:
/*
 * Wait for the given thread to terminate, optionally fetching its
 * exit value through *valptr, and release its resources to the dead
 * queue.  Returns 0, or ESRCH/EINVAL/EDEADLK for bogus targets.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;
	char *name;

	self = pthread__self();

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	/*
	 * IEEE Std 1003.1, 2004 Edition:
	 *
	 * "The pthread_join() function shall not return an
	 * error code of [EINTR]."
	 */
	while (_lwp_wait(thread->pt_lid, NULL) != 0) {
		if (errno != EINTR)
			return errno;
	}

	/*
	 * No need to lock - nothing else should (legally) be
	 * interested in the thread's state at this point.
	 *
	 * _lwp_wait() provides a barrier, so the user level
	 * thread state will be visible to us at this point.
	 */
	if (thread->pt_state != PT_STATE_ZOMBIE) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "not a zombie");
	}
	if (valptr != NULL)
		*valptr = thread->pt_exitval;
	/* The joiner frees the name; see pthread_exit(). */
	name = thread->pt_name;
	thread->pt_name = NULL;
	thread->pt_state = PT_STATE_DEAD;
	pthread_mutex_lock(&pthread__deadqueue_lock);
	PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq);
	pthread_mutex_unlock(&pthread__deadqueue_lock);
	if (name != NULL)
		free(name);
	return 0;
}
581:
582:
583: int
584: pthread_equal(pthread_t t1, pthread_t t2)
585: {
586:
587: /* Nothing special here. */
588: return (t1 == t2);
589: }
590:
591:
592: int
593: pthread_detach(pthread_t thread)
594: {
1.82 ad 595: int rv;
1.2 thorpej 596:
1.78 ad 597: if (pthread__find(thread) != 0)
1.2 thorpej 598: return ESRCH;
599:
600: if (thread->pt_magic != PT_MAGIC)
601: return EINVAL;
602:
1.82 ad 603: pthread_mutex_lock(&thread->pt_lock);
1.62 ad 604: thread->pt_flags |= PT_FLAG_DETACHED;
1.82 ad 605: rv = _lwp_detach(thread->pt_lid);
606: pthread_mutex_unlock(&thread->pt_lock);
1.64 ad 607:
1.82 ad 608: if (rv == 0)
1.79 ad 609: return 0;
610: return errno;
1.2 thorpej 611: }
612:
613:
614: int
1.11 thorpej 615: pthread_getname_np(pthread_t thread, char *name, size_t len)
616: {
617:
1.78 ad 618: if (pthread__find(thread) != 0)
1.11 thorpej 619: return ESRCH;
620:
621: if (thread->pt_magic != PT_MAGIC)
622: return EINVAL;
623:
1.82 ad 624: pthread_mutex_lock(&thread->pt_lock);
1.11 thorpej 625: if (thread->pt_name == NULL)
626: name[0] = '\0';
627: else
628: strlcpy(name, thread->pt_name, len);
1.82 ad 629: pthread_mutex_unlock(&thread->pt_lock);
1.11 thorpej 630:
631: return 0;
632: }
633:
634:
635: int
636: pthread_setname_np(pthread_t thread, const char *name, void *arg)
637: {
638: char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
639: int namelen;
640:
1.78 ad 641: if (pthread__find(thread) != 0)
1.11 thorpej 642: return ESRCH;
643:
644: if (thread->pt_magic != PT_MAGIC)
645: return EINVAL;
646:
647: namelen = snprintf(newname, sizeof(newname), name, arg);
648: if (namelen >= PTHREAD_MAX_NAMELEN_NP)
649: return EINVAL;
650:
651: cp = strdup(newname);
652: if (cp == NULL)
653: return ENOMEM;
654:
1.82 ad 655: pthread_mutex_lock(&thread->pt_lock);
1.11 thorpej 656: oldname = thread->pt_name;
657: thread->pt_name = cp;
1.86 ad 658: (void)_lwp_setname(thread->pt_lid, cp);
1.82 ad 659: pthread_mutex_unlock(&thread->pt_lock);
1.11 thorpej 660:
661: if (oldname != NULL)
662: free(oldname);
663:
664: return 0;
665: }
666:
1.2 thorpej 667:
668:
669: /*
670: * XXX There should be a way for applications to use the efficent
671: * inline version, but there are opacity/namespace issues.
672: */
673: pthread_t
674: pthread_self(void)
675: {
676:
677: return pthread__self();
678: }
679:
680:
681: int
682: pthread_cancel(pthread_t thread)
683: {
684:
1.78 ad 685: if (pthread__find(thread) != 0)
1.44 chs 686: return ESRCH;
1.82 ad 687: pthread_mutex_lock(&thread->pt_lock);
1.49 ad 688: thread->pt_flags |= PT_FLAG_CS_PENDING;
689: if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
690: thread->pt_cancel = 1;
1.82 ad 691: pthread_mutex_unlock(&thread->pt_lock);
1.49 ad 692: _lwp_wakeup(thread->pt_lid);
693: } else
1.82 ad 694: pthread_mutex_unlock(&thread->pt_lock);
1.2 thorpej 695:
696: return 0;
697: }
698:
699:
/*
 * Set the calling thread's cancellability state to
 * PTHREAD_CANCEL_ENABLE or PTHREAD_CANCEL_DISABLE, returning the
 * previous state via *oldstate if non-NULL.  Re-enabling with a
 * cancel pending in async mode exits immediately.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		if (self->pt_cancel) {
			/* Demote an active cancel to pending. */
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				/* Drop the lock before exiting. */
				pthread_mutex_unlock(&self->pt_lock);
				pthread_exit(PTHREAD_CANCELED);
			}
		}
	} else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}
746:
747:
/*
 * Set the calling thread's cancellation type to
 * PTHREAD_CANCEL_ASYNCHRONOUS or PTHREAD_CANCEL_DEFERRED, returning
 * the previous type via *oldtype if non-NULL.  Switching to async
 * with a cancel already flagged exits immediately.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		if (self->pt_cancel) {
			/* Drop the lock before exiting. */
			pthread_mutex_unlock(&self->pt_lock);
			pthread_exit(PTHREAD_CANCELED);
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}
781:
782:
783: void
784: pthread_testcancel()
785: {
786: pthread_t self;
787:
788: self = pthread__self();
789: if (self->pt_cancel)
790: pthread_exit(PTHREAD_CANCELED);
791: }
792:
793:
794: /*
795: * POSIX requires that certain functions return an error rather than
796: * invoking undefined behavior even when handed completely bogus
797: * pthread_t values, e.g. stack garbage or (pthread_t)666. This
798: * utility routine searches the list of threads for the pthread_t
799: * value without dereferencing it.
800: */
801: int
1.78 ad 802: pthread__find(pthread_t id)
1.2 thorpej 803: {
804: pthread_t target;
805:
1.84 ad 806: pthread_rwlock_rdlock(&pthread__alltree_lock);
807: /* LINTED */
808: target = RB_FIND(__pthread__alltree, &pthread__alltree, id);
809: pthread_rwlock_unlock(&pthread__alltree_lock);
1.2 thorpej 810:
1.77 ad 811: if (target == NULL || target->pt_state == PT_STATE_DEAD)
1.2 thorpej 812: return ESRCH;
813:
814: return 0;
815: }
816:
817:
818: void
819: pthread__testcancel(pthread_t self)
820: {
821:
822: if (self->pt_cancel)
823: pthread_exit(PTHREAD_CANCELED);
824: }
825:
826:
827: void
828: pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
829: {
830: pthread_t self;
831: struct pt_clean_t *entry;
832:
833: self = pthread__self();
834: entry = store;
835: entry->ptc_cleanup = cleanup;
836: entry->ptc_arg = arg;
837: PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
838: }
839:
840:
841: void
842: pthread__cleanup_pop(int ex, void *store)
843: {
844: pthread_t self;
845: struct pt_clean_t *entry;
846:
847: self = pthread__self();
848: entry = store;
849:
850: PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
851: if (ex)
852: (*entry->ptc_cleanup)(entry->ptc_arg);
853: }
854:
855:
856: int *
857: pthread__errno(void)
858: {
859: pthread_t self;
860:
861: self = pthread__self();
862:
863: return &(self->pt_errno);
1.9 nathanw 864: }
865:
1.27 nathanw 866: ssize_t _sys_write(int, const void *, size_t);
867:
1.9 nathanw 868: void
1.34 drochner 869: pthread__assertfunc(const char *file, int line, const char *function,
870: const char *expr)
1.9 nathanw 871: {
872: char buf[1024];
873: int len;
874:
875: /*
876: * snprintf should not acquire any locks, or we could
877: * end up deadlocked if the assert caller held locks.
878: */
879: len = snprintf(buf, 1024,
880: "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
881: expr, file, line,
882: function ? ", function \"" : "",
883: function ? function : "",
884: function ? "\"" : "");
885:
1.27 nathanw 886: _sys_write(STDERR_FILENO, buf, (size_t)len);
1.9 nathanw 887: (void)kill(getpid(), SIGABRT);
888:
889: _exit(1);
1.17 nathanw 890: }
891:
892:
893: void
1.34 drochner 894: pthread__errorfunc(const char *file, int line, const char *function,
895: const char *msg)
1.17 nathanw 896: {
897: char buf[1024];
1.24 nathanw 898: size_t len;
1.17 nathanw 899:
1.24 nathanw 900: if (pthread__diagassert == 0)
1.17 nathanw 901: return;
902:
903: /*
904: * snprintf should not acquire any locks, or we could
905: * end up deadlocked if the assert caller held locks.
906: */
907: len = snprintf(buf, 1024,
1.24 nathanw 908: "%s: Error detected by libpthread: %s.\n"
909: "Detected by file \"%s\", line %d%s%s%s.\n"
910: "See pthread(3) for information.\n",
911: getprogname(), msg, file, line,
1.17 nathanw 912: function ? ", function \"" : "",
913: function ? function : "",
1.24 nathanw 914: function ? "\"" : "");
915:
916: if (pthread__diagassert & DIAGASSERT_STDERR)
1.27 nathanw 917: _sys_write(STDERR_FILENO, buf, len);
1.24 nathanw 918:
919: if (pthread__diagassert & DIAGASSERT_SYSLOG)
920: syslog(LOG_DEBUG | LOG_USER, "%s", buf);
1.17 nathanw 921:
1.24 nathanw 922: if (pthread__diagassert & DIAGASSERT_ABORT) {
1.17 nathanw 923: (void)kill(getpid(), SIGABRT);
924: _exit(1);
925: }
1.2 thorpej 926: }
1.49 ad 927:
1.51 ad 928: /*
1.52 ad 929: * Thread park/unpark operations. The kernel operations are
930: * modelled after a brief description from "Multithreading in
931: * the Solaris Operating Environment":
1.51 ad 932: *
933: * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
934: */
935:
1.49 ad 936: #define OOPS(msg) \
1.58 christos 937: pthread__errorfunc(__FILE__, __LINE__, __func__, msg)
1.49 ad 938:
/*
 * Block the current thread ("self", already enqueued on "queue" which
 * is protected by "lock") until it is unparked, a signal arrives, the
 * absolute timeout "abstime" expires, or - if "cancelpt" - it is
 * cancelled.  Returns 0 on normal wakeup, ETIMEDOUT on timeout, or
 * EINTR when cancelled at a cancellation point.
 */
int
pthread__park(pthread_t self, pthread_spin_t *lock,
	      pthread_queue_t *queue, const struct timespec *abstime,
	      int cancelpt, const void *hint)
{
	int rv, error;
	void *obj;

	/* Clear the willpark flag, since we're about to block. */
	self->pt_willpark = 0;

	/*
	 * Kernels before 4.99.27 can't park and unpark in one step,
	 * so take care of it now if on an old kernel.
	 *
	 * XXX Remove this check before NetBSD 5.0 is released.
	 * It's for compatibility with recent -current only.
	 */
	if (__predict_false(pthread__osrev < 499002700) &&
	    self->pt_unpark != 0) {
		_lwp_unpark(self->pt_unpark, self->pt_unparkhint);
		self->pt_unpark = 0;
	}

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone asleep,
	 * or an expired timeout.
	 *
	 * It is fine to test the value of both pt_sleepobj and
	 * pt_sleeponq without holding any locks, because:
	 *
	 * o Only the blocking thread (this thread) ever sets them
	 *   to a non-NULL value.
	 *
	 * o Other threads may set them NULL, but if they do so they
	 *   must also make this thread return from _lwp_park.
	 *
	 * o _lwp_park, _lwp_unpark and _lwp_unpark_all are system
	 *   calls and all make use of spinlocks in the kernel.  So
	 *   these system calls act as full memory barriers, and will
	 *   ensure that the calling CPU's store buffers are drained.
	 *   In combination with the spinlock release before unpark,
	 *   this means that modification of pt_sleepobj/onq by another
	 *   thread will become globally visible before that thread
	 *   schedules an unpark operation on this thread.
	 *
	 * Note: the test in the while() statement dodges the park op if
	 * we have already been awoken, unless there is another thread to
	 * awaken.  This saves a syscall - if we were already awakened,
	 * the next call to _lwp_park() would need to return early in order
	 * to eat the previous wakeup.
	 */
	rv = 0;
	while ((self->pt_sleepobj != NULL || self->pt_unpark != 0) && rv == 0) {
		/*
		 * If we deferred unparking a thread, arrange to
		 * have _lwp_park() restart it before blocking.
		 */
		error = _lwp_park(abstime, self->pt_unpark, hint,
		    self->pt_unparkhint);
		self->pt_unpark = 0;
		if (error != 0) {
			/* EINTR and EALREADY are not errors for us. */
			switch (rv = errno) {
			case EINTR:
			case EALREADY:
				rv = 0;
				break;
			case ETIMEDOUT:
				break;
			default:
				OOPS("_lwp_park failed");
				break;
			}
		}
		/* Check for cancellation. */
		if (cancelpt && self->pt_cancel)
			rv = EINTR;
	}

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourself.  Again, it's safe to do the test
	 * without holding any locks.
	 */
	if (__predict_false(self->pt_sleeponq)) {
		pthread__spinlock(self, lock);
		if (self->pt_sleeponq) {
			PTQ_REMOVE(queue, self, pt_sleep);
			obj = self->pt_sleepobj;
			self->pt_sleepobj = NULL;
			self->pt_sleeponq = 0;
			/* Run the "early wakeup" hook, if any was set. */
			if (obj != NULL && self->pt_early != NULL)
				(*self->pt_early)(obj);
		}
		pthread__spinunlock(self, lock);
	}
	self->pt_early = NULL;

	return rv;
}
1040:
/*
 * Wake a single thread ("target") that is sleeping on "queue".
 * Entered with the queue's interlock "lock" held; always releases
 * it before returning.  "target" may be NULL (already removed /
 * nothing to wake), in which case this just drops the lock.
 */
void
pthread__unpark(pthread_t self, pthread_spin_t *lock,
    pthread_queue_t *queue, pthread_t target)
{
	int rv;

	if (target == NULL) {
		pthread__spinunlock(self, lock);
		return;
	}

	/*
	 * Easy: the thread has already been removed from
	 * the queue, so just awaken it.
	 */
	target->pt_sleepobj = NULL;
	target->pt_sleeponq = 0;

	/*
	 * Releasing the spinlock serves as a store barrier,
	 * which ensures that all our modifications are visible
	 * to the thread in pthread__park() before the unpark
	 * operation is set in motion.
	 */
	pthread__spinunlock(self, lock);

	/*
	 * If the calling thread is about to block, defer
	 * unparking the target until _lwp_park() is called.
	 * This saves a syscall: _lwp_park() will issue the
	 * wakeup on our behalf (see pthread__park()).
	 */
	if (self->pt_willpark && self->pt_unpark == 0) {
		self->pt_unpark = target->pt_lid;
		self->pt_unparkhint = queue;
	} else {
		/* EALREADY/EINTR just mean the target was already awake. */
		rv = _lwp_unpark(target->pt_lid, queue);
		if (rv != 0 && errno != EALREADY && errno != EINTR) {
			OOPS("_lwp_unpark failed");
		}
	}
}
1081:
/*
 * Wake every thread sleeping on "queue".  Entered with the interlock
 * "lock" held; always releases it before returning.  Waiter LIDs are
 * batched into self->pt_waiters (at most pthread__unpark_max per pass)
 * and woken with a single _lwp_unpark_all() syscall; a lone waiter is
 * woken with _lwp_unpark(), possibly deferred to the caller's next
 * _lwp_park() (see pthread__unpark()).  The lock is dropped before
 * issuing the syscall and retaken for each additional pass over a
 * long queue.
 */
void
pthread__unpark_all(pthread_t self, pthread_spin_t *lock,
    pthread_queue_t *queue)
{
	ssize_t n, rv;
	pthread_t thread, next;
	void *wakeobj;

	/* NOTE(review): pt_nwaiters looks like a carry-over of already
	 * collected-but-unwoken waiters from elsewhere — not set in this
	 * view; confirm against the rest of the file. */
	if (PTQ_EMPTY(queue) && self->pt_nwaiters == 0) {
		pthread__spinunlock(self, lock);
		return;
	}

	/* First pass: only wake threads actually sleeping on "queue". */
	wakeobj = queue;

	for (;; n = 0) {
		/*
		 * Pull waiters from the queue and add to this
		 * thread's waiters list.
		 */
		thread = PTQ_FIRST(queue);
		for (n = self->pt_nwaiters, self->pt_nwaiters = 0;
		    n < pthread__unpark_max && thread != NULL;
		    thread = next) {
			/*
			 * If the sleepobj pointer is non-NULL, it
			 * means one of two things:
			 *
			 * o The thread has awoken early, spun
			 *   through application code and is
			 *   once more asleep on this object.
			 *
			 * o This is a new thread that has blocked
			 *   on the object after we have released
			 *   the interlock in this loop.
			 *
			 * In both cases we shouldn't remove the
			 * thread from the queue.
			 */
			next = PTQ_NEXT(thread, pt_sleep);
			if (thread->pt_sleepobj != wakeobj)
				continue;
			thread->pt_sleepobj = NULL;
			thread->pt_sleeponq = 0;
			self->pt_waiters[n++] = thread->pt_lid;
			PTQ_REMOVE(queue, thread, pt_sleep);
		}

		/*
		 * Releasing the spinlock serves as a store barrier,
		 * which ensures that all our modifications are visible
		 * to the thread in pthread__park() before the unpark
		 * operation is set in motion.
		 */
		switch (n) {
		case 0:
			/* Nothing (more) to wake. */
			pthread__spinunlock(self, lock);
			return;
		case 1:
			/*
			 * If the calling thread is about to block,
			 * defer unparking the target until _lwp_park()
			 * is called.
			 */
			pthread__spinunlock(self, lock);
			if (self->pt_willpark && self->pt_unpark == 0) {
				self->pt_unpark = self->pt_waiters[0];
				self->pt_unparkhint = queue;
				return;
			}
			rv = (ssize_t)_lwp_unpark(self->pt_waiters[0], queue);
			if (rv != 0 && errno != EALREADY && errno != EINTR) {
				OOPS("_lwp_unpark failed");
			}
			return;
		default:
			/*
			 * Clear all sleepobj pointers, since we
			 * release the spin lock before awkening
			 * everybody, and must synchronise with
			 * pthread__park().
			 */
			while (thread != NULL) {
				thread->pt_sleepobj = NULL;
				thread = PTQ_NEXT(thread, pt_sleep);
			}
			/*
			 * Now only interested in waking threads
			 * marked to be woken (sleepobj == NULL).
			 */
			wakeobj = NULL;
			pthread__spinunlock(self, lock);
			rv = _lwp_unpark_all(self->pt_waiters, (size_t)n,
			    queue);
			if (rv != 0 && errno != EINTR) {
				OOPS("_lwp_unpark_all failed");
			}
			break;
		}
		/* Retake the interlock for the next batch. */
		pthread__spinlock(self, lock);
	}
}
1184:
1185: #undef OOPS
1.76 ad 1186:
1187: /*
1188: * Allocate a stack for a thread, and set it up. It needs to be aligned, so
1189: * that a thread can find itself by its stack pointer.
1190: */
1191: static int
1192: pthread__stackalloc(pthread_t *newt)
1193: {
1194: void *addr;
1195:
1196: addr = mmap(NULL, pthread__stacksize, PROT_READ|PROT_WRITE,
1197: MAP_ANON|MAP_PRIVATE | MAP_ALIGNED(pthread__stacksize_lg),
1198: -1, (off_t)0);
1199:
1200: if (addr == MAP_FAILED)
1201: return ENOMEM;
1202:
1203: pthread__assert(((intptr_t)addr & pthread__stackmask) == 0);
1204:
1205: return pthread__stackid_setup(addr, pthread__stacksize, newt);
1206: }
1207:
1208:
1209: /*
1210: * Set up the slightly special stack for the "initial" thread, which
1211: * runs on the normal system stack, and thus gets slightly different
1212: * treatment.
1213: */
1214: static void
1215: pthread__initmain(pthread_t *newt)
1216: {
1217: struct rlimit slimit;
1218: size_t pagesize;
1219: pthread_t t;
1220: void *base;
1221: size_t size;
1222: int error, ret;
1223: char *value;
1224:
1225: pagesize = (size_t)sysconf(_SC_PAGESIZE);
1226: pthread__stacksize = 0;
1227: ret = getrlimit(RLIMIT_STACK, &slimit);
1228: if (ret == -1)
1229: err(1, "Couldn't get stack resource consumption limits");
1.87 ! ad 1230:
! 1231: value = pthread__getenv("PTHREAD_STACKSIZE");
! 1232: if (value != NULL) {
1.76 ad 1233: pthread__stacksize = atoi(value) * 1024;
1234: if (pthread__stacksize > slimit.rlim_cur)
1235: pthread__stacksize = (size_t)slimit.rlim_cur;
1236: }
1237: if (pthread__stacksize == 0)
1238: pthread__stacksize = (size_t)slimit.rlim_cur;
1239: if (pthread__stacksize < 4 * pagesize)
1240: errx(1, "Stacksize limit is too low, minimum %zd kbyte.",
1241: 4 * pagesize / 1024);
1242:
1243: pthread__stacksize_lg = -1;
1244: while (pthread__stacksize) {
1245: pthread__stacksize >>= 1;
1246: pthread__stacksize_lg++;
1247: }
1248:
1249: pthread__stacksize = (1 << pthread__stacksize_lg);
1250: pthread__stackmask = pthread__stacksize - 1;
1.82 ad 1251: pthread__threadmask = ~pthread__stackmask;
1.76 ad 1252:
1.82 ad 1253: base = (void *)(pthread__sp() & pthread__threadmask);
1.76 ad 1254: size = pthread__stacksize;
1255:
1256: error = pthread__stackid_setup(base, size, &t);
1257: if (error) {
1258: /* XXX */
1259: errx(2, "failed to setup main thread: error=%d", error);
1260: }
1261:
1262: *newt = t;
1263: }
1264:
1265: static int
1266: /*ARGSUSED*/
1267: pthread__stackid_setup(void *base, size_t size, pthread_t *tp)
1268: {
1269: pthread_t t;
1270: void *redaddr;
1271: size_t pagesize;
1272: int ret;
1273:
1274: t = base;
1275: pagesize = (size_t)sysconf(_SC_PAGESIZE);
1276:
1277: /*
1278: * Put a pointer to the pthread in the bottom (but
1279: * redzone-protected section) of the stack.
1280: */
1281: redaddr = STACK_SHRINK(STACK_MAX(base, size), pagesize);
1282: t->pt_stack.ss_size = size - 2 * pagesize;
1283: #ifdef __MACHINE_STACK_GROWS_UP
1284: t->pt_stack.ss_sp = (char *)(void *)base + pagesize;
1285: #else
1286: t->pt_stack.ss_sp = (char *)(void *)base + 2 * pagesize;
1287: #endif
1288:
1289: /* Protect the next-to-bottom stack page as a red zone. */
1290: ret = mprotect(redaddr, pagesize, PROT_NONE);
1291: if (ret == -1) {
1292: return errno;
1293: }
1294: *tp = t;
1295: return 0;
1296: }
1.84 ad 1297:
1298: #ifndef lint
1299: static int
1300: pthread__cmp(struct __pthread_st *a, struct __pthread_st *b)
1301: {
1302: return b - a;
1303: }
1304: RB_GENERATE_STATIC(__pthread__alltree, __pthread_st, pt_alltree, pthread__cmp)
1305: #endif
1306:
/*
 * Look up an environment variable without going through getenv(),
 * because getenv() wants to use locks.  Calls the libc-internal
 * __findenv() directly; its offset out-parameter is required by the
 * interface but unused here.
 */
char *
pthread__getenv(const char *name)
{
	extern char *__findenv(const char *, int *);
	int off;

	return __findenv(name, &off);
}
! 1316:
! 1317:
CVSweb <webmaster@jp.NetBSD.org>