Annotation of src/lib/libpthread/pthread.c, Revision 1.157
1.157 ! ad 1: /* $NetBSD: pthread.c,v 1.156 2020/01/25 18:01:28 ad Exp $ */
1.2 thorpej 2:
3: /*-
1.155 ad 4: * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008, 2020
5: * The NetBSD Foundation, Inc.
1.2 thorpej 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
1.49 ad 9: * by Nathan J. Williams and Andrew Doran.
1.2 thorpej 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: *
20: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30: * POSSIBILITY OF SUCH DAMAGE.
31: */
32:
1.14 lukem 33: #include <sys/cdefs.h>
1.157 ! ad 34: __RCSID("$NetBSD: pthread.c,v 1.156 2020/01/25 18:01:28 ad Exp $");
1.76 ad 35:
36: #define __EXPOSE_STACK 1
37:
38: #include <sys/param.h>
1.126 joerg 39: #include <sys/exec_elf.h>
1.76 ad 40: #include <sys/mman.h>
1.136 joerg 41: #include <sys/lwp.h>
1.88 ad 42: #include <sys/lwpctl.h>
1.142 dsl 43: #include <sys/resource.h>
1.148 joerg 44: #include <sys/sysctl.h>
1.121 joerg 45: #include <sys/tls.h>
1.148 joerg 46: #include <uvm/uvm_param.h>
1.14 lukem 47:
1.126 joerg 48: #include <assert.h>
49: #include <dlfcn.h>
1.2 thorpej 50: #include <err.h>
51: #include <errno.h>
52: #include <lwp.h>
53: #include <signal.h>
1.9 nathanw 54: #include <stdio.h>
1.2 thorpej 55: #include <stdlib.h>
1.139 rmind 56: #include <stddef.h>
1.2 thorpej 57: #include <string.h>
1.24 nathanw 58: #include <syslog.h>
1.2 thorpej 59: #include <ucontext.h>
1.9 nathanw 60: #include <unistd.h>
1.76 ad 61: #include <sched.h>
1.2 thorpej 62:
1.150 joerg 63: #include "atexit.h"
1.2 thorpej 64: #include "pthread.h"
65: #include "pthread_int.h"
1.145 pooka 66: #include "pthread_makelwp.h"
1.143 christos 67: #include "reentrant.h"
1.2 thorpej 68:
/*
 * Red-black tree of all known threads, keyed by pthread_t address, used
 * by pthread__find() to validate caller-supplied handles.  Guarded by
 * pthread__alltree_lock.
 */
pthread_rwlock_t pthread__alltree_lock = PTHREAD_RWLOCK_INITIALIZER;
static rb_tree_t pthread__alltree;

/* Single comparator serves both node/node and node/key comparisons. */
static signed int pthread__cmp(void *, const void *, const void *);

static const rb_tree_ops_t pthread__alltree_ops = {
	.rbto_compare_nodes = pthread__cmp,
	.rbto_compare_key = pthread__cmp,
	.rbto_node_offset = offsetof(struct __pthread_st, pt_alltree),
	.rbto_context = NULL
};
1.84 ad 80:
/* Forward declarations for the static helpers defined below. */
static void pthread__create_tramp(void *);
static void pthread__initthread(pthread_t);
static void pthread__scrubthread(pthread_t, char *, int);
static void pthread__initmain(pthread_t *);
static void pthread__fork_callback(void);
static void pthread__reap(pthread_t);
static void pthread__child_callback(void);
static void pthread__start(void);

void pthread__init(void);

int pthread__started;		/* set once the first thread is created */
int __uselibcstub = 1;		/* use libc stubs until pthread__init() clears it */
pthread_mutex_t pthread__deadqueue_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_queue_t pthread__deadqueue;	/* exited threads awaiting reuse */
pthread_queue_t pthread__allqueue;	/* all threads ever created */

static pthread_attr_t pthread_default_attr;
/* Stand-in lwpctl block used before the kernel provides a real one. */
static lwpctl_t pthread__dummy_lwpctl = { .lc_curcpu = LWPCTL_CPU_NONE };
1.2 thorpej 100:
/* Bit flags parsed from the PTHREAD_DIAGASSERT environment variable. */
enum {
	DIAGASSERT_ABORT = 1<<0,
	DIAGASSERT_STDERR = 1<<1,
	DIAGASSERT_SYSLOG = 1<<2
};

static int pthread__diagassert;	/* OR of the DIAGASSERT_* flags above */

int pthread__concurrency;	/* CPU count, from sysconf() in pthread__init() */
int pthread__nspins;
int pthread__unpark_max = PTHREAD__UNPARK_MAX;	/* clamped in pthread__init() */
int pthread__dbg;	/* set by libpthread_dbg if active */

/*
 * We have to initialize the pthread_stack* variables here because
 * mutexes are used before pthread_init() and thus pthread__initmain()
 * are called.  Since mutexes only save the stack pointer and not a
 * pointer to the thread data, it is safe to change the mapping from
 * stack pointer to thread data afterwards.
 */
size_t	pthread__stacksize;
size_t	pthread__guardsize;
size_t	pthread__pagesize;
static struct __pthread_st *pthread__main;	/* the initial (main) thread */
static size_t __pthread_st_size;	/* allocation size, set by pthread_tsd_init() */
1.76 ad 126:
int _sys___sigprocmask14(int, const sigset_t *, sigset_t *);

/* libc thread entry points are redirected here once libpthread is linked. */
__strong_alias(__libc_thr_self,pthread_self)
__strong_alias(__libc_thr_create,pthread_create)
__strong_alias(__libc_thr_exit,pthread_exit)
__strong_alias(__libc_thr_errno,pthread__errno)
__strong_alias(__libc_thr_setcancelstate,pthread_setcancelstate)
__strong_alias(__libc_thr_equal,pthread_equal)
__strong_alias(__libc_thr_init,pthread__init)

/*
 * Static library kludge.  Place a reference to a symbol any library
 * file which does not already have a reference here.
 */
extern int pthread__cancel_stub_binder;

void *pthread__static_lib_binder[] = {
	&pthread__cancel_stub_binder,
	pthread_cond_init,
	pthread_mutex_init,
	pthread_rwlock_init,
	pthread_barrier_init,
	pthread_key_create,
	pthread_setspecific,
};

#define NHASHLOCK 64

/* Cache-line padded mutexes, initialized in pthread__init(). */
static union hashlock {
	pthread_mutex_t mutex;
	char pad[64];
} hashlocks[NHASHLOCK] __aligned(64);
159:
/*
 * This needs to be started by the library loading code, before main()
 * gets to run, for various things that use the state of the initial thread
 * to work properly (thread-specific data is an application-visible example;
 * spinlock counts for mutexes is an internal example).
 */
void
pthread__init(void)
{
	pthread_t first;
	char *p;
	int i;
	int mib[2];
	unsigned int value;
	size_t len;
	extern int __isthreaded;

	/*
	 * Allocate pthread_keys descriptors before
	 * resetting __uselibcstub because otherwise
	 * malloc() will call pthread_keys_create()
	 * while pthread_keys descriptors are not
	 * yet allocated.
	 */
	pthread__main = pthread_tsd_init(&__pthread_st_size);
	if (pthread__main == NULL)
		err(EXIT_FAILURE, "Cannot allocate pthread storage");

	__uselibcstub = 0;

	pthread__pagesize = (size_t)sysconf(_SC_PAGESIZE);
	pthread__concurrency = (int)sysconf(_SC_NPROCESSORS_CONF);

	/* Ask the kernel for the default stack guard size. */
	mib[0] = CTL_VM;
	mib[1] = VM_THREAD_GUARD_SIZE;
	len = sizeof(value);
	if (sysctl(mib, __arraycount(mib), &value, &len, NULL, 0) == 0)
		pthread__guardsize = value;
	else
		pthread__guardsize = pthread__pagesize;

	/* Initialize locks first; they're needed elsewhere. */
	pthread__lockprim_init();
	for (i = 0; i < NHASHLOCK; i++) {
		pthread_mutex_init(&hashlocks[i].mutex, NULL);
	}

	/* Fetch parameters: kernel reports how many LWPs it can unpark. */
	i = (int)_lwp_unpark_all(NULL, 0, NULL);
	if (i == -1)
		err(EXIT_FAILURE, "_lwp_unpark_all");
	if (i < pthread__unpark_max)
		pthread__unpark_max = i;

	/* Basic data structure setup */
	pthread_attr_init(&pthread_default_attr);
	PTQ_INIT(&pthread__allqueue);
	PTQ_INIT(&pthread__deadqueue);

	rb_tree_init(&pthread__alltree, &pthread__alltree_ops);

	/* Create the thread structure corresponding to main() */
	pthread__initmain(&first);
	pthread__initthread(first);
	pthread__scrubthread(first, NULL, 0);

	first->pt_lid = _lwp_self();
	PTQ_INSERT_HEAD(&pthread__allqueue, first, pt_allq);
	(void)rb_tree_insert_node(&pthread__alltree, first);

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &first->pt_lwpctl) != 0) {
		err(EXIT_FAILURE, "_lwp_ctl");
	}

	/* Start subsystems */
	PTHREAD_MD_INIT

	/* Lowercase letters enable a diagassert sink; uppercase disable it. */
	for (p = pthread__getenv("PTHREAD_DIAGASSERT"); p && *p; p++) {
		switch (*p) {
		case 'a':
			pthread__diagassert |= DIAGASSERT_ABORT;
			break;
		case 'A':
			pthread__diagassert &= ~DIAGASSERT_ABORT;
			break;
		case 'e':
			pthread__diagassert |= DIAGASSERT_STDERR;
			break;
		case 'E':
			pthread__diagassert &= ~DIAGASSERT_STDERR;
			break;
		case 'l':
			pthread__diagassert |= DIAGASSERT_SYSLOG;
			break;
		case 'L':
			pthread__diagassert &= ~DIAGASSERT_SYSLOG;
			break;
		}
	}

	/* Tell libc that we're here and it should role-play accordingly. */
	pthread_atfork(NULL, NULL, pthread__fork_callback);
	__isthreaded = 1;
}
264:
1.16 nathanw 265: static void
1.108 ad 266: pthread__fork_callback(void)
1.88 ad 267: {
1.134 enami 268: struct __pthread_st *self = pthread__self();
1.88 ad 269:
270: /* lwpctl state is not copied across fork. */
1.134 enami 271: if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) {
1.146 manu 272: err(EXIT_FAILURE, "_lwp_ctl");
1.108 ad 273: }
1.115 explorer 274: self->pt_lid = _lwp_self();
1.108 ad 275: }
276:
277: static void
278: pthread__child_callback(void)
279: {
1.88 ad 280:
1.16 nathanw 281: /*
282: * Clean up data structures that a forked child process might
283: * trip over. Note that if threads have been created (causing
284: * this handler to be registered) the standards say that the
285: * child will trigger undefined behavior if it makes any
286: * pthread_* calls (or any other calls that aren't
287: * async-signal-safe), so we don't really have to clean up
288: * much. Anything that permits some pthread_* calls to work is
289: * merely being polite.
290: */
291: pthread__started = 0;
292: }
1.2 thorpej 293:
1.45 chs 294: static void
1.2 thorpej 295: pthread__start(void)
296: {
1.18 nathanw 297:
298: /*
299: * Per-process timers are cleared by fork(); despite the
300: * various restrictions on fork() and threads, it's legal to
301: * fork() before creating any threads.
302: */
1.16 nathanw 303: pthread_atfork(NULL, NULL, pthread__child_callback);
1.2 thorpej 304: }
305:
306:
307: /* General-purpose thread data structure sanitization. */
1.69 ad 308: /* ARGSUSED */
309: static void
310: pthread__initthread(pthread_t t)
1.2 thorpej 311: {
312:
1.87 ad 313: t->pt_self = t;
1.2 thorpej 314: t->pt_magic = PT_MAGIC;
1.72 ad 315: t->pt_willpark = 0;
316: t->pt_unpark = 0;
1.81 ad 317: t->pt_nwaiters = 0;
1.72 ad 318: t->pt_sleepobj = NULL;
319: t->pt_signalled = 0;
1.77 ad 320: t->pt_havespecific = 0;
1.81 ad 321: t->pt_early = NULL;
1.88 ad 322: t->pt_lwpctl = &pthread__dummy_lwpctl;
1.49 ad 323:
1.87 ad 324: memcpy(&t->pt_lockops, pthread__lock_ops, sizeof(t->pt_lockops));
1.82 ad 325: pthread_mutex_init(&t->pt_lock, NULL);
1.2 thorpej 326: PTQ_INIT(&t->pt_cleanup_stack);
327: }
328:
1.77 ad 329: static void
330: pthread__scrubthread(pthread_t t, char *name, int flags)
331: {
332:
333: t->pt_state = PT_STATE_RUNNING;
334: t->pt_exitval = NULL;
335: t->pt_flags = flags;
336: t->pt_cancel = 0;
337: t->pt_errno = 0;
338: t->pt_name = name;
339: t->pt_lid = 0;
340: }
341:
1.126 joerg 342: static int
1.132 joerg 343: pthread__getstack(pthread_t newthread, const pthread_attr_t *attr)
1.126 joerg 344: {
1.132 joerg 345: void *stackbase, *stackbase2, *redzone;
1.131 joerg 346: size_t stacksize, guardsize;
1.132 joerg 347: bool allocated;
1.126 joerg 348:
1.128 joerg 349: if (attr != NULL) {
350: pthread_attr_getstack(attr, &stackbase, &stacksize);
1.148 joerg 351: pthread_attr_getguardsize(attr, &guardsize);
1.128 joerg 352: } else {
353: stackbase = NULL;
354: stacksize = 0;
1.148 joerg 355: guardsize = pthread__guardsize;
1.128 joerg 356: }
357: if (stacksize == 0)
358: stacksize = pthread__stacksize;
359:
1.132 joerg 360: if (newthread->pt_stack_allocated) {
1.133 drochner 361: if (stackbase == NULL &&
1.148 joerg 362: newthread->pt_stack.ss_size == stacksize &&
363: newthread->pt_guardsize == guardsize)
1.132 joerg 364: return 0;
365: stackbase2 = newthread->pt_stack.ss_sp;
366: #ifndef __MACHINE_STACK_GROWS_UP
367: stackbase2 = (char *)stackbase2 - newthread->pt_guardsize;
368: #endif
369: munmap(stackbase2,
370: newthread->pt_stack.ss_size + newthread->pt_guardsize);
371: newthread->pt_stack.ss_sp = NULL;
372: newthread->pt_stack.ss_size = 0;
373: newthread->pt_guardsize = 0;
374: newthread->pt_stack_allocated = false;
375: }
376:
377: newthread->pt_stack_allocated = false;
378:
1.128 joerg 379: if (stackbase == NULL) {
1.132 joerg 380: stacksize = ((stacksize - 1) | (pthread__pagesize - 1)) + 1;
1.148 joerg 381: guardsize = ((guardsize - 1) | (pthread__pagesize - 1)) + 1;
1.131 joerg 382: stackbase = mmap(NULL, stacksize + guardsize,
1.128 joerg 383: PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, (off_t)0);
384: if (stackbase == MAP_FAILED)
385: return ENOMEM;
1.132 joerg 386: allocated = true;
1.131 joerg 387: } else {
1.132 joerg 388: allocated = false;
1.128 joerg 389: }
1.126 joerg 390: #ifdef __MACHINE_STACK_GROWS_UP
1.132 joerg 391: redzone = (char *)stackbase + stacksize;
392: stackbase2 = (char *)stackbase;
1.126 joerg 393: #else
394: redzone = (char *)stackbase;
1.132 joerg 395: stackbase2 = (char *)stackbase + guardsize;
1.126 joerg 396: #endif
1.132 joerg 397: if (allocated && guardsize &&
398: mprotect(redzone, guardsize, PROT_NONE) == -1) {
1.131 joerg 399: munmap(stackbase, stacksize + guardsize);
1.126 joerg 400: return EPERM;
401: }
1.132 joerg 402: newthread->pt_stack.ss_size = stacksize;
403: newthread->pt_stack.ss_sp = stackbase2;
404: newthread->pt_guardsize = guardsize;
405: newthread->pt_stack_allocated = allocated;
1.126 joerg 406: return 0;
407: }
1.2 thorpej 408:
409: int
410: pthread_create(pthread_t *thread, const pthread_attr_t *attr,
411: void *(*startfunc)(void *), void *arg)
412: {
1.78 ad 413: pthread_t newthread;
1.2 thorpej 414: pthread_attr_t nattr;
1.11 thorpej 415: struct pthread_attr_private *p;
1.56 christos 416: char * volatile name;
1.75 ad 417: unsigned long flag;
1.121 joerg 418: void *private_area;
1.75 ad 419: int ret;
1.2 thorpej 420:
1.143 christos 421: if (__predict_false(__uselibcstub)) {
422: pthread__errorfunc(__FILE__, __LINE__, __func__,
423: "pthread_create() requires linking with -lpthread");
424: return __libc_thr_create_stub(thread, attr, startfunc, arg);
425: }
426:
1.2 thorpej 427: /*
428: * It's okay to check this without a lock because there can
429: * only be one thread before it becomes true.
430: */
431: if (pthread__started == 0) {
432: pthread__start();
433: pthread__started = 1;
434: }
435:
436: if (attr == NULL)
437: nattr = pthread_default_attr;
1.7 drochner 438: else if (attr->pta_magic == PT_ATTR_MAGIC)
1.2 thorpej 439: nattr = *attr;
440: else
441: return EINVAL;
442:
1.11 thorpej 443: /* Fetch misc. attributes from the attr structure. */
1.12 nathanw 444: name = NULL;
445: if ((p = nattr.pta_private) != NULL)
446: if (p->ptap_name[0] != '\0')
1.11 thorpej 447: if ((name = strdup(p->ptap_name)) == NULL)
448: return ENOMEM;
1.2 thorpej 449:
1.70 ad 450: newthread = NULL;
1.2 thorpej 451:
1.77 ad 452: /*
453: * Try to reclaim a dead thread.
454: */
1.70 ad 455: if (!PTQ_EMPTY(&pthread__deadqueue)) {
1.82 ad 456: pthread_mutex_lock(&pthread__deadqueue_lock);
1.113 christos 457: PTQ_FOREACH(newthread, &pthread__deadqueue, pt_deadq) {
1.157 ! ad 458: /* Still busily exiting, or finished? */
1.113 christos 459: if (newthread->pt_lwpctl->lc_curcpu ==
1.157 ! ad 460: LWPCTL_CPU_EXITED)
1.113 christos 461: break;
462: }
463: if (newthread)
1.77 ad 464: PTQ_REMOVE(&pthread__deadqueue, newthread, pt_deadq);
1.113 christos 465: pthread_mutex_unlock(&pthread__deadqueue_lock);
1.123 joerg 466: #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
467: if (newthread && newthread->pt_tls) {
468: _rtld_tls_free(newthread->pt_tls);
469: newthread->pt_tls = NULL;
470: }
471: #endif
1.60 yamt 472: }
1.70 ad 473:
1.77 ad 474: /*
475: * If necessary set up a stack, allocate space for a pthread_st,
476: * and initialize it.
477: */
1.60 yamt 478: if (newthread == NULL) {
1.146 manu 479: newthread = calloc(1, __pthread_st_size);
1.126 joerg 480: if (newthread == NULL) {
481: free(name);
482: return ENOMEM;
483: }
1.132 joerg 484: newthread->pt_stack_allocated = false;
1.126 joerg 485:
1.132 joerg 486: if (pthread__getstack(newthread, attr)) {
1.126 joerg 487: free(newthread);
488: free(name);
489: return ENOMEM;
1.31 christos 490: }
1.75 ad 491:
1.123 joerg 492: #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
493: newthread->pt_tls = NULL;
494: #endif
1.77 ad 495:
496: /* Add to list of all threads. */
1.84 ad 497: pthread_rwlock_wrlock(&pthread__alltree_lock);
1.85 ad 498: PTQ_INSERT_TAIL(&pthread__allqueue, newthread, pt_allq);
1.139 rmind 499: (void)rb_tree_insert_node(&pthread__alltree, newthread);
1.84 ad 500: pthread_rwlock_unlock(&pthread__alltree_lock);
1.77 ad 501:
502: /* Will be reset by the thread upon exit. */
503: pthread__initthread(newthread);
1.132 joerg 504: } else {
505: if (pthread__getstack(newthread, attr)) {
506: pthread_mutex_lock(&pthread__deadqueue_lock);
507: PTQ_INSERT_TAIL(&pthread__deadqueue, newthread, pt_deadq);
508: pthread_mutex_unlock(&pthread__deadqueue_lock);
509: return ENOMEM;
510: }
1.2 thorpej 511: }
512:
1.77 ad 513: /*
514: * Create the new LWP.
515: */
516: pthread__scrubthread(newthread, name, nattr.pta_flags);
1.110 ad 517: newthread->pt_func = startfunc;
518: newthread->pt_arg = arg;
1.121 joerg 519: #if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
520: private_area = newthread->pt_tls = _rtld_tls_allocate();
521: newthread->pt_tls->tcb_pthread = newthread;
522: #else
523: private_area = newthread;
524: #endif
525:
1.157 ! ad 526: flag = 0;
1.103 ad 527: if ((newthread->pt_flags & PT_FLAG_SUSPENDED) != 0 ||
1.104 ad 528: (nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0)
1.52 ad 529: flag |= LWP_SUSPENDED;
1.157 ! ad 530: if ((newthread->pt_flags & PT_FLAG_DETACHED) != 0)
! 531: flag |= LWP_DETACHED;
1.145 pooka 532:
533: ret = pthread__makelwp(pthread__create_tramp, newthread, private_area,
534: newthread->pt_stack.ss_sp, newthread->pt_stack.ss_size,
535: flag, &newthread->pt_lid);
1.49 ad 536: if (ret != 0) {
1.137 drochner 537: ret = errno;
1.116 rmind 538: pthread_mutex_lock(&newthread->pt_lock);
539: /* Will unlock and free name. */
540: pthread__reap(newthread);
1.49 ad 541: return ret;
542: }
543:
1.104 ad 544: if ((nattr.pta_flags & PT_FLAG_EXPLICIT_SCHED) != 0) {
1.103 ad 545: if (p != NULL) {
546: (void)pthread_setschedparam(newthread, p->ptap_policy,
547: &p->ptap_sp);
548: }
549: if ((newthread->pt_flags & PT_FLAG_SUSPENDED) == 0) {
550: (void)_lwp_continue(newthread->pt_lid);
551: }
552: }
553:
1.2 thorpej 554: *thread = newthread;
555:
556: return 0;
557: }
558:
559:
/*
 * Entry point of every new LWP: publish the thread name and lwpctl
 * block, run the user's start routine, then terminate via
 * pthread_exit().  Never returns.
 */
__dead static void
pthread__create_tramp(void *cookie)
{
	pthread_t self;
	void *retval;

	self = cookie;

	/*
	 * Throw away some stack in a feeble attempt to reduce cache
	 * thrash.  May help for SMT processors.  XXX We should not
	 * be allocating stacks on fixed 2MB boundaries.  Needs a
	 * thread register or decent thread local storage.
	 */
	(void)alloca(((unsigned)self->pt_lid & 7) << 8);

	if (self->pt_name != NULL) {
		pthread_mutex_lock(&self->pt_lock);
		/* Re-check under the lock: the name can change concurrently. */
		if (self->pt_name != NULL)
			(void)_lwp_setname(0, self->pt_name);
		pthread_mutex_unlock(&self->pt_lock);
	}

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &self->pt_lwpctl)) {
		err(EXIT_FAILURE, "_lwp_ctl");
	}

	retval = (*self->pt_func)(self->pt_arg);

	pthread_exit(retval);

	/*NOTREACHED*/
	pthread__abort();
}
594:
595: int
596: pthread_suspend_np(pthread_t thread)
597: {
1.44 chs 598: pthread_t self;
599:
600: self = pthread__self();
1.30 christos 601: if (self == thread) {
602: return EDEADLK;
603: }
1.78 ad 604: if (pthread__find(thread) != 0)
1.44 chs 605: return ESRCH;
1.79 ad 606: if (_lwp_suspend(thread->pt_lid) == 0)
607: return 0;
608: return errno;
1.30 christos 609: }
610:
611: int
612: pthread_resume_np(pthread_t thread)
613: {
1.78 ad 614:
615: if (pthread__find(thread) != 0)
1.44 chs 616: return ESRCH;
1.79 ad 617: if (_lwp_continue(thread->pt_lid) == 0)
618: return 0;
619: return errno;
1.2 thorpej 620: }
621:
1.156 ad 622: /*
623: * In case the thread is exiting at an inopportune time leaving waiters not
624: * awoken (because cancelled, for instance) make sure we have no waiters
625: * left.
626: */
627: static void
628: pthread__clear_waiters(pthread_t self)
629: {
630:
631: if (self->pt_nwaiters != 0) {
632: (void)_lwp_unpark_all(self->pt_waiters, self->pt_nwaiters,
633: NULL);
634: self->pt_nwaiters = 0;
635: }
636: self->pt_willpark = 0;
637: }
638:
/*
 * Terminate the calling thread: run cleanup handlers and thread-atexit
 * callbacks, destroy TSD, record the exit value, then exit the LWP.
 * Detached threads are reaped here; joinable ones become zombies for
 * pthread_join() to collect.  Never returns.
 */
void
pthread_exit(void *retval)
{
	pthread_t self;
	struct pt_clean_t *cleanup;

	if (__predict_false(__uselibcstub)) {
		__libc_thr_exit_stub(retval);
		goto out;
	}

	self = pthread__self();

	/* Disable cancellability. */
	pthread_mutex_lock(&self->pt_lock);
	self->pt_flags |= PT_FLAG_CS_DISABLED;
	self->pt_cancel = 0;

	/*
	 * Call any cancellation cleanup handlers.  The lock is dropped
	 * while handlers run, since they may call pthread_* functions.
	 */
	if (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
		pthread_mutex_unlock(&self->pt_lock);
		while (!PTQ_EMPTY(&self->pt_cleanup_stack)) {
			cleanup = PTQ_FIRST(&self->pt_cleanup_stack);
			PTQ_REMOVE(&self->pt_cleanup_stack, cleanup, ptc_next);
			(*cleanup->ptc_cleanup)(cleanup->ptc_arg);
		}
		pthread_mutex_lock(&self->pt_lock);
	}

	/* C++ thread-local destructors also run unlocked. */
	pthread_mutex_unlock(&self->pt_lock);
	__cxa_thread_run_atexit();
	pthread_mutex_lock(&self->pt_lock);

	/* Perform cleanup of thread-specific data */
	pthread__destroy_tsd(self);

	/*
	 * Signal our exit.  Our stack and pthread_t won't be reused until
	 * pthread_create() can see from kernel info that this LWP is gone.
	 */
	self->pt_exitval = retval;
	if (self->pt_flags & PT_FLAG_DETACHED) {
		/* pthread__reap() will drop the lock. */
		pthread__reap(self);
		pthread__clear_waiters(self);
		_lwp_exit();
	} else {
		self->pt_state = PT_STATE_ZOMBIE;
		pthread_mutex_unlock(&self->pt_lock);
		pthread__clear_waiters(self);
		/* Note: name will be freed by the joiner. */
		_lwp_exit();
	}

out:
	/*NOTREACHED*/
	pthread__abort();
	exit(1);
}
698:
699:
/*
 * Wait for "thread" to terminate and collect its exit value.  Returns
 * 0 on success, or ESRCH/EINVAL/EDEADLK for bad arguments; this is a
 * cancellation point.  On success the target is moved to the dead
 * queue for reuse.
 */
int
pthread_join(pthread_t thread, void **valptr)
{
	pthread_t self;

	self = pthread__self();

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	if (thread == self)
		return EDEADLK;

	/* IEEE Std 1003.1 says pthread_join() never returns EINTR. */
	for (;;) {
		/* Act on a pending cancel before (re)blocking in the kernel. */
		pthread__testcancel(self);
		if (_lwp_wait(thread->pt_lid, NULL) == 0)
			break;
		if (errno != EINTR)
			return errno;
	}

	/*
	 * Don't test for cancellation again.  The spec is that if
	 * cancelled, pthread_join() must not have succeeded.
	 */
	pthread_mutex_lock(&thread->pt_lock);
	if (thread->pt_state != PT_STATE_ZOMBIE) {
		pthread__errorfunc(__FILE__, __LINE__, __func__,
		    "not a zombie");
	}
	if (valptr != NULL)
		*valptr = thread->pt_exitval;

	/* pthread__reap() will drop the lock. */
	pthread__reap(thread);
	return 0;
}
741:
742: static void
743: pthread__reap(pthread_t thread)
744: {
745: char *name;
746:
1.74 ad 747: name = thread->pt_name;
748: thread->pt_name = NULL;
749: thread->pt_state = PT_STATE_DEAD;
1.94 ad 750: pthread_mutex_unlock(&thread->pt_lock);
751:
1.82 ad 752: pthread_mutex_lock(&pthread__deadqueue_lock);
1.77 ad 753: PTQ_INSERT_HEAD(&pthread__deadqueue, thread, pt_deadq);
1.82 ad 754: pthread_mutex_unlock(&pthread__deadqueue_lock);
1.94 ad 755:
1.74 ad 756: if (name != NULL)
757: free(name);
1.2 thorpej 758: }
759:
760: int
761: pthread_equal(pthread_t t1, pthread_t t2)
762: {
1.143 christos 763: if (__predict_false(__uselibcstub))
764: return __libc_thr_equal_stub(t1, t2);
1.2 thorpej 765:
766: /* Nothing special here. */
767: return (t1 == t2);
768: }
769:
770:
/*
 * Mark "thread" as detached so its resources are reclaimed on exit
 * without a join.  Returns 0, ESRCH, EINVAL (bad handle or already
 * detached), or an errno from _lwp_detach().  If the target is already
 * a zombie it is reaped immediately.
 */
int
pthread_detach(pthread_t thread)
{
	int error;

	if (pthread__find(thread) != 0)
		return ESRCH;

	if (thread->pt_magic != PT_MAGIC)
		return EINVAL;

	pthread_mutex_lock(&thread->pt_lock);
	if ((thread->pt_flags & PT_FLAG_DETACHED) != 0) {
		/* Detaching twice is an application error. */
		error = EINVAL;
	} else {
		error = _lwp_detach(thread->pt_lid);
		if (error == 0)
			thread->pt_flags |= PT_FLAG_DETACHED;
		else
			error = errno;
	}
	if (thread->pt_state == PT_STATE_ZOMBIE) {
		/* pthread__reap() will drop the lock. */
		pthread__reap(thread);
	} else
		pthread_mutex_unlock(&thread->pt_lock);
	return error;
}
799:
800:
801: int
1.11 thorpej 802: pthread_getname_np(pthread_t thread, char *name, size_t len)
803: {
804:
1.78 ad 805: if (pthread__find(thread) != 0)
1.11 thorpej 806: return ESRCH;
807:
808: if (thread->pt_magic != PT_MAGIC)
809: return EINVAL;
810:
1.82 ad 811: pthread_mutex_lock(&thread->pt_lock);
1.11 thorpej 812: if (thread->pt_name == NULL)
813: name[0] = '\0';
814: else
815: strlcpy(name, thread->pt_name, len);
1.82 ad 816: pthread_mutex_unlock(&thread->pt_lock);
1.11 thorpej 817:
818: return 0;
819: }
820:
821:
822: int
823: pthread_setname_np(pthread_t thread, const char *name, void *arg)
824: {
825: char *oldname, *cp, newname[PTHREAD_MAX_NAMELEN_NP];
826: int namelen;
827:
1.78 ad 828: if (pthread__find(thread) != 0)
1.11 thorpej 829: return ESRCH;
830:
831: if (thread->pt_magic != PT_MAGIC)
832: return EINVAL;
833:
834: namelen = snprintf(newname, sizeof(newname), name, arg);
835: if (namelen >= PTHREAD_MAX_NAMELEN_NP)
836: return EINVAL;
837:
838: cp = strdup(newname);
839: if (cp == NULL)
840: return ENOMEM;
841:
1.82 ad 842: pthread_mutex_lock(&thread->pt_lock);
1.11 thorpej 843: oldname = thread->pt_name;
844: thread->pt_name = cp;
1.86 ad 845: (void)_lwp_setname(thread->pt_lid, cp);
1.82 ad 846: pthread_mutex_unlock(&thread->pt_lock);
1.11 thorpej 847:
848: if (oldname != NULL)
849: free(oldname);
850:
851: return 0;
852: }
853:
1.2 thorpej 854:
/* Return the calling thread's handle (libc stub before init). */
pthread_t
pthread_self(void)
{
	if (__predict_false(__uselibcstub))
		return (pthread_t)__libc_thr_self_stub();

	return pthread__self();
}
863:
864:
865: int
866: pthread_cancel(pthread_t thread)
867: {
868:
1.78 ad 869: if (pthread__find(thread) != 0)
1.44 chs 870: return ESRCH;
1.82 ad 871: pthread_mutex_lock(&thread->pt_lock);
1.49 ad 872: thread->pt_flags |= PT_FLAG_CS_PENDING;
873: if ((thread->pt_flags & PT_FLAG_CS_DISABLED) == 0) {
874: thread->pt_cancel = 1;
1.82 ad 875: pthread_mutex_unlock(&thread->pt_lock);
1.49 ad 876: _lwp_wakeup(thread->pt_lid);
877: } else
1.82 ad 878: pthread_mutex_unlock(&thread->pt_lock);
1.2 thorpej 879:
880: return 0;
881: }
882:
883:
/*
 * Enable or disable cancellation for the calling thread, optionally
 * returning the previous state.  Re-enabling with a cancel pending and
 * async type set terminates the thread immediately.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
	pthread_t self;
	int retval;

	if (__predict_false(__uselibcstub))
		return __libc_thr_setcancelstate_stub(state, oldstate);

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldstate != NULL) {
		if (self->pt_flags & PT_FLAG_CS_DISABLED)
			*oldstate = PTHREAD_CANCEL_DISABLE;
		else
			*oldstate = PTHREAD_CANCEL_ENABLE;
	}

	if (state == PTHREAD_CANCEL_DISABLE) {
		self->pt_flags |= PT_FLAG_CS_DISABLED;
		/* Park an armed cancel as "pending" until re-enabled. */
		if (self->pt_cancel) {
			self->pt_flags |= PT_FLAG_CS_PENDING;
			self->pt_cancel = 0;
		}
	} else if (state == PTHREAD_CANCEL_ENABLE) {
		self->pt_flags &= ~PT_FLAG_CS_DISABLED;
		/*
		 * If a cancellation was requested while cancellation
		 * was disabled, note that fact for future
		 * cancellation tests.
		 */
		if (self->pt_flags & PT_FLAG_CS_PENDING) {
			self->pt_cancel = 1;
			/* This is not a deferred cancellation point. */
			if (self->pt_flags & PT_FLAG_CS_ASYNC) {
				pthread_mutex_unlock(&self->pt_lock);
				pthread__cancelled();
			}
		}
	} else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}
933:
934:
/*
 * Switch the calling thread between deferred and asynchronous
 * cancellation, optionally returning the previous type.  Switching to
 * async with a cancel already armed terminates the thread immediately.
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;
	int retval;

	self = pthread__self();
	retval = 0;

	pthread_mutex_lock(&self->pt_lock);

	if (oldtype != NULL) {
		if (self->pt_flags & PT_FLAG_CS_ASYNC)
			*oldtype = PTHREAD_CANCEL_ASYNCHRONOUS;
		else
			*oldtype = PTHREAD_CANCEL_DEFERRED;
	}

	if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
		self->pt_flags |= PT_FLAG_CS_ASYNC;
		/* An armed cancel takes effect at once in async mode. */
		if (self->pt_cancel) {
			pthread_mutex_unlock(&self->pt_lock);
			pthread__cancelled();
		}
	} else if (type == PTHREAD_CANCEL_DEFERRED)
		self->pt_flags &= ~PT_FLAG_CS_ASYNC;
	else
		retval = EINVAL;

	pthread_mutex_unlock(&self->pt_lock);

	return retval;
}
968:
969:
970: void
1.94 ad 971: pthread_testcancel(void)
1.2 thorpej 972: {
973: pthread_t self;
974:
975: self = pthread__self();
976: if (self->pt_cancel)
1.94 ad 977: pthread__cancelled();
1.2 thorpej 978: }
979:
980:
981: /*
982: * POSIX requires that certain functions return an error rather than
983: * invoking undefined behavior even when handed completely bogus
1.139 rmind 984: * pthread_t values, e.g. stack garbage.
1.2 thorpej 985: */
986: int
1.78 ad 987: pthread__find(pthread_t id)
1.2 thorpej 988: {
989: pthread_t target;
1.139 rmind 990: int error;
1.2 thorpej 991:
1.84 ad 992: pthread_rwlock_rdlock(&pthread__alltree_lock);
1.139 rmind 993: target = rb_tree_find_node(&pthread__alltree, id);
994: error = (target && target->pt_state != PT_STATE_DEAD) ? 0 : ESRCH;
1.84 ad 995: pthread_rwlock_unlock(&pthread__alltree_lock);
1.2 thorpej 996:
1.139 rmind 997: return error;
1.2 thorpej 998: }
999:
1000:
1001: void
1002: pthread__testcancel(pthread_t self)
1003: {
1004:
1005: if (self->pt_cancel)
1.94 ad 1006: pthread__cancelled();
1007: }
1008:
1009:
/*
 * pthread__cancelled: terminate the calling thread in response to a
 * cancellation request, exiting with PTHREAD_CANCELED so that a joiner
 * can observe the cancellation.  Does not return.
 */
void
pthread__cancelled(void)
{

	pthread_exit(PTHREAD_CANCELED);
}
1016:
1017:
1018: void
1019: pthread__cleanup_push(void (*cleanup)(void *), void *arg, void *store)
1020: {
1021: pthread_t self;
1022: struct pt_clean_t *entry;
1023:
1024: self = pthread__self();
1025: entry = store;
1026: entry->ptc_cleanup = cleanup;
1027: entry->ptc_arg = arg;
1028: PTQ_INSERT_HEAD(&self->pt_cleanup_stack, entry, ptc_next);
1029: }
1030:
1031:
1032: void
1033: pthread__cleanup_pop(int ex, void *store)
1034: {
1035: pthread_t self;
1036: struct pt_clean_t *entry;
1037:
1038: self = pthread__self();
1039: entry = store;
1040:
1041: PTQ_REMOVE(&self->pt_cleanup_stack, entry, ptc_next);
1042: if (ex)
1043: (*entry->ptc_cleanup)(entry->ptc_arg);
1044: }
1045:
1046:
1.99 ad 1047: int *
1048: pthread__errno(void)
1049: {
1050: pthread_t self;
1051:
1.143 christos 1052: if (__predict_false(__uselibcstub)) {
1053: pthread__errorfunc(__FILE__, __LINE__, __func__,
1054: "pthread__errno() requires linking with -lpthread");
1055: return __libc_thr_errno_stub();
1056: }
1057:
1.99 ad 1058: self = pthread__self();
1059:
1060: return &(self->pt_errno);
1061: }
1062:
1.27 nathanw 1063: ssize_t _sys_write(int, const void *, size_t);
1064:
1.9 nathanw 1065: void
1.34 drochner 1066: pthread__assertfunc(const char *file, int line, const char *function,
1067: const char *expr)
1.9 nathanw 1068: {
1069: char buf[1024];
1070: int len;
1071:
1072: /*
1073: * snprintf should not acquire any locks, or we could
1074: * end up deadlocked if the assert caller held locks.
1075: */
1076: len = snprintf(buf, 1024,
1077: "assertion \"%s\" failed: file \"%s\", line %d%s%s%s\n",
1078: expr, file, line,
1079: function ? ", function \"" : "",
1080: function ? function : "",
1081: function ? "\"" : "");
1082:
1.27 nathanw 1083: _sys_write(STDERR_FILENO, buf, (size_t)len);
1.9 nathanw 1084: (void)kill(getpid(), SIGABRT);
1085:
1086: _exit(1);
1.17 nathanw 1087: }
1088:
1089:
1090: void
1.34 drochner 1091: pthread__errorfunc(const char *file, int line, const char *function,
1092: const char *msg)
1.17 nathanw 1093: {
1094: char buf[1024];
1.24 nathanw 1095: size_t len;
1.17 nathanw 1096:
1.24 nathanw 1097: if (pthread__diagassert == 0)
1.17 nathanw 1098: return;
1099:
1100: /*
1101: * snprintf should not acquire any locks, or we could
1102: * end up deadlocked if the assert caller held locks.
1103: */
1104: len = snprintf(buf, 1024,
1.24 nathanw 1105: "%s: Error detected by libpthread: %s.\n"
1106: "Detected by file \"%s\", line %d%s%s%s.\n"
1107: "See pthread(3) for information.\n",
1108: getprogname(), msg, file, line,
1.17 nathanw 1109: function ? ", function \"" : "",
1110: function ? function : "",
1.24 nathanw 1111: function ? "\"" : "");
1112:
1113: if (pthread__diagassert & DIAGASSERT_STDERR)
1.27 nathanw 1114: _sys_write(STDERR_FILENO, buf, len);
1.24 nathanw 1115:
1116: if (pthread__diagassert & DIAGASSERT_SYSLOG)
1117: syslog(LOG_DEBUG | LOG_USER, "%s", buf);
1.17 nathanw 1118:
1.24 nathanw 1119: if (pthread__diagassert & DIAGASSERT_ABORT) {
1.17 nathanw 1120: (void)kill(getpid(), SIGABRT);
1121: _exit(1);
1122: }
1.2 thorpej 1123: }
1.49 ad 1124:
1.51 ad 1125: /*
1.52 ad 1126: * Thread park/unpark operations. The kernel operations are
1127: * modelled after a brief description from "Multithreading in
1128: * the Solaris Operating Environment":
1.51 ad 1129: *
1130: * http://www.sun.com/software/whitepapers/solaris9/multithread.pdf
1131: */
1132:
1.49 ad 1133: #define OOPS(msg) \
1.58 christos 1134: pthread__errorfunc(__FILE__, __LINE__, __func__, msg)
1.49 ad 1135:
/*
 * pthread__park: block the calling thread on "queue" until awoken,
 * cancelled, or (if abstime != NULL) the absolute CLOCK_REALTIME
 * deadline expires.  Called with "lock" held; it is dropped before
 * sleeping and is reacquired only if we must dequeue ourselves.
 *
 * Returns 0 on normal wakeup, ETIMEDOUT on deadline expiry, or EINTR
 * if "cancelpt" is set and a cancellation request is pending.
 */
int
pthread__park(pthread_t self, pthread_mutex_t *lock,
    pthread_queue_t *queue, const struct timespec *abstime,
    int cancelpt, const void *hint)
{
	int rv, error;
	void *obj;

	/*
	 * Flag that we are between queueing and parking so that other
	 * code can tell wakeups may still be deferred to _lwp_park().
	 */
	self->pt_willpark = 1;
	pthread_mutex_unlock(lock);
	self->pt_willpark = 0;

	/*
	 * Wait until we are awoken by a pending unpark operation,
	 * a signal, an unpark posted after we have gone asleep,
	 * or an expired timeout.
	 *
	 * It is fine to test the value of pt_sleepobj without
	 * holding any locks, because:
	 *
	 * o Only the blocking thread (this thread) ever sets them
	 *   to a non-NULL value.
	 *
	 * o Other threads may set them NULL, but if they do so they
	 *   must also make this thread return from _lwp_park.
	 *
	 * o _lwp_park, _lwp_unpark and _lwp_unpark_all are system
	 *   calls and all make use of spinlocks in the kernel.  So
	 *   these system calls act as full memory barriers, and will
	 *   ensure that the calling CPU's store buffers are drained.
	 *   In combination with the spinlock release before unpark,
	 *   this means that modification of pt_sleepobj/onq by another
	 *   thread will become globally visible before that thread
	 *   schedules an unpark operation on this thread.
	 *
	 * Note: the test in the while() statement dodges the park op if
	 * we have already been awoken, unless there is another thread to
	 * awaken.  This saves a syscall - if we were already awakened,
	 * the next call to _lwp_park() would need to return early in order
	 * to eat the previous wakeup.
	 */
	rv = 0;
	do {
		/*
		 * If we deferred unparking a thread, arrange to
		 * have _lwp_park() restart it before blocking.
		 */
		error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
		    __UNCONST(abstime), self->pt_unpark, hint, hint);
		self->pt_unpark = 0;
		if (error != 0) {
			switch (rv = errno) {
			case EINTR:
			case EALREADY:
				/* Spurious or pre-posted wakeup: retry. */
				rv = 0;
				break;
			case ETIMEDOUT:
				/* Deadline passed; reported to caller. */
				break;
			default:
				OOPS("_lwp_park failed");
				break;
			}
		}
		/* Check for cancellation. */
		if (cancelpt && self->pt_cancel)
			rv = EINTR;
	} while (self->pt_sleepobj != NULL && rv == 0);

	/*
	 * If we have been awoken early but are still on the queue,
	 * then remove ourself.  Again, it's safe to do the test
	 * without holding any locks.
	 */
	if (__predict_false(self->pt_sleepobj != NULL)) {
		pthread_mutex_lock(lock);
		if ((obj = self->pt_sleepobj) != NULL) {
			PTQ_REMOVE(queue, self, pt_sleep);
			self->pt_sleepobj = NULL;
			/* Run any early-wakeup callback the caller set. */
			if (obj != NULL && self->pt_early != NULL)
				(*self->pt_early)(obj);
		}
		pthread_mutex_unlock(lock);
	}
	self->pt_early = NULL;

	return rv;
}
1223:
/*
 * pthread__unpark: wake the first thread sleeping on "queue".
 * Called with "interlock" held.  The target's LID is batched into
 * self->pt_waiters and the actual wakeup is deferred to
 * pthread__mutex_deferwake(); if the batch is already full it is
 * flushed to the kernel with _lwp_unpark_all() first.
 *
 * NOTE(review): assumes the queue is non-empty — PTQ_FIRST() is
 * dereferenced without a NULL check, so callers must guarantee a
 * waiter exists; verify at call sites.
 */
void
pthread__unpark(pthread_queue_t *queue, pthread_t self,
    pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max;
	size_t nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	target = PTQ_FIRST(queue);
	if (nwaiters == max) {
		/* Overflow. */
		(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
		    __UNVOLATILE(&interlock->ptm_waiters));
		nwaiters = 0;
	}
	/* Clearing pt_sleepobj tells the sleeper it has been woken. */
	target->pt_sleepobj = NULL;
	self->pt_waiters[nwaiters++] = target->pt_lid;
	PTQ_REMOVE(queue, target, pt_sleep);
	self->pt_nwaiters = nwaiters;
	pthread__mutex_deferwake(self, interlock);
}
1247:
/*
 * pthread__unpark_all: wake every thread sleeping on "queue".
 * Called with "interlock" held.  LIDs are accumulated into
 * self->pt_waiters, flushing to the kernel with _lwp_unpark_all()
 * whenever the batch fills; final wakeups are deferred to
 * pthread__mutex_deferwake().  The queue is reset to empty.
 */
void
pthread__unpark_all(pthread_queue_t *queue, pthread_t self,
    pthread_mutex_t *interlock)
{
	pthread_t target;
	u_int max;
	size_t nwaiters;

	max = pthread__unpark_max;
	nwaiters = self->pt_nwaiters;
	PTQ_FOREACH(target, queue, pt_sleep) {
		if (nwaiters == max) {
			/* Overflow. */
			(void)_lwp_unpark_all(self->pt_waiters, nwaiters,
			    __UNVOLATILE(&interlock->ptm_waiters));
			nwaiters = 0;
		}
		/* Clearing pt_sleepobj tells each sleeper it is awake. */
		target->pt_sleepobj = NULL;
		self->pt_waiters[nwaiters++] = target->pt_lid;
	}
	self->pt_nwaiters = nwaiters;
	PTQ_INIT(queue);
	pthread__mutex_deferwake(self, interlock);
}
1272:
1273: #undef OOPS
1.76 ad 1274:
1.126 joerg 1275: static void
1276: pthread__initmainstack(void)
1.76 ad 1277: {
1.126 joerg 1278: struct rlimit slimit;
1279: const AuxInfo *aux;
1.148 joerg 1280: size_t size, len;
1281: int mib[2];
1282: unsigned int value;
1.76 ad 1283:
1.126 joerg 1284: _DIAGASSERT(_dlauxinfo() != NULL);
1.76 ad 1285:
1.126 joerg 1286: if (getrlimit(RLIMIT_STACK, &slimit) == -1)
1.146 manu 1287: err(EXIT_FAILURE,
1288: "Couldn't get stack resource consumption limits");
1.127 joerg 1289: size = slimit.rlim_cur;
1.146 manu 1290: pthread__main->pt_stack.ss_size = size;
1.148 joerg 1291: pthread__main->pt_guardsize = pthread__pagesize;
1292:
1293: mib[0] = CTL_VM;
1294: mib[1] = VM_GUARD_SIZE;
1295: len = sizeof(value);
1296: if (sysctl(mib, __arraycount(mib), &value, &len, NULL, 0) == 0)
1297: pthread__main->pt_guardsize = value;
1.76 ad 1298:
1.126 joerg 1299: for (aux = _dlauxinfo(); aux->a_type != AT_NULL; ++aux) {
1300: if (aux->a_type == AT_STACKBASE) {
1.127 joerg 1301: #ifdef __MACHINE_STACK_GROWS_UP
1.146 manu 1302: pthread__main->pt_stack.ss_sp = (void *)aux->a_v;
1.127 joerg 1303: #else
1.146 manu 1304: pthread__main->pt_stack.ss_sp = (char *)aux->a_v - size;
1.127 joerg 1305: #endif
1.126 joerg 1306: break;
1307: }
1308: }
1.153 christos 1309: pthread__copy_tsd(pthread__main);
1.76 ad 1310: }
1311:
1312: /*
1313: * Set up the slightly special stack for the "initial" thread, which
1314: * runs on the normal system stack, and thus gets slightly different
1315: * treatment.
1316: */
static void
pthread__initmain(pthread_t *newt)
{
	char *value;

	/* Establish the main thread's stack extent and guard first. */
	pthread__initmainstack();

	/*
	 * PTHREAD_STACKSIZE (in kilobytes) may override the default
	 * per-thread stack size, but never beyond the main thread's
	 * stack size.
	 */
	value = pthread__getenv("PTHREAD_STACKSIZE");
	if (value != NULL) {
		pthread__stacksize = atoi(value) * 1024;
		if (pthread__stacksize > pthread__main->pt_stack.ss_size)
			pthread__stacksize = pthread__main->pt_stack.ss_size;
	}
	if (pthread__stacksize == 0)
		pthread__stacksize = pthread__main->pt_stack.ss_size;
	/* Round up to a whole number of pages; insist on at least 4. */
	pthread__stacksize += pthread__pagesize - 1;
	pthread__stacksize &= ~(pthread__pagesize - 1);
	if (pthread__stacksize < 4 * pthread__pagesize)
		errx(1, "Stacksize limit is too low, minimum %zd kbyte.",
		    4 * pthread__pagesize / 1024);

	*newt = pthread__main;
	/* Hook the main thread up to its TCB, per-platform. */
#if defined(_PTHREAD_GETTCB_EXT)
	pthread__main->pt_tls = _PTHREAD_GETTCB_EXT();
#elif defined(__HAVE___LWP_GETTCB_FAST)
	pthread__main->pt_tls = __lwp_gettcb_fast();
#else
	pthread__main->pt_tls = _lwp_getprivate();
#endif
	pthread__main->pt_tls->tcb_pthread = pthread__main;
}
1.84 ad 1348:
1.139 rmind 1349: static signed int
1.141 christos 1350: /*ARGSUSED*/
1.139 rmind 1351: pthread__cmp(void *ctx, const void *n1, const void *n2)
1.84 ad 1352: {
1.140 apb 1353: const uintptr_t p1 = (const uintptr_t)n1;
1354: const uintptr_t p2 = (const uintptr_t)n2;
1.109 drochner 1355:
1.139 rmind 1356: if (p1 < p2)
1357: return -1;
1358: if (p1 > p2)
1.109 drochner 1359: return 1;
1.139 rmind 1360: return 0;
1.84 ad 1361: }
1362:
1.87 ad 1363: /* Because getenv() wants to use locks. */
1364: char *
1365: pthread__getenv(const char *name)
1366: {
1.117 tron 1367: extern char **environ;
1368: size_t l_name, offset;
1.87 ad 1369:
1.149 joerg 1370: if (issetugid())
1371: return (NULL);
1372:
1.117 tron 1373: l_name = strlen(name);
1374: for (offset = 0; environ[offset] != NULL; offset++) {
1375: if (strncmp(name, environ[offset], l_name) == 0 &&
1376: environ[offset][l_name] == '=') {
1377: return environ[offset] + l_name + 1;
1378: }
1379: }
1380:
1381: return NULL;
1.87 ad 1382: }
1383:
1.101 ad 1384: pthread_mutex_t *
1385: pthread__hashlock(volatile const void *p)
1386: {
1387: uintptr_t v;
1.87 ad 1388:
1.101 ad 1389: v = (uintptr_t)p;
1390: return &hashlocks[((v >> 9) ^ (v >> 3)) & (NHASHLOCK - 1)].mutex;
1391: }
1.103 ad 1392:
1393: int
1394: pthread__checkpri(int pri)
1395: {
1.105 matt 1396: static int havepri;
1397: static long min, max;
1.103 ad 1398:
1399: if (!havepri) {
1400: min = sysconf(_SC_SCHED_PRI_MIN);
1401: max = sysconf(_SC_SCHED_PRI_MAX);
1402: havepri = 1;
1403: }
1404: return (pri < min || pri > max) ? EINVAL : 0;
1405: }
CVSweb <webmaster@jp.NetBSD.org>