Annotation of src/sys/kern/kern_lwp.c, Revision 1.152
1.152 ! rmind 1: /* $NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $ */
1.2 thorpej 2:
3: /*-
1.127 ad 4: * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
1.2 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.52 ad 8: * by Nathan J. Williams, and Andrew Doran.
1.2 thorpej 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
1.9 lukem 31:
1.52 ad 32: /*
33: * Overview
34: *
1.66 ad 35: * Lightweight processes (LWPs) are the basic unit or thread of
1.52 ad 36: * execution within the kernel. The core state of an LWP is described
1.66 ad 37: * by "struct lwp", also known as lwp_t.
1.52 ad 38: *
39: * Each LWP is contained within a process (described by "struct proc"),
40: * Every process contains at least one LWP, but may contain more. The
41: * process describes attributes shared among all of its LWPs such as a
42: * private address space, global execution state (stopped, active,
43: * zombie, ...), signal disposition and so on. On a multiprocessor
1.66 ad 44: * machine, multiple LWPs may be executing concurrently in the kernel.
1.52 ad 45: *
46: * Execution states
47: *
48: * At any given time, an LWP has overall state that is described by
49: * lwp::l_stat. The states are broken into two sets below. The first
50: * set is guaranteed to represent the absolute, current state of the
51: * LWP:
1.101 rmind 52: *
53: * LSONPROC
54: *
55: * On processor: the LWP is executing on a CPU, either in the
56: * kernel or in user space.
57: *
58: * LSRUN
59: *
60: * Runnable: the LWP is parked on a run queue, and may soon be
61: * chosen to run by an idle processor, or by a processor that
62: * has been asked to preempt a currently running but lower
1.134 rmind 63: * priority LWP.
1.101 rmind 64: *
65: * LSIDL
66: *
67: * Idle: the LWP has been created but has not yet executed,
1.66 ad 68: * or it has ceased executing a unit of work and is waiting
69: * to be started again.
1.101 rmind 70: *
71: * LSSUSPENDED:
72: *
73: * Suspended: the LWP has had its execution suspended by
1.52 ad 74: * another LWP in the same process using the _lwp_suspend()
75: * system call. User-level LWPs also enter the suspended
76: * state when the system is shutting down.
77: *
78: * The second set represents a "statement of intent" on behalf of the
79: * LWP. The LWP may in fact be executing on a processor, or may be
1.66 ad 80: * sleeping or idle. It is expected to take the necessary action to
1.101 rmind 81: * stop executing or become "running" again within a short timeframe.
1.115 ad 82: * The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
1.101 rmind 83: * Importantly, it indicates that its state is tied to a CPU.
84: *
85: * LSZOMB:
86: *
87: * Dead or dying: the LWP has released most of its resources
1.129 ad 88: * and is about to switch away into oblivion, or has already
1.66 ad 89: * switched away. When it switches away, its few remaining
90: * resources can be collected.
1.101 rmind 91: *
92: * LSSLEEP:
93: *
94: * Sleeping: the LWP has entered itself onto a sleep queue, and
95: * has switched away or will switch away shortly to allow other
1.66 ad 96: * LWPs to run on the CPU.
1.101 rmind 97: *
98: * LSSTOP:
99: *
100: * Stopped: the LWP has been stopped as a result of a job
101: * control signal, or as a result of the ptrace() interface.
102: *
103: * Stopped LWPs may run briefly within the kernel to handle
104: * signals that they receive, but will not return to user space
105: * until their process' state is changed away from stopped.
106: *
107: * Single LWPs within a process cannot be set stopped
108: * selectively: all actions that can stop or continue LWPs
109: * occur at the process level.
110: *
1.52 ad 111: * State transitions
112: *
1.66 ad 113: * Note that the LSSTOP state may only be set when returning to
114: * user space in userret(), or when sleeping interruptibly. The
115: * LSSUSPENDED state may only be set in userret(). Before setting
116: * those states, we try to ensure that the LWPs will release all
117: * locks that they hold, and at a minimum try to ensure that the
118: * LWP can be set runnable again by a signal.
1.52 ad 119: *
120: * LWPs may transition states in the following ways:
121: *
122: * RUN -------> ONPROC ONPROC -----> RUN
1.129 ad 123: * > SLEEP
124: * > STOPPED
1.52 ad 125: * > SUSPENDED
126: * > ZOMB
1.129 ad 127: * > IDL (special cases)
1.52 ad 128: *
129: * STOPPED ---> RUN SUSPENDED --> RUN
1.129 ad 130: * > SLEEP
1.52 ad 131: *
132: * SLEEP -----> ONPROC IDL --------> RUN
1.101 rmind 133: * > RUN > SUSPENDED
134: * > STOPPED > STOPPED
1.129 ad 135: * > ONPROC (special cases)
1.52 ad 136: *
1.129 ad 137: * Some state transitions are only possible with kernel threads (eg
138: * ONPROC -> IDL) and happen under tightly controlled circumstances
139: * free of unwanted side effects.
1.66 ad 140: *
1.114 rmind 141: * Migration
142: *
143: * Migration of threads from one CPU to another may be performed
144: * internally by the scheduler via the sched_takecpu() or
145: * sched_catchlwp() functions. The universal lwp_migrate() function
146: * should be used for any other cases. Subsystems in the kernel must
147: * be aware that the CPU of an LWP may change while it is not locked.
148: *
1.52 ad 149: * Locking
150: *
151: * The majority of fields in 'struct lwp' are covered by a single,
1.66 ad 152: * general spin lock pointed to by lwp::l_mutex. The locks covering
1.52 ad 153: * each field are documented in sys/lwp.h.
154: *
1.66 ad 155: * State transitions must be made with the LWP's general lock held,
1.152 ! rmind 156: * and may cause the LWP's lock pointer to change. Manipulation of
1.66 ad 157: * the general lock is not performed directly, but through calls to
1.152 ! rmind 158: * lwp_lock(), lwp_unlock() and others. It should be noted that
! 159: * adaptive locks are not allowed to be released while the LWP's
! 160: * lock is held (unlike for other spin-locks).
1.52 ad 161: *
162: * States and their associated locks:
163: *
1.74 rmind 164: * LSONPROC, LSZOMB:
1.52 ad 165: *
1.64 yamt 166: * Always covered by spc_lwplock, which protects running LWPs.
1.129 ad 167: * This is a per-CPU lock and matches lwp::l_cpu.
1.52 ad 168: *
1.74 rmind 169: * LSIDL, LSRUN:
1.52 ad 170: *
1.64 yamt 171: * Always covered by spc_mutex, which protects the run queues.
1.129 ad 172: * This is a per-CPU lock and matches lwp::l_cpu.
1.52 ad 173: *
174: * LSSLEEP:
175: *
1.66 ad 176: * Covered by a lock associated with the sleep queue that the
1.129 ad 177: * LWP resides on. Matches lwp::l_sleepq::sq_mutex.
1.52 ad 178: *
179: * LSSTOP, LSSUSPENDED:
1.101 rmind 180: *
1.52 ad 181: * If the LWP was previously sleeping (l_wchan != NULL), then
1.66 ad 182: * l_mutex references the sleep queue lock. If the LWP was
1.52 ad 183: * runnable or on the CPU when halted, or has been removed from
1.66 ad 184: * the sleep queue since halted, then the lock is spc_lwplock.
1.52 ad 185: *
186: * The lock order is as follows:
187: *
1.64 yamt 188: * spc::spc_lwplock ->
1.112 ad 189: * sleeptab::st_mutex ->
1.64 yamt 190: * tschain_t::tc_mutex ->
191: * spc::spc_mutex
1.52 ad 192: *
1.103 ad 193: * Each process has a scheduler state lock (proc::p_lock), and a
1.52 ad 194: * number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
195: * so on. When an LWP is to be entered into or removed from one of the
1.103 ad 196: * following states, p_lock must be held and the process wide counters
1.52 ad 197: * adjusted:
198: *
199: * LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
200: *
1.129 ad 201: * (But not always for kernel threads. There are some special cases
202: * as mentioned above. See kern_softint.c.)
203: *
1.52 ad 204: * Note that an LWP is considered running or likely to run soon if in
205: * one of the following states. This affects the value of p_nrlwps:
206: *
207: * LSRUN, LSONPROC, LSSLEEP
208: *
1.103 ad 209: * p_lock does not need to be held when transitioning among these
1.129 ad 210: * three states, hence p_lock is rarely taken for state transitions.
1.52 ad 211: */
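/*
 * A minimal sketch of the locking pattern described above (illustrative
 * only, not part of the source).  "l" is assumed to belong to process
 * "p", and p->p_lock is held to keep the LWP from being freed:
 *
 *	mutex_enter(p->p_lock);
 *	lwp_lock(l);			-> takes the lock l->l_mutex points at
 *	if (l->l_stat == LSSLEEP) {
 *		...			-> l_mutex is the sleep queue lock here
 *	}
 *	lwp_unlock(l);			-> drops whichever lock was taken
 *	mutex_exit(p->p_lock);
 */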
212:
1.9 lukem 213: #include <sys/cdefs.h>
1.152 ! rmind 214: __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.151 2010/07/07 01:30:37 chs Exp $");
1.8 martin 215:
1.84 yamt 216: #include "opt_ddb.h"
1.52 ad 217: #include "opt_lockdebug.h"
1.124 wrstuden 218: #include "opt_sa.h"
1.139 darran 219: #include "opt_dtrace.h"
1.2 thorpej 220:
1.47 hannken 221: #define _LWP_API_PRIVATE
222:
1.2 thorpej 223: #include <sys/param.h>
224: #include <sys/systm.h>
1.64 yamt 225: #include <sys/cpu.h>
1.2 thorpej 226: #include <sys/pool.h>
227: #include <sys/proc.h>
1.124 wrstuden 228: #include <sys/sa.h>
229: #include <sys/savar.h>
1.2 thorpej 230: #include <sys/syscallargs.h>
1.57 dsl 231: #include <sys/syscall_stats.h>
1.37 ad 232: #include <sys/kauth.h>
1.52 ad 233: #include <sys/sleepq.h>
234: #include <sys/lockdebug.h>
235: #include <sys/kmem.h>
1.91 rmind 236: #include <sys/pset.h>
1.75 ad 237: #include <sys/intr.h>
1.78 ad 238: #include <sys/lwpctl.h>
1.81 ad 239: #include <sys/atomic.h>
1.131 ad 240: #include <sys/filedesc.h>
1.138 darran 241: #include <sys/dtrace_bsd.h>
1.141 darran 242: #include <sys/sdt.h>
1.138 darran 243:
1.2 thorpej 244: #include <uvm/uvm_extern.h>
1.80 skrll 245: #include <uvm/uvm_object.h>
1.2 thorpej 246:
1.152 ! rmind 247: static pool_cache_t lwp_cache __read_mostly;
! 248: struct lwplist alllwp __cacheline_aligned;
1.41 thorpej 249:
1.141 darran 250: /* DTrace proc provider probes */
251: SDT_PROBE_DEFINE(proc,,,lwp_create,
252: "struct lwp *", NULL,
253: NULL, NULL, NULL, NULL,
254: NULL, NULL, NULL, NULL);
255: SDT_PROBE_DEFINE(proc,,,lwp_start,
256: "struct lwp *", NULL,
257: NULL, NULL, NULL, NULL,
258: NULL, NULL, NULL, NULL);
259: SDT_PROBE_DEFINE(proc,,,lwp_exit,
260: "struct lwp *", NULL,
261: NULL, NULL, NULL, NULL,
262: NULL, NULL, NULL, NULL);
263:
1.147 pooka 264: struct turnstile turnstile0;
265: struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
266: #ifdef LWP0_CPU_INFO
267: .l_cpu = LWP0_CPU_INFO,
268: #endif
269: .l_proc = &proc0,
270: .l_lid = 1,
271: .l_flag = LW_SYSTEM,
272: .l_stat = LSONPROC,
273: .l_ts = &turnstile0,
274: .l_syncobj = &sched_syncobj,
275: .l_refcnt = 1,
276: .l_priority = PRI_USER + NPRI_USER - 1,
277: .l_inheritedprio = -1,
278: .l_class = SCHED_OTHER,
279: .l_psid = PS_NONE,
280: .l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
281: .l_name = __UNCONST("swapper"),
282: .l_fd = &filedesc0,
283: };
284:
1.41 thorpej 285: void
286: lwpinit(void)
287: {
288:
1.152 ! rmind 289: LIST_INIT(&alllwp);
1.144 pooka 290: lwpinit_specificdata();
1.52 ad 291: lwp_sys_init();
1.87 ad 292: lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
293: "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
1.41 thorpej 294: }
295:
1.147 pooka 296: void
297: lwp0_init(void)
298: {
299: struct lwp *l = &lwp0;
300:
301: KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
1.148 pooka 302: KASSERT(l->l_lid == proc0.p_nlwpid);
1.147 pooka 303:
304: LIST_INSERT_HEAD(&alllwp, l, l_list);
305:
306: callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
307: callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
308: cv_init(&l->l_sigcv, "sigwait");
309:
310: kauth_cred_hold(proc0.p_cred);
311: l->l_cred = proc0.p_cred;
312:
313: lwp_initspecific(l);
314:
315: SYSCALL_TIME_LWP_INIT(l);
316: }
317:
1.52 ad 318: /*
319: * Set an LWP suspended.
320: *
1.103 ad 321: * Must be called with p_lock held, and the LWP locked. Will unlock the
1.52 ad 322: * LWP before return.
323: */
1.2 thorpej 324: int
1.52 ad 325: lwp_suspend(struct lwp *curl, struct lwp *t)
1.2 thorpej 326: {
1.52 ad 327: int error;
1.2 thorpej 328:
1.103 ad 329: KASSERT(mutex_owned(t->l_proc->p_lock));
1.63 ad 330: KASSERT(lwp_locked(t, NULL));
1.33 chs 331:
1.52 ad 332: KASSERT(curl != t || curl->l_stat == LSONPROC);
1.2 thorpej 333:
1.52 ad 334: /*
335: * If the current LWP has been told to exit, we must not suspend anyone
336: * else or deadlock could occur. We won't return to userspace.
1.2 thorpej 337: */
1.109 rmind 338: if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
1.52 ad 339: lwp_unlock(t);
340: return (EDEADLK);
1.2 thorpej 341: }
342:
1.52 ad 343: error = 0;
1.2 thorpej 344:
1.52 ad 345: switch (t->l_stat) {
346: case LSRUN:
347: case LSONPROC:
1.56 pavel 348: t->l_flag |= LW_WSUSPEND;
1.52 ad 349: lwp_need_userret(t);
350: lwp_unlock(t);
351: break;
1.2 thorpej 352:
1.52 ad 353: case LSSLEEP:
1.56 pavel 354: t->l_flag |= LW_WSUSPEND;
1.2 thorpej 355:
356: /*
1.52 ad 357: * Kick the LWP and try to get it to the kernel boundary
358: * so that it will release any locks that it holds.
359: * setrunnable() will release the lock.
1.2 thorpej 360: */
1.56 pavel 361: if ((t->l_flag & LW_SINTR) != 0)
1.52 ad 362: setrunnable(t);
363: else
364: lwp_unlock(t);
365: break;
1.2 thorpej 366:
1.52 ad 367: case LSSUSPENDED:
368: lwp_unlock(t);
369: break;
1.17 manu 370:
1.52 ad 371: case LSSTOP:
1.56 pavel 372: t->l_flag |= LW_WSUSPEND;
1.52 ad 373: setrunnable(t);
374: break;
1.2 thorpej 375:
1.52 ad 376: case LSIDL:
377: case LSZOMB:
378: error = EINTR; /* It's what Solaris does..... */
379: lwp_unlock(t);
380: break;
1.2 thorpej 381: }
382:
1.69 rmind 383: return (error);
1.2 thorpej 384: }
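/*
 * Example caller (an illustrative sketch only): suspend target LWP "t"
 * in process "p", following the contract documented above.
 *
 *	mutex_enter(p->p_lock);
 *	lwp_lock(t);
 *	error = lwp_suspend(curlwp, t);	-> unlocks t before returning
 *	mutex_exit(p->p_lock);
 */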
385:
1.52 ad 386: /*
387: * Restart a suspended LWP.
388: *
1.103 ad 389: * Must be called with p_lock held, and the LWP locked. Will unlock the
1.52 ad 390: * LWP before return.
391: */
1.2 thorpej 392: void
393: lwp_continue(struct lwp *l)
394: {
395:
1.103 ad 396: KASSERT(mutex_owned(l->l_proc->p_lock));
1.63 ad 397: KASSERT(lwp_locked(l, NULL));
1.52 ad 398:
399: /* If rebooting or not suspended, then just bail out. */
1.56 pavel 400: if ((l->l_flag & LW_WREBOOT) != 0) {
1.52 ad 401: lwp_unlock(l);
1.2 thorpej 402: return;
1.10 fvdl 403: }
1.2 thorpej 404:
1.56 pavel 405: l->l_flag &= ~LW_WSUSPEND;
1.2 thorpej 406:
1.52 ad 407: if (l->l_stat != LSSUSPENDED) {
408: lwp_unlock(l);
409: return;
1.2 thorpej 410: }
411:
1.52 ad 412: /* setrunnable() will release the lock. */
413: setrunnable(l);
1.2 thorpej 414: }
415:
1.52 ad 416: /*
1.142 christos 417: * Restart a stopped LWP.
418: *
419: * Must be called with p_lock held, and the LWP NOT locked. Will unlock the
420: * LWP before return.
421: */
422: void
423: lwp_unstop(struct lwp *l)
424: {
425: struct proc *p = l->l_proc;
426:
427: KASSERT(mutex_owned(proc_lock));
428: KASSERT(mutex_owned(p->p_lock));
429:
430: lwp_lock(l);
431:
432: /* If not stopped, then just bail out. */
433: if (l->l_stat != LSSTOP) {
434: lwp_unlock(l);
435: return;
436: }
437:
438: p->p_stat = SACTIVE;
439: p->p_sflag &= ~PS_STOPPING;
440:
441: if (!p->p_waited)
442: p->p_pptr->p_nstopchild--;
443:
444: if (l->l_wchan == NULL) {
445: /* setrunnable() will release the lock. */
446: setrunnable(l);
447: } else {
448: l->l_stat = LSSLEEP;
449: p->p_nrlwps++;
450: lwp_unlock(l);
451: }
452: }
453:
454: /*
1.52 ad 455: * Wait for an LWP within the current process to exit. If 'lid' is
456: * non-zero, we are waiting for a specific LWP.
457: *
1.103 ad 458: * Must be called with p->p_lock held.
1.52 ad 459: */
1.2 thorpej 460: int
461: lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
462: {
463: struct proc *p = l->l_proc;
1.52 ad 464: struct lwp *l2;
465: int nfound, error;
1.63 ad 466: lwpid_t curlid;
467: bool exiting;
1.2 thorpej 468:
1.103 ad 469: KASSERT(mutex_owned(p->p_lock));
1.52 ad 470:
471: p->p_nlwpwait++;
1.63 ad 472: l->l_waitingfor = lid;
473: curlid = l->l_lid;
474: exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
1.52 ad 475:
476: for (;;) {
477: /*
478: * Avoid a race between exit1() and sigexit(): if the
479: * process is dumping core, then we need to bail out: call
480: * into lwp_userret() where we will be suspended until the
481: * deed is done.
482: */
483: if ((p->p_sflag & PS_WCORE) != 0) {
1.103 ad 484: mutex_exit(p->p_lock);
1.52 ad 485: lwp_userret(l);
486: #ifdef DIAGNOSTIC
487: panic("lwp_wait1");
488: #endif
489: /* NOTREACHED */
490: }
491:
492: /*
493: * First off, drain any detached LWP that is waiting to be
494: * reaped.
495: */
496: while ((l2 = p->p_zomblwp) != NULL) {
497: p->p_zomblwp = NULL;
1.63 ad 498: lwp_free(l2, false, false);/* releases proc mutex */
1.103 ad 499: mutex_enter(p->p_lock);
1.52 ad 500: }
501:
502: /*
503: * Now look for an LWP to collect. If the whole process is
504: * exiting, count detached LWPs as eligible to be collected,
505: * but don't drain them here.
506: */
507: nfound = 0;
1.63 ad 508: error = 0;
1.52 ad 509: LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
1.63 ad 510: /*
511: * If a specific wait and the target is waiting on
512: * us, then avoid deadlock. This also traps LWPs
513: * that try to wait on themselves.
514: *
515: * Note that this does not handle more complicated
516: * cycles, like: t1 -> t2 -> t3 -> t1. The process
517: * can still be killed so it is not a major problem.
518: */
519: if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
520: error = EDEADLK;
521: break;
522: }
523: if (l2 == l)
1.52 ad 524: continue;
525: if ((l2->l_prflag & LPR_DETACHED) != 0) {
1.63 ad 526: nfound += exiting;
527: continue;
528: }
529: if (lid != 0) {
530: if (l2->l_lid != lid)
531: continue;
532: /*
533: * Mark this LWP as the first waiter, if there
534: * is no other.
535: */
536: if (l2->l_waiter == 0)
537: l2->l_waiter = curlid;
538: } else if (l2->l_waiter != 0) {
539: /*
540: * It already has a waiter - so don't
541: * collect it. If the waiter doesn't
542: * grab it we'll get another chance
543: * later.
544: */
545: nfound++;
1.52 ad 546: continue;
547: }
548: nfound++;
1.2 thorpej 549:
1.52 ad 550: /* No need to lock the LWP in order to see LSZOMB. */
551: if (l2->l_stat != LSZOMB)
552: continue;
1.2 thorpej 553:
1.63 ad 554: /*
555: * We're no longer waiting. Reset the "first waiter"
556: * pointer on the target, in case it was us.
557: */
558: l->l_waitingfor = 0;
559: l2->l_waiter = 0;
560: p->p_nlwpwait--;
1.2 thorpej 561: if (departed)
562: *departed = l2->l_lid;
1.75 ad 563: sched_lwp_collect(l2);
1.63 ad 564:
565: /* lwp_free() releases the proc lock. */
566: lwp_free(l2, false, false);
1.103 ad 567: mutex_enter(p->p_lock);
1.52 ad 568: return 0;
569: }
1.2 thorpej 570:
1.63 ad 571: if (error != 0)
572: break;
1.52 ad 573: if (nfound == 0) {
574: error = ESRCH;
575: break;
576: }
1.63 ad 577:
578: /*
579: * The kernel is careful to ensure that it cannot deadlock
580: * when exiting - just keep waiting.
581: */
582: if (exiting) {
1.52 ad 583: KASSERT(p->p_nlwps > 1);
1.103 ad 584: cv_wait(&p->p_lwpcv, p->p_lock);
1.52 ad 585: continue;
586: }
1.63 ad 587:
588: /*
589: * If all other LWPs are waiting for exits or suspends
590: * and the supply of zombies and potential zombies is
591: * exhausted, then we are about to deadlock.
592: *
593: * If the process is exiting (and this LWP is not the one
594: * that is coordinating the exit) then bail out now.
595: */
1.52 ad 596: if ((p->p_sflag & PS_WEXIT) != 0 ||
1.63 ad 597: p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
1.52 ad 598: error = EDEADLK;
599: break;
1.2 thorpej 600: }
1.63 ad 601:
602: /*
603: * Sit around and wait for something to happen. We'll be
604: * awoken if any of the conditions examined change: if an
605: * LWP exits, is collected, or is detached.
606: */
1.103 ad 607: if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
1.52 ad 608: break;
1.2 thorpej 609: }
610:
1.63 ad 611: /*
612: * We didn't find any LWPs to collect, we may have received a
613: * signal, or some other condition has caused us to bail out.
614: *
615: * If waiting on a specific LWP, clear the waiters marker: some
616: * other LWP may want it. Then, kick all the remaining waiters
617: * so that they can re-check for zombies and for deadlock.
618: */
619: if (lid != 0) {
620: LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
621: if (l2->l_lid == lid) {
622: if (l2->l_waiter == curlid)
623: l2->l_waiter = 0;
624: break;
625: }
626: }
627: }
1.52 ad 628: p->p_nlwpwait--;
1.63 ad 629: l->l_waitingfor = 0;
630: cv_broadcast(&p->p_lwpcv);
631:
1.52 ad 632: return error;
1.2 thorpej 633: }
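/*
 * Example caller (an illustrative sketch, modelled on the _lwp_wait()
 * system call): reap a specific LWP of the current process.
 *
 *	mutex_enter(p->p_lock);
 *	error = lwp_wait1(curlwp, lid, &departed, 0);
 *	mutex_exit(p->p_lock);
 */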
634:
1.52 ad 635: /*
636: * Create a new LWP within process 'p2', using LWP 'l1' as a template.
637: * The new LWP is created in state LSIDL and must be set running,
638: * suspended, or stopped by the caller.
639: */
1.2 thorpej 640: int
1.134 rmind 641: lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
1.75 ad 642: void *stack, size_t stacksize, void (*func)(void *), void *arg,
643: lwp_t **rnewlwpp, int sclass)
1.2 thorpej 644: {
1.52 ad 645: struct lwp *l2, *isfree;
646: turnstile_t *ts;
1.151 chs 647: lwpid_t lid;
1.2 thorpej 648:
1.107 ad 649: KASSERT(l1 == curlwp || l1->l_proc == &proc0);
650:
1.52 ad 651: /*
652: * First off, reap any detached LWP waiting to be collected.
653: * We can re-use its LWP structure and turnstile.
654: */
655: isfree = NULL;
656: if (p2->p_zomblwp != NULL) {
1.103 ad 657: mutex_enter(p2->p_lock);
1.52 ad 658: if ((isfree = p2->p_zomblwp) != NULL) {
659: p2->p_zomblwp = NULL;
1.63 ad 660: lwp_free(isfree, true, false);/* releases proc mutex */
1.52 ad 661: } else
1.103 ad 662: mutex_exit(p2->p_lock);
1.52 ad 663: }
664: if (isfree == NULL) {
1.87 ad 665: l2 = pool_cache_get(lwp_cache, PR_WAITOK);
1.52 ad 666: memset(l2, 0, sizeof(*l2));
1.76 ad 667: l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
1.60 yamt 668: SLIST_INIT(&l2->l_pi_lenders);
1.52 ad 669: } else {
670: l2 = isfree;
671: ts = l2->l_ts;
1.75 ad 672: KASSERT(l2->l_inheritedprio == -1);
1.60 yamt 673: KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
1.52 ad 674: memset(l2, 0, sizeof(*l2));
675: l2->l_ts = ts;
676: }
1.2 thorpej 677:
678: l2->l_stat = LSIDL;
679: l2->l_proc = p2;
1.52 ad 680: l2->l_refcnt = 1;
1.75 ad 681: l2->l_class = sclass;
1.116 ad 682:
683: /*
684: * If vfork(), we want the LWP to run fast and on the same CPU
685: * as its parent, so that it can reuse the VM context and cache
686: * footprint on the local CPU.
687: */
688: l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
1.82 ad 689: l2->l_kpribase = PRI_KERNEL;
1.52 ad 690: l2->l_priority = l1->l_priority;
1.75 ad 691: l2->l_inheritedprio = -1;
1.134 rmind 692: l2->l_flag = 0;
1.88 ad 693: l2->l_pflag = LP_MPSAFE;
1.131 ad 694: TAILQ_INIT(&l2->l_ld_locks);
695:
696: /*
697: * If not the first LWP in the process, grab a reference to the
698: * descriptor table.
699: */
1.97 ad 700: l2->l_fd = p2->p_fd;
1.131 ad 701: if (p2->p_nlwps != 0) {
702: KASSERT(l1->l_proc == p2);
1.136 rmind 703: fd_hold(l2);
1.131 ad 704: } else {
705: KASSERT(l1->l_proc != p2);
706: }
1.41 thorpej 707:
1.56 pavel 708: if (p2->p_flag & PK_SYSTEM) {
1.134 rmind 709: /* Mark it as a system LWP. */
1.56 pavel 710: l2->l_flag |= LW_SYSTEM;
1.52 ad 711: }
1.2 thorpej 712:
1.107 ad 713: kpreempt_disable();
714: l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
715: l2->l_cpu = l1->l_cpu;
716: kpreempt_enable();
717:
1.138 darran 718: kdtrace_thread_ctor(NULL, l2);
1.73 rmind 719: lwp_initspecific(l2);
1.75 ad 720: sched_lwp_fork(l1, l2);
1.37 ad 721: lwp_update_creds(l2);
1.70 ad 722: callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
723: callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
1.52 ad 724: cv_init(&l2->l_sigcv, "sigwait");
725: l2->l_syncobj = &sched_syncobj;
1.2 thorpej 726:
727: if (rnewlwpp != NULL)
728: *rnewlwpp = l2;
729:
1.137 rmind 730: uvm_lwp_setuarea(l2, uaddr);
1.2 thorpej 731: uvm_lwp_fork(l1, l2, stack, stacksize, func,
732: (arg != NULL) ? arg : l2);
733:
1.151 chs 734: if ((flags & LWP_PIDLID) != 0) {
735: lid = proc_alloc_pid(p2);
736: l2->l_pflag |= LP_PIDLID;
737: } else {
738: lid = 0;
739: }
740:
1.103 ad 741: mutex_enter(p2->p_lock);
1.52 ad 742:
743: if ((flags & LWP_DETACHED) != 0) {
744: l2->l_prflag = LPR_DETACHED;
745: p2->p_ndlwps++;
746: } else
747: l2->l_prflag = 0;
748:
749: l2->l_sigmask = l1->l_sigmask;
750: CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
751: sigemptyset(&l2->l_sigpend.sp_set);
752:
1.151 chs 753: if (lid == 0) {
1.53 yamt 754: p2->p_nlwpid++;
1.151 chs 755: if (p2->p_nlwpid == 0)
756: p2->p_nlwpid++;
757: lid = p2->p_nlwpid;
758: }
759: l2->l_lid = lid;
1.2 thorpej 760: LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
761: p2->p_nlwps++;
1.149 yamt 762: p2->p_nrlwps++;
1.2 thorpej 763:
1.91 rmind 764: if ((p2->p_flag & PK_SYSTEM) == 0) {
765: /* Inherit an affinity */
1.122 rmind 766: if (l1->l_flag & LW_AFFINITY) {
1.128 rmind 767: /*
768: * Note that we hold the state lock while inheriting
769: * the affinity to avoid race with sched_setaffinity().
770: */
771: lwp_lock(l1);
1.122 rmind 772: if (l1->l_flag & LW_AFFINITY) {
773: kcpuset_use(l1->l_affinity);
774: l2->l_affinity = l1->l_affinity;
775: l2->l_flag |= LW_AFFINITY;
776: }
1.128 rmind 777: lwp_unlock(l1);
1.117 christos 778: }
1.128 rmind 779: lwp_lock(l2);
780: /* Inherit a processor-set */
781: l2->l_psid = l1->l_psid;
1.91 rmind 782: /* Look for a CPU to start */
783: l2->l_cpu = sched_takecpu(l2);
784: lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
785: }
1.128 rmind 786: mutex_exit(p2->p_lock);
787:
1.141 darran 788: SDT_PROBE(proc,,,lwp_create, l2, 0,0,0,0);
789:
1.128 rmind 790: mutex_enter(proc_lock);
791: LIST_INSERT_HEAD(&alllwp, l2, l_list);
792: mutex_exit(proc_lock);
1.91 rmind 793:
1.57 dsl 794: SYSCALL_TIME_LWP_INIT(l2);
795:
1.16 manu 796: if (p2->p_emul->e_lwp_fork)
797: (*p2->p_emul->e_lwp_fork)(l1, l2);
798:
1.2 thorpej 799: return (0);
800: }
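/*
 * Rough sketch of a caller (illustrative only, modelled on the way
 * fork1() and kthread_create() start a new thread): the LWP comes back
 * in LSIDL and must be explicitly set runnable.
 *
 *	error = lwp_create(curlwp, p, uaddr, 0, NULL, 0, func, arg,
 *	    &l2, SCHED_OTHER);
 *	if (error == 0) {
 *		mutex_enter(p->p_lock);
 *		lwp_lock(l2);		-> takes spc_mutex, as l2 is LSIDL
 *		l2->l_stat = LSRUN;
 *		sched_enqueue(l2, false);
 *		lwp_unlock(l2);
 *		mutex_exit(p->p_lock);
 *	}
 */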
801:
802: /*
1.64 yamt 803: * Called by MD code when a new LWP begins execution. Must be called
804: * with the previous LWP locked (so at splsched), or if there is no
805: * previous LWP, at splsched.
806: */
807: void
808: lwp_startup(struct lwp *prev, struct lwp *new)
809: {
810:
1.141 darran 811: SDT_PROBE(proc,,,lwp_start, new, 0,0,0,0);
812:
1.107 ad 813: KASSERT(kpreempt_disabled());
1.64 yamt 814: if (prev != NULL) {
1.81 ad 815: /*
816: * Normalize the count of the spin-mutexes; it was
817: * increased in mi_switch(). Clear the context switch
818: * state - the switch away from the previous LWP is done.
819: */
820: curcpu()->ci_mtx_count++;
821: membar_exit();
822: prev->l_ctxswtch = 0;
1.64 yamt 823: }
1.107 ad 824: KPREEMPT_DISABLE(new);
825: spl0();
1.105 ad 826: pmap_activate(new);
1.64 yamt 827: LOCKDEBUG_BARRIER(NULL, 0);
1.107 ad 828: KPREEMPT_ENABLE(new);
1.65 ad 829: if ((new->l_pflag & LP_MPSAFE) == 0) {
830: KERNEL_LOCK(1, new);
831: }
1.64 yamt 832: }
833:
834: /*
1.65 ad 835: * Exit an LWP.
1.2 thorpej 836: */
837: void
838: lwp_exit(struct lwp *l)
839: {
840: struct proc *p = l->l_proc;
1.52 ad 841: struct lwp *l2;
1.65 ad 842: bool current;
843:
844: current = (l == curlwp);
1.2 thorpej 845:
1.114 rmind 846: KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
1.131 ad 847: KASSERT(p == curproc);
1.2 thorpej 848:
1.141 darran 849: SDT_PROBE(proc,,,lwp_exit, l, 0,0,0,0);
850:
1.52 ad 851: /*
852: * Verify that we hold no locks other than the kernel lock.
853: */
854: LOCKDEBUG_BARRIER(&kernel_lock, 0);
1.16 manu 855:
1.2 thorpej 856: /*
1.52 ad 857: * If we are the last live LWP in a process, we need to exit the
858: * entire process. We do so with an exit status of zero, because
859: * it's a "controlled" exit, and because that's what Solaris does.
860: *
861: * We are not quite a zombie yet, but for accounting purposes we
862: * must increment the count of zombies here.
1.45 thorpej 863: *
864: * Note: the last LWP's specificdata will be deleted here.
1.2 thorpej 865: */
1.103 ad 866: mutex_enter(p->p_lock);
1.52 ad 867: if (p->p_nlwps - p->p_nzlwps == 1) {
1.65 ad 868: KASSERT(current == true);
1.88 ad 869: /* XXXSMP kernel_lock not held */
1.2 thorpej 870: exit1(l, 0);
1.19 jdolecek 871: /* NOTREACHED */
1.2 thorpej 872: }
1.52 ad 873: p->p_nzlwps++;
1.103 ad 874: mutex_exit(p->p_lock);
1.52 ad 875:
876: if (p->p_emul->e_lwp_exit)
877: (*p->p_emul->e_lwp_exit)(l);
1.2 thorpej 878:
1.131 ad 879: /* Drop filedesc reference. */
880: fd_free();
881:
1.45 thorpej 882: /* Delete the specificdata while it's still safe to sleep. */
1.145 pooka 883: lwp_finispecific(l);
1.45 thorpej 884:
1.52 ad 885: /*
886: * Release our cached credentials.
887: */
1.37 ad 888: kauth_cred_free(l->l_cred);
1.70 ad 889: callout_destroy(&l->l_timeout_ch);
1.65 ad 890:
891: /*
1.52 ad 892: * Remove the LWP from the global list.
1.151 chs 893: * Free its LID from the PID namespace if needed.
1.52 ad 894: */
1.102 ad 895: mutex_enter(proc_lock);
1.52 ad 896: LIST_REMOVE(l, l_list);
1.151 chs 897: if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
898: proc_free_pid(l->l_lid);
899: }
1.102 ad 900: mutex_exit(proc_lock);
1.19 jdolecek 901:
1.52 ad 902: /*
903: * Get rid of all references to the LWP that others (e.g. procfs)
904: * may have, and mark the LWP as a zombie. If the LWP is detached,
905: * mark it waiting for collection in the proc structure. Note that
906: * before we can do that, we need to free any other dead, detached
907: * LWP waiting to meet its maker.
908: */
1.103 ad 909: mutex_enter(p->p_lock);
1.52 ad 910: lwp_drainrefs(l);
1.31 yamt 911:
1.52 ad 912: if ((l->l_prflag & LPR_DETACHED) != 0) {
913: while ((l2 = p->p_zomblwp) != NULL) {
914: p->p_zomblwp = NULL;
1.63 ad 915: lwp_free(l2, false, false);/* releases proc mutex */
1.103 ad 916: mutex_enter(p->p_lock);
1.72 ad 917: l->l_refcnt++;
918: lwp_drainrefs(l);
1.52 ad 919: }
920: p->p_zomblwp = l;
921: }
1.31 yamt 922:
1.52 ad 923: /*
924: * If we find a pending signal for the process and we have been
1.151 chs 925: * asked to check for signals, then we lose: arrange to have
1.52 ad 926: * all other LWPs in the process check for signals.
927: */
1.56 pavel 928: if ((l->l_flag & LW_PENDSIG) != 0 &&
1.52 ad 929: firstsig(&p->p_sigpend.sp_set) != 0) {
930: LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
931: lwp_lock(l2);
1.56 pavel 932: l2->l_flag |= LW_PENDSIG;
1.52 ad 933: lwp_unlock(l2);
934: }
1.31 yamt 935: }
936:
1.52 ad 937: lwp_lock(l);
938: l->l_stat = LSZOMB;
1.90 ad 939: if (l->l_name != NULL)
940: strcpy(l->l_name, "(zombie)");
1.128 rmind 941: if (l->l_flag & LW_AFFINITY) {
1.122 rmind 942: l->l_flag &= ~LW_AFFINITY;
1.128 rmind 943: } else {
944: KASSERT(l->l_affinity == NULL);
945: }
1.52 ad 946: lwp_unlock(l);
1.2 thorpej 947: p->p_nrlwps--;
1.52 ad 948: cv_broadcast(&p->p_lwpcv);
1.78 ad 949: if (l->l_lwpctl != NULL)
950: l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
1.103 ad 951: mutex_exit(p->p_lock);
1.52 ad 952:
1.122 rmind 953: /* Safe without lock since LWP is in zombie state */
954: if (l->l_affinity) {
955: kcpuset_unuse(l->l_affinity, NULL);
956: l->l_affinity = NULL;
957: }
958:
1.52 ad 959: /*
960: * We can no longer block. At this point, lwp_free() may already
961: * be gunning for us. On a multi-CPU system, we may be off p_lwps.
962: *
963: * Free MD LWP resources.
964: */
965: cpu_lwp_free(l, 0);
1.2 thorpej 966:
1.65 ad 967: if (current) {
968: pmap_deactivate(l);
969:
970: /*
971: * Release the kernel lock, and switch away into
972: * oblivion.
973: */
1.52 ad 974: #ifdef notyet
1.65 ad 975: /* XXXSMP hold in lwp_userret() */
976: KERNEL_UNLOCK_LAST(l);
1.52 ad 977: #else
1.65 ad 978: KERNEL_UNLOCK_ALL(l, NULL);
1.52 ad 979: #endif
1.65 ad 980: lwp_exit_switchaway(l);
981: }
1.2 thorpej 982: }
983:
1.52 ad 984: /*
985: * Free a dead LWP's remaining resources.
986: *
987: * XXXLWP limits.
988: */
989: void
1.63 ad 990: lwp_free(struct lwp *l, bool recycle, bool last)
1.52 ad 991: {
992: struct proc *p = l->l_proc;
1.100 ad 993: struct rusage *ru;
1.52 ad 994: ksiginfoq_t kq;
995:
1.92 yamt 996: KASSERT(l != curlwp);
997:
1.52 ad 998: /*
999: * If this was not the last LWP in the process, then adjust
1000: * counters and unlock.
1001: */
1002: if (!last) {
1003: /*
1004: * Add the LWP's run time to the process' base value.
1005: * This needs to coincide with coming off p_lwps.
1006: */
1.86 yamt 1007: bintime_add(&p->p_rtime, &l->l_rtime);
1.64 yamt 1008: p->p_pctcpu += l->l_pctcpu;
1.100 ad 1009: ru = &p->p_stats->p_ru;
1010: ruadd(ru, &l->l_ru);
1011: ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
1012: ru->ru_nivcsw += l->l_nivcsw;
1.52 ad 1013: LIST_REMOVE(l, l_sibling);
1014: p->p_nlwps--;
1015: p->p_nzlwps--;
1016: if ((l->l_prflag & LPR_DETACHED) != 0)
1017: p->p_ndlwps--;
1.63 ad 1018:
1019: /*
1020: * Have any LWPs sleeping in lwp_wait() recheck for
1021: * deadlock.
1022: */
1023: cv_broadcast(&p->p_lwpcv);
1.103 ad 1024: mutex_exit(p->p_lock);
1.63 ad 1025: }
1.52 ad 1026:
1027: #ifdef MULTIPROCESSOR
1.63 ad 1028: /*
1029: * In the unlikely event that the LWP is still on the CPU,
1030: * then spin until it has switched away. We need to release
1031: * all locks to avoid deadlock against interrupt handlers on
1032: * the target CPU.
1033: */
1.115 ad 1034: if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
1.63 ad 1035: int count;
1.64 yamt 1036: (void)count; /* XXXgcc */
1.63 ad 1037: KERNEL_UNLOCK_ALL(curlwp, &count);
1.115 ad 1038: while ((l->l_pflag & LP_RUNNING) != 0 ||
1.64 yamt 1039: l->l_cpu->ci_curlwp == l)
1.63 ad 1040: SPINLOCK_BACKOFF_HOOK;
1041: KERNEL_LOCK(count, curlwp);
1042: }
1.52 ad 1043: #endif
1044:
1045: /*
1046: * Destroy the LWP's remaining signal information.
1047: */
1048: ksiginfo_queue_init(&kq);
1049: sigclear(&l->l_sigpend, NULL, &kq);
1050: ksiginfo_queue_drain(&kq);
1051: cv_destroy(&l->l_sigcv);
1.2 thorpej 1052:
1.19 jdolecek 1053: /*
1.52 ad 1054: * Free the LWP's turnstile and the LWP structure itself unless the
1.93 yamt 1055: * caller wants to recycle them. Also, free the scheduler specific
1056: * data.
1.52 ad 1057: *
1058: * We can't return turnstile0 to the pool (it didn't come from it),
1059: * so if it comes up just drop it quietly and move on.
1060: *
1061: * We don't recycle the VM resources at this time.
1.19 jdolecek 1062: */
1.78 ad 1063: if (l->l_lwpctl != NULL)
1064: lwp_ctl_free(l);
1.64 yamt 1065:
1.52 ad 1066: if (!recycle && l->l_ts != &turnstile0)
1.76 ad 1067: pool_cache_put(turnstile_cache, l->l_ts);
1.90 ad 1068: if (l->l_name != NULL)
1069: kmem_free(l->l_name, MAXCOMLEN);
1.135 rmind 1070:
1.52 ad 1071: cpu_lwp_free2(l);
1.19 jdolecek 1072: uvm_lwp_exit(l);
1.134 rmind 1073:
1.60 yamt 1074: KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
1.75 ad 1075: KASSERT(l->l_inheritedprio == -1);
1.138 darran 1076: kdtrace_thread_dtor(NULL, l);
1.52 ad 1077: if (!recycle)
1.87 ad 1078: pool_cache_put(lwp_cache, l);
1.2 thorpej 1079: }
1080:
1081: /*
1.91 rmind 1082: * Migrate the LWP to another CPU. Unlocks the LWP.
1083: */
1084: void
1.114 rmind 1085: lwp_migrate(lwp_t *l, struct cpu_info *tci)
1.91 rmind 1086: {
1.114 rmind 1087: struct schedstate_percpu *tspc;
1.121 rmind 1088: int lstat = l->l_stat;
1089:
1.91 rmind 1090: KASSERT(lwp_locked(l, NULL));
1.114 rmind 1091: KASSERT(tci != NULL);
1092:
1.121 rmind 1093: /* If the LWP is still on the CPU, it must be handled like LSONPROC */
1094: if ((l->l_pflag & LP_RUNNING) != 0) {
1095: lstat = LSONPROC;
1096: }
1097:
1.114 rmind 1098: /*
1099: * The destination CPU may be changed while the previous
1100: * migration has not yet finished.
1101: */
1.121 rmind 1102: if (l->l_target_cpu != NULL) {
1.114 rmind 1103: l->l_target_cpu = tci;
1104: lwp_unlock(l);
1105: return;
1106: }
1.91 rmind 1107:
1.114 rmind 1108: /* Nothing to do if trying to migrate to the same CPU */
1109: if (l->l_cpu == tci) {
1.91 rmind 1110: lwp_unlock(l);
1111: return;
1112: }
1113:
1.114 rmind 1114: KASSERT(l->l_target_cpu == NULL);
1115: tspc = &tci->ci_schedstate;
1.121 rmind 1116: switch (lstat) {
1.91 rmind 1117: case LSRUN:
1.134 rmind 1118: l->l_target_cpu = tci;
1119: break;
1.91 rmind 1120: case LSIDL:
1.114 rmind 1121: l->l_cpu = tci;
1122: lwp_unlock_to(l, tspc->spc_mutex);
1.91 rmind 1123: return;
1124: case LSSLEEP:
1.114 rmind 1125: l->l_cpu = tci;
1.91 rmind 1126: break;
1127: case LSSTOP:
1128: case LSSUSPENDED:
1.114 rmind 1129: l->l_cpu = tci;
1130: if (l->l_wchan == NULL) {
1131: lwp_unlock_to(l, tspc->spc_lwplock);
1132: return;
1.91 rmind 1133: }
1.114 rmind 1134: break;
1.91 rmind 1135: case LSONPROC:
1.114 rmind 1136: l->l_target_cpu = tci;
1137: spc_lock(l->l_cpu);
1138: cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
1139: spc_unlock(l->l_cpu);
1.91 rmind 1140: break;
1141: }
1142: lwp_unlock(l);
1143: }
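/*
 * Example (an illustrative sketch): request migration of LWP "l" to
 * the CPU described by "tci"; lwp_migrate() consumes the LWP lock.
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, tci);		-> unlocks l on all paths
 */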
1144:
1145: /*
1.94 rmind 1146: * Find the LWP in the process. Arguments may be zero, in which case
1147: * the calling process and the first LWP in the list will be used.
1.103 ad 1148: * On success, returns the proc locked.
1.91 rmind 1149: */
1150: struct lwp *
1151: lwp_find2(pid_t pid, lwpid_t lid)
1152: {
1153: proc_t *p;
1154: lwp_t *l;
1155:
1.150 rmind 1156: /* Find the process. */
1.94 rmind 1157: if (pid != 0) {
1.150 rmind 1158: mutex_enter(proc_lock);
1159: p = proc_find(pid);
1160: if (p == NULL) {
1161: mutex_exit(proc_lock);
1162: return NULL;
1163: }
1164: mutex_enter(p->p_lock);
1.102 ad 1165: mutex_exit(proc_lock);
1.150 rmind 1166: } else {
1167: p = curlwp->l_proc;
1168: mutex_enter(p->p_lock);
1169: }
1170: /* Find the thread. */
1171: if (lid != 0) {
1172: l = lwp_find(p, lid);
1173: } else {
1174: l = LIST_FIRST(&p->p_lwps);
1.94 rmind 1175: }
1.103 ad 1176: if (l == NULL) {
1177: mutex_exit(p->p_lock);
1178: }
1.91 rmind 1179: return l;
1180: }
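/*
 * Example (an illustrative sketch): look up LWP "lid" within process
 * "pid" and drop the process lock that lwp_find2() returns held.
 *
 *	if ((l = lwp_find2(pid, lid)) != NULL) {
 *		...			-> p_lock keeps l from being freed
 *		mutex_exit(l->l_proc->p_lock);
 *	}
 */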
1181:
1182: /*
1.151 chs 1183: * Look up a live LWP within the specified process.
1.52 ad 1184: *
1.103 ad 1185: * Must be called with p->p_lock held.
1.52 ad 1186: */
1187: struct lwp *
1.151 chs 1188: lwp_find(struct proc *p, lwpid_t id)
1.52 ad 1189: {
1190: struct lwp *l;
1191:
1.103 ad 1192: KASSERT(mutex_owned(p->p_lock));
1.52 ad 1193:
1194: LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1195: if (l->l_lid == id)
1196: break;
1197: }
1198:
1199: /*
1200: * No need to lock - all of these conditions will
1201: * be visible with the process level mutex held.
1202: */
1203: if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
1204: l = NULL;
1205:
1206: return l;
1207: }
1208:
1209: /*
1.37 ad 1210: * Update an LWP's cached credentials to mirror the process' master copy.
1211: *
1212: * This happens early in the syscall path, on user trap, and on LWP
1213: * creation. A long-running LWP can also voluntarily choose to update
1214: * its credentials by calling this routine. This may be called from
1215: * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1216: */
1217: void
1218: lwp_update_creds(struct lwp *l)
1219: {
1220: kauth_cred_t oc;
1221: struct proc *p;
1222:
1223: p = l->l_proc;
1224: oc = l->l_cred;
1225:
1.103 ad 1226: mutex_enter(p->p_lock);
1.37 ad 1227: kauth_cred_hold(p->p_cred);
1228: l->l_cred = p->p_cred;
1.98 ad 1229: l->l_prflag &= ~LPR_CRMOD;
1.103 ad 1230: mutex_exit(p->p_lock);
1.88 ad 1231: if (oc != NULL)
1.37 ad 1232: kauth_cred_free(oc);
1.52 ad 1233: }
1234:
1235: /*
1236: * Verify that an LWP is locked, and optionally verify that the lock matches
1237: * one we specify.
1238: */
1239: int
1240: lwp_locked(struct lwp *l, kmutex_t *mtx)
1241: {
1242: kmutex_t *cur = l->l_mutex;
1243:
1244: return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1245: }
1246:
1247: /*
1248: * Lend a new mutex to an LWP. The old mutex must be held.
1249: */
1250: void
1251: lwp_setlock(struct lwp *l, kmutex_t *new)
1252: {
1253:
1.63 ad 1254: KASSERT(mutex_owned(l->l_mutex));
1.52 ad 1255:
1.107 ad 1256: membar_exit();
1.52 ad 1257: l->l_mutex = new;
1258: }
1259:
1260: /*
1261: * Lend a new mutex to an LWP, and release the old mutex. The old mutex
1262: * must be held.
1263: */
1264: void
1265: lwp_unlock_to(struct lwp *l, kmutex_t *new)
1266: {
1267: kmutex_t *old;
1268:
1.152 ! rmind 1269: KASSERT(lwp_locked(l, NULL));
1.52 ad 1270:
1271: old = l->l_mutex;
1.107 ad 1272: membar_exit();
1.52 ad 1273: l->l_mutex = new;
1274: mutex_spin_exit(old);
1275: }
1276:
1.60 yamt 1277: int
1278: lwp_trylock(struct lwp *l)
1279: {
1280: kmutex_t *old;
1281:
1282: for (;;) {
1283: if (!mutex_tryenter(old = l->l_mutex))
1284: return 0;
1285: if (__predict_true(l->l_mutex == old))
1286: return 1;
1287: mutex_spin_exit(old);
1288: }
1289: }
1290:
1.134 rmind 1291: void
1.96 ad 1292: lwp_unsleep(lwp_t *l, bool cleanup)
1293: {
1294:
1295: KASSERT(mutex_owned(l->l_mutex));
1.134 rmind 1296: (*l->l_syncobj->sobj_unsleep)(l, cleanup);
1.96 ad 1297: }
1298:
1.52 ad 1299: /*
1.56 pavel 1300: * Handle exceptions for mi_userret(). Called if any of the flags in
1.52 ad 1301: * LW_USERRET is set.
1302: */
1303: void
1304: lwp_userret(struct lwp *l)
1305: {
1306: struct proc *p;
1307: int sig;
1308:
1.114 rmind 1309: KASSERT(l == curlwp);
1310: KASSERT(l->l_stat == LSONPROC);
1.52 ad 1311: p = l->l_proc;
1312:
1.75 ad 1313: #ifndef __HAVE_FAST_SOFTINTS
1314: /* Run pending soft interrupts. */
1315: if (l->l_cpu->ci_data.cpu_softints != 0)
1316: softint_overlay();
1317: #endif
1318:
1.125 ad 1319: #ifdef KERN_SA
1320: /* Generate UNBLOCKED upcall if needed */
1321: if (l->l_flag & LW_SA_BLOCKING) {
1322: sa_unblock_userret(l);
1323: /* NOTREACHED */
1324: }
1325: #endif
1326:
1.52 ad 1327: /*
1328: * It should be safe to do this read unlocked on a multiprocessor
1329: * system.
1.126 wrstuden 1330: *
1331: * LW_SA_UPCALL will be handled after the while() loop, so don't
1332: * consider it now.
1.52 ad 1333: */
1.126 wrstuden 1334: while ((l->l_flag & (LW_USERRET & ~(LW_SA_UPCALL))) != 0) {
1.52 ad 1335: /*
1336: * Process pending signals first, unless the process
1.61 ad 1337: * is dumping core or exiting, where we will instead
1.101 rmind 1338: * enter the LW_WSUSPEND case below.
1.52 ad 1339: */
1.61 ad 1340: if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1341: LW_PENDSIG) {
1.103 ad 1342: mutex_enter(p->p_lock);
1.52 ad 1343: while ((sig = issignal(l)) != 0)
1344: postsig(sig);
1.103 ad 1345: mutex_exit(p->p_lock);
1.52 ad 1346: }
1347:
1348: /*
1349: * Core-dump or suspend pending.
1350: *
1351: * In case of core dump, suspend ourselves, so that the
1352: * kernel stack and therefore the userland registers saved
1353: * in the trapframe are around for coredump() to write them
1354: * out. We issue a wakeup on p->p_lwpcv so that sigexit()
1355: * will write the core file out once all other LWPs are
1356: * suspended.
1357: */
1.56 pavel 1358: if ((l->l_flag & LW_WSUSPEND) != 0) {
1.103 ad 1359: mutex_enter(p->p_lock);
1.52 ad 1360: p->p_nrlwps--;
1361: cv_broadcast(&p->p_lwpcv);
1362: lwp_lock(l);
1363: l->l_stat = LSSUSPENDED;
1.104 ad 1364: lwp_unlock(l);
1.103 ad 1365: mutex_exit(p->p_lock);
1.104 ad 1366: lwp_lock(l);
1.64 yamt 1367: mi_switch(l);
1.52 ad 1368: }
1369:
1370: /* Process is exiting. */
1.56 pavel 1371: if ((l->l_flag & LW_WEXIT) != 0) {
1.52 ad 1372: lwp_exit(l);
1373: KASSERT(0);
1374: /* NOTREACHED */
1375: }
1376: }
1.124 wrstuden 1377:
1378: #ifdef KERN_SA
1379: /*
1380: * Timer events are handled specially. We only try once to deliver
1381: * pending timer upcalls; if it fails, we can try again on the next
1382: * loop around. If we need to re-enter lwp_userret(), MD code will
1383: * bounce us back here through the trap path after we return.
1384: */
1385: if (p->p_timerpend)
1386: timerupcall(l);
1.125 ad 1387: if (l->l_flag & LW_SA_UPCALL)
1388: sa_upcall_userret(l);
1.124 wrstuden 1389: #endif /* KERN_SA */
1.52 ad 1390: }
1391:
1392: /*
1393: * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1394: */
1395: void
1396: lwp_need_userret(struct lwp *l)
1397: {
1.63 ad 1398: KASSERT(lwp_locked(l, NULL));
1.52 ad 1399:
1400: /*
1401: * Since the tests in lwp_userret() are done unlocked, make sure
1402: * that the condition will be seen before forcing the LWP to enter
1403: * kernel mode.
1404: */
1.81 ad 1405: membar_producer();
1.52 ad 1406: cpu_signotify(l);
1407: }
1408:
1409: /*
1410: * Add one reference to an LWP. This will prevent the LWP from
1411: * exiting, thus keeping the lwp structure and PCB around to inspect.
1412: */
1413: void
1414: lwp_addref(struct lwp *l)
1415: {
1416:
1.103 ad 1417: KASSERT(mutex_owned(l->l_proc->p_lock));
1.52 ad 1418: KASSERT(l->l_stat != LSZOMB);
1419: KASSERT(l->l_refcnt != 0);
1420:
1421: l->l_refcnt++;
1422: }
1423:
1424: /*
1425: * Remove one reference to an LWP. If this is the last reference,
1426: * then we must finalize the LWP's death.
1427: */
1428: void
1429: lwp_delref(struct lwp *l)
1430: {
1431: struct proc *p = l->l_proc;
1432:
1.103 ad 1433: mutex_enter(p->p_lock);
1.142 christos 1434: lwp_delref2(l);
1435: mutex_exit(p->p_lock);
1436: }
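/*
 * Example (an illustrative sketch): the reference pattern used by
 * subsystems such as procfs to inspect an LWP from outside its
 * process without it being freed underfoot.
 *
 *	mutex_enter(p->p_lock);
 *	lwp_addref(l);			-> l must not be a zombie
 *	mutex_exit(p->p_lock);
 *	...				-> inspect l, possibly sleeping
 *	lwp_delref(l);			-> takes and drops p_lock itself
 */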
1437:
1438: /*
1439: * Remove one reference to an LWP. If this is the last reference,
1440: * then we must finalize the LWP's death. The proc mutex is held
1441: * on entry.
1442: */
1443: void
1444: lwp_delref2(struct lwp *l)
1445: {
1446: struct proc *p = l->l_proc;
1447:
1448: KASSERT(mutex_owned(p->p_lock));
1.72 ad 1449: KASSERT(l->l_stat != LSZOMB);
1450: KASSERT(l->l_refcnt > 0);
1.52 ad 1451: if (--l->l_refcnt == 0)
1.76 ad 1452: cv_broadcast(&p->p_lwpcv);
1.52 ad 1453: }
1454:
1455: /*
1456: * Drain all references to the current LWP.
1457: */
1458: void
1459: lwp_drainrefs(struct lwp *l)
1460: {
1461: struct proc *p = l->l_proc;
1462:
1.103 ad 1463: KASSERT(mutex_owned(p->p_lock));
1.52 ad 1464: KASSERT(l->l_refcnt != 0);
1465:
1466: l->l_refcnt--;
1467: while (l->l_refcnt != 0)
1.103 ad 1468: cv_wait(&p->p_lwpcv, p->p_lock);
1.37 ad 1469: }
1.41 thorpej 1470:
1471: /*
1.127 ad 1472: * Return true if the specified LWP is 'alive'. Only p->p_lock need
1473: * be held.
1474: */
1475: bool
1476: lwp_alive(lwp_t *l)
1477: {
1478:
1479: KASSERT(mutex_owned(l->l_proc->p_lock));
1480:
1481: switch (l->l_stat) {
1482: case LSSLEEP:
1483: case LSRUN:
1484: case LSONPROC:
1485: case LSSTOP:
1486: case LSSUSPENDED:
1487: return true;
1488: default:
1489: return false;
1490: }
1491: }
1492:
1493: /*
1494: * Return first live LWP in the process.
1495: */
1496: lwp_t *
1497: lwp_find_first(proc_t *p)
1498: {
1499: lwp_t *l;
1500:
1501: KASSERT(mutex_owned(p->p_lock));
1502:
1503: LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1504: if (lwp_alive(l)) {
1505: return l;
1506: }
1507: }
1508:
1509: return NULL;
1510: }
1511:
1512: /*
1.78 ad 1513: * Allocate a new lwpctl structure for a user LWP.
1514: */
1515: int
1516: lwp_ctl_alloc(vaddr_t *uaddr)
1517: {
1518: lcproc_t *lp;
1519: u_int bit, i, offset;
1520: struct uvm_object *uao;
1521: int error;
1522: lcpage_t *lcp;
1523: proc_t *p;
1524: lwp_t *l;
1525:
1526: l = curlwp;
1527: p = l->l_proc;
1528:
1.81 ad 1529: if (l->l_lcpage != NULL) {
1530: lcp = l->l_lcpage;
1531: *uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1.143 njoly 1532: return 0;
1.81 ad 1533: }
1.78 ad 1534:
1535: /* First time around, allocate header structure for the process. */
1536: if ((lp = p->p_lwpctl) == NULL) {
1537: lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
1538: mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
1539: lp->lp_uao = NULL;
1540: TAILQ_INIT(&lp->lp_pages);
1.103 ad 1541: mutex_enter(p->p_lock);
1.78 ad 1542: if (p->p_lwpctl == NULL) {
1543: p->p_lwpctl = lp;
1.103 ad 1544: mutex_exit(p->p_lock);
1.78 ad 1545: } else {
1.103 ad 1546: mutex_exit(p->p_lock);
1.78 ad 1547: mutex_destroy(&lp->lp_lock);
1548: kmem_free(lp, sizeof(*lp));
1549: lp = p->p_lwpctl;
1550: }
1551: }
1552:
1553: /*
1554: * Set up an anonymous memory region to hold the shared pages.
1555: * Map them into the process' address space. The user vmspace
1556: * gets the first reference on the UAO.
1557: */
1558: mutex_enter(&lp->lp_lock);
1559: if (lp->lp_uao == NULL) {
1560: lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
1561: lp->lp_cur = 0;
1562: lp->lp_max = LWPCTL_UAREA_SZ;
1563: lp->lp_uva = p->p_emul->e_vm_default_addr(p,
1564: (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
1565: error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
1566: LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
1567: UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
1568: if (error != 0) {
1569: uao_detach(lp->lp_uao);
1570: lp->lp_uao = NULL;
1571: mutex_exit(&lp->lp_lock);
1572: return error;
1573: }
1574: }
1575:
1576: /* Get a free block and allocate for this LWP. */
1577: TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
1578: if (lcp->lcp_nfree != 0)
1579: break;
1580: }
1581: if (lcp == NULL) {
1582: /* Nothing available - try to set up a free page. */
1583: if (lp->lp_cur == lp->lp_max) {
1584: mutex_exit(&lp->lp_lock);
1585: return ENOMEM;
1586: }
1587: lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1.79 yamt 1588: if (lcp == NULL) {
1589: mutex_exit(&lp->lp_lock);
1.78 ad 1590: return ENOMEM;
1.79 yamt 1591: }
1.78 ad 1592: /*
1593: * Wire the next page down in kernel space. Since this
1594: * is a new mapping, we must add a reference.
1595: */
1596: uao = lp->lp_uao;
1597: (*uao->pgops->pgo_reference)(uao);
1.99 ad 1598: lcp->lcp_kaddr = vm_map_min(kernel_map);
1.78 ad 1599: error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
1600: uao, lp->lp_cur, PAGE_SIZE,
1601: UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
1602: UVM_INH_NONE, UVM_ADV_RANDOM, 0));
1603: if (error != 0) {
1604: mutex_exit(&lp->lp_lock);
1605: kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1606: (*uao->pgops->pgo_detach)(uao);
1607: return error;
1608: }
1.89 yamt 1609: error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
1610: lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
1611: if (error != 0) {
1612: mutex_exit(&lp->lp_lock);
1613: uvm_unmap(kernel_map, lcp->lcp_kaddr,
1614: lcp->lcp_kaddr + PAGE_SIZE);
1615: kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1616: return error;
1617: }
1.78 ad 1618: /* Prepare the page descriptor and link into the list. */
1619: lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
1620: lp->lp_cur += PAGE_SIZE;
1621: lcp->lcp_nfree = LWPCTL_PER_PAGE;
1622: lcp->lcp_rotor = 0;
1623: memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
1624: TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1625: }
1626: for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
1627: if (++i >= LWPCTL_BITMAP_ENTRIES)
1628: i = 0;
1629: }
1630: bit = ffs(lcp->lcp_bitmap[i]) - 1;
1631: lcp->lcp_bitmap[i] ^= (1 << bit);
1632: lcp->lcp_rotor = i;
1633: lcp->lcp_nfree--;
1634: l->l_lcpage = lcp;
1635: offset = (i << 5) + bit;
1636: l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
1637: *uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
1638: mutex_exit(&lp->lp_lock);
1639:
1.107 ad 1640: KPREEMPT_DISABLE(l);
1.111 ad 1641: l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
1.107 ad 1642: KPREEMPT_ENABLE(l);
1.78 ad 1643:
1644: return 0;
1645: }
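/*
 * From userland this is reached through the _lwp_ctl(2) system call.
 * A minimal sketch of a consumer, assuming the usual interface from
 * <lwp.h> (illustrative only):
 *
 *	struct lwpctl *lc;
 *
 *	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) == 0)
 *		printf("running on cpu %d\n", lc->lc_curcpu);
 */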
1646:
1647: /*
1648: * Free an lwpctl structure back to the per-process list.
1649: */
1650: void
1651: lwp_ctl_free(lwp_t *l)
1652: {
1653: lcproc_t *lp;
1654: lcpage_t *lcp;
1655: u_int map, offset;
1656:
1657: lp = l->l_proc->p_lwpctl;
1658: KASSERT(lp != NULL);
1659:
1660: lcp = l->l_lcpage;
1661: offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
1662: KASSERT(offset < LWPCTL_PER_PAGE);
1663:
1664: mutex_enter(&lp->lp_lock);
1665: lcp->lcp_nfree++;
1666: map = offset >> 5;
1667: lcp->lcp_bitmap[map] |= (1 << (offset & 31));
1668: if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
1669: lcp->lcp_rotor = map;
1670: if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
1671: TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
1672: TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
1673: }
1674: mutex_exit(&lp->lp_lock);
1675: }
1676:
1677: /*
1678: * Process is exiting; tear down lwpctl state. This can only be safely
1679: * called by the last LWP in the process.
1680: */
1681: void
1682: lwp_ctl_exit(void)
1683: {
1684: lcpage_t *lcp, *next;
1685: lcproc_t *lp;
1686: proc_t *p;
1687: lwp_t *l;
1688:
1689: l = curlwp;
1690: l->l_lwpctl = NULL;
1.95 ad 1691: l->l_lcpage = NULL;
1.78 ad 1692: p = l->l_proc;
1693: lp = p->p_lwpctl;
1694:
1695: KASSERT(lp != NULL);
1696: KASSERT(p->p_nlwps == 1);
1697:
1698: for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
1699: next = TAILQ_NEXT(lcp, lcp_chain);
1700: uvm_unmap(kernel_map, lcp->lcp_kaddr,
1701: lcp->lcp_kaddr + PAGE_SIZE);
1702: kmem_free(lcp, LWPCTL_LCPAGE_SZ);
1703: }
1704:
1705: if (lp->lp_uao != NULL) {
1706: uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
1707: lp->lp_uva + LWPCTL_UAREA_SZ);
1708: }
1709:
1710: mutex_destroy(&lp->lp_lock);
1711: kmem_free(lp, sizeof(*lp));
1712: p->p_lwpctl = NULL;
1713: }
1.84 yamt 1714:
1.130 ad 1715: /*
1716: * Return the current LWP's "preemption counter". Used to detect
1717: * preemption across operations that can tolerate preemption without
1718: * crashing, but which may generate incorrect results if preempted.
1719: */
1720: uint64_t
1721: lwp_pctr(void)
1722: {
1723:
1724: return curlwp->l_ncsw;
1725: }
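/*
 * Example (an illustrative sketch): retry a preemption-tolerant
 * operation until it completes without the LWP being preempted.
 *
 *	do {
 *		pctr = lwp_pctr();
 *		... operation that yields a wrong result if preempted ...
 *	} while (pctr != lwp_pctr());
 */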
1726:
1.151 chs 1727: /*
1728: * Set an LWP's private data pointer.
1729: */
1730: int
1731: lwp_setprivate(struct lwp *l, void *ptr)
1732: {
1733: int error = 0;
1734:
1735: l->l_private = ptr;
1736: #ifdef __HAVE_CPU_LWP_SETPRIVATE
1737: error = cpu_lwp_setprivate(l, ptr);
1738: #endif
1739: return error;
1740: }
1741:
1.84 yamt 1742: #if defined(DDB)
1743: void
1744: lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
1745: {
1746: lwp_t *l;
1747:
1748: LIST_FOREACH(l, &alllwp, l_list) {
1749: uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
1750:
1751: if (addr < stack || stack + KSTACK_SIZE <= addr) {
1752: continue;
1753: }
1754: (*pr)("%p is %p+%zu, LWP %p's stack\n",
1755: (void *)addr, (void *)stack,
1756: (size_t)(addr - stack), l);
1757: }
1758: }
1759: #endif /* defined(DDB) */