Annotation of src/sys/kern/kern_lwp.c, Revision 1.71
1.71 ! ad 1: /* $NetBSD: kern_lwp.c,v 1.70 2007/09/06 23:58:56 ad Exp $ */
1.2 thorpej 2:
3: /*-
1.52 ad 4: * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
1.2 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.52 ad 8: * by Nathan J. Williams, and Andrew Doran.
1.2 thorpej 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
1.9 lukem 38:
1.52 ad 39: /*
40: * Overview
41: *
1.66 ad 42: * Lightweight processes (LWPs) are the basic unit or thread of
1.52 ad 43: * execution within the kernel. The core state of an LWP is described
1.66 ad 44: * by "struct lwp", also known as lwp_t.
1.52 ad 45: *
46: * Each LWP is contained within a process (described by "struct proc").
47: * Every process contains at least one LWP, but may contain more. The
48: * process describes attributes shared among all of its LWPs such as a
49: * private address space, global execution state (stopped, active,
50: * zombie, ...), signal disposition and so on. On a multiprocessor
1.66 ad 51: * machine, multiple LWPs may be executing concurrently in the kernel.
1.52 ad 52: *
53: * Execution states
54: *
55: * At any given time, an LWP has overall state that is described by
56: * lwp::l_stat. The states are broken into two sets below. The first
57: * set is guaranteed to represent the absolute, current state of the
58: * LWP:
59: *
60: * LSONPROC
61: *
62: * On processor: the LWP is executing on a CPU, either in the
63: * kernel or in user space.
64: *
65: * LSRUN
66: *
67: * Runnable: the LWP is parked on a run queue, and may soon be
68: * chosen to run by an idle processor, or by a processor that
69: * has been asked to preempt a currently running but lower
70: * priority LWP. If the LWP is not swapped in (LW_INMEM == 0)
71: * then the LWP is not on a run queue, but may be soon.
72: *
73: * LSIDL
74: *
1.66 ad 75: * Idle: the LWP has been created but has not yet executed,
76: * or it has ceased executing a unit of work and is waiting
77: * to be started again.
1.52 ad 78: *
79: * LSSUSPENDED:
80: *
81: * Suspended: the LWP has had its execution suspended by
82: * another LWP in the same process using the _lwp_suspend()
83: * system call. User-level LWPs also enter the suspended
84: * state when the system is shutting down.
85: *
86: * The second set represents a "statement of intent" on behalf of the
87: * LWP. The LWP may in fact be executing on a processor, or may be
1.66 ad 88: * sleeping or idle. It is expected to take the necessary action to
89: * stop executing or become "running" again within a short timeframe.
90: * The LW_RUNNING flag in lwp::l_flag indicates that an LWP is running.
91: * Importantly, it indicates that its state is tied to a CPU.
1.52 ad 92: *
93: * LSZOMB:
94: *
1.66 ad 95: * Dead or dying: the LWP has released most of its resources
96: * and is a) about to switch away into oblivion, or b) has already
97: * switched away. When it switches away, its few remaining
98: * resources can be collected.
1.52 ad 99: *
100: * LSSLEEP:
101: *
102: * Sleeping: the LWP has entered itself onto a sleep queue, and
1.66 ad 103: * has switched away or will switch away shortly to allow other
104: * LWPs to run on the CPU.
1.52 ad 105: *
106: * LSSTOP:
107: *
108: * Stopped: the LWP has been stopped as a result of a job
109: * control signal, or as a result of the ptrace() interface.
1.66 ad 110: *
1.52 ad 111: * Stopped LWPs may run briefly within the kernel to handle
112: * signals that they receive, but will not return to user space
113: * until their process' state is changed away from stopped.
1.66 ad 114: *
1.52 ad 115: * Single LWPs within a process cannot be set stopped
116: * selectively: all actions that can stop or continue LWPs
117: * occur at the process level.
118: *
119: * State transitions
120: *
1.66 ad 121: * Note that the LSSTOP state may only be set when returning to
122: * user space in userret(), or when sleeping interruptibly. The
123: * LSSUSPENDED state may only be set in userret(). Before setting
124: * those states, we try to ensure that the LWPs will release all
125: * locks that they hold, and at a minimum try to ensure that the
126: * LWP can be set runnable again by a signal.
1.52 ad 127: *
128: * LWPs may transition states in the following ways:
129: *
130: * RUN -------> ONPROC ONPROC -----> RUN
131: * > STOPPED > SLEEP
132: * > SUSPENDED > STOPPED
133: * > SUSPENDED
134: * > ZOMB
135: *
136: * STOPPED ---> RUN SUSPENDED --> RUN
137: * > SLEEP > SLEEP
138: *
139: * SLEEP -----> ONPROC IDL --------> RUN
140: * > RUN > SUSPENDED
141: * > STOPPED > STOPPED
142: * > SUSPENDED
143: *
1.66 ad 144: * Other state transitions are possible with kernel threads (e.g.
145: * ONPROC -> IDL), but only happen under tightly controlled
146: * circumstances where the side effects are understood.
147: *
1.52 ad 148: * Locking
149: *
150: * The majority of fields in 'struct lwp' are covered by a single,
1.66 ad 151: * general spin lock pointed to by lwp::l_mutex. The locks covering
1.52 ad 152: * each field are documented in sys/lwp.h.
153: *
1.66 ad 154: * State transitions must be made with the LWP's general lock held,
155: * and may cause the LWP's lock pointer to change. Manipulation of
156: * the general lock is not performed directly, but through calls to
157: * lwp_lock(), lwp_relock() and similar.
1.52 ad 158: *
159: * States and their associated locks:
160: *
1.64 yamt 161: * LSIDL, LSZOMB, LSONPROC:
1.52 ad 162: *
1.64 yamt 163: * Always covered by spc_lwplock, which protects running LWPs.
164: * This is a per-CPU lock.
1.52 ad 165: *
1.64 yamt 166: * LSRUN:
1.52 ad 167: *
1.64 yamt 168: * Always covered by spc_mutex, which protects the run queues.
169: * This may be a per-CPU lock, depending on the scheduler.
1.52 ad 170: *
171: * LSSLEEP:
172: *
1.66 ad 173: * Covered by a lock associated with the sleep queue that the
1.52 ad 174: * LWP resides on, indirectly referenced by l_sleepq->sq_mutex.
175: *
176: * LSSTOP, LSSUSPENDED:
177: *
178: * If the LWP was previously sleeping (l_wchan != NULL), then
1.66 ad 179: * l_mutex references the sleep queue lock. If the LWP was
1.52 ad 180: * runnable or on the CPU when halted, or has been removed from
1.66 ad 181: * the sleep queue since halted, then the lock is spc_lwplock.
1.52 ad 182: *
183: * The lock order is as follows:
184: *
1.64 yamt 185: * spc::spc_lwplock ->
186: * sleepq_t::sq_mutex ->
187: * tschain_t::tc_mutex ->
188: * spc::spc_mutex
1.52 ad 189: *
1.66 ad 190: * Each process has a scheduler state lock (proc::p_smutex), and a
1.52 ad 191: * number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
192: * so on. When an LWP is to be entered into or removed from one of the
193: * following states, p_smutex must be held and the process-wide counters
194: * adjusted:
195: *
196: * LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
197: *
198: * Note that an LWP is considered running or likely to run soon if in
199: * one of the following states. This affects the value of p_nrlwps:
200: *
201: * LSRUN, LSONPROC, LSSLEEP
202: *
203: * p_smutex does not need to be held when transitioning among these
204: * three states.
205: */
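/*
 * As an illustrative sketch of the rules above (not a verbatim excerpt
 * from any particular caller): obtaining a stable view of another LWP's
 * state means taking its general lock first, since both l_stat and the
 * lock pointer itself can otherwise change underfoot:
 *
 *	lwp_lock(l);			lock, chasing l->l_mutex
 *	stat = l->l_stat;		stable while the lock is held
 *	lwp_unlock(l);
 */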
206:
1.9 lukem 207: #include <sys/cdefs.h>
1.71 ! ad 208: __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.70 2007/09/06 23:58:56 ad Exp $");
1.8 martin 209:
210: #include "opt_multiprocessor.h"
1.52 ad 211: #include "opt_lockdebug.h"
1.2 thorpej 212:
1.47 hannken 213: #define _LWP_API_PRIVATE
214:
1.2 thorpej 215: #include <sys/param.h>
216: #include <sys/systm.h>
1.64 yamt 217: #include <sys/cpu.h>
1.2 thorpej 218: #include <sys/pool.h>
219: #include <sys/proc.h>
220: #include <sys/syscallargs.h>
1.57 dsl 221: #include <sys/syscall_stats.h>
1.37 ad 222: #include <sys/kauth.h>
1.52 ad 223: #include <sys/sleepq.h>
224: #include <sys/lockdebug.h>
225: #include <sys/kmem.h>
1.2 thorpej 226:
227: #include <uvm/uvm_extern.h>
228:
1.52 ad 229: struct lwplist alllwp;
230:
231: POOL_INIT(lwp_pool, sizeof(struct lwp), MIN_LWP_ALIGNMENT, 0, 0, "lwppl",
1.62 ad 232: &pool_allocator_nointr, IPL_NONE);
1.41 thorpej 233: POOL_INIT(lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
1.62 ad 234: &pool_allocator_nointr, IPL_NONE);
1.41 thorpej 235:
236: static specificdata_domain_t lwp_specificdata_domain;
237:
1.2 thorpej 238: #define LWP_DEBUG
239:
240: #ifdef LWP_DEBUG
241: int lwp_debug = 0;
242: #define DPRINTF(x) if (lwp_debug) printf x
243: #else
244: #define DPRINTF(x)
245: #endif
1.41 thorpej 246:
247: void
248: lwpinit(void)
249: {
250:
251: lwp_specificdata_domain = specificdata_domain_create();
252: KASSERT(lwp_specificdata_domain != NULL);
1.52 ad 253: lwp_sys_init();
1.41 thorpej 254: }
255:
1.52 ad 256: /*
257: * Set an LWP suspended.
258: *
259: * Must be called with p_smutex held, and the LWP locked. Will unlock the
260: * LWP before return.
261: */
1.2 thorpej 262: int
1.52 ad 263: lwp_suspend(struct lwp *curl, struct lwp *t)
1.2 thorpej 264: {
1.52 ad 265: int error;
1.2 thorpej 266:
1.63 ad 267: KASSERT(mutex_owned(&t->l_proc->p_smutex));
268: KASSERT(lwp_locked(t, NULL));
1.33 chs 269:
1.52 ad 270: KASSERT(curl != t || curl->l_stat == LSONPROC);
1.2 thorpej 271:
1.52 ad 272: /*
273: * If the current LWP has been told to exit, we must not suspend anyone
274: * else or deadlock could occur. We won't return to userspace.
1.2 thorpej 275: */
1.56 pavel 276: 	if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
1.52 ad 277: lwp_unlock(t);
278: return (EDEADLK);
1.2 thorpej 279: }
280:
1.52 ad 281: error = 0;
1.2 thorpej 282:
1.52 ad 283: switch (t->l_stat) {
284: case LSRUN:
285: case LSONPROC:
1.56 pavel 286: t->l_flag |= LW_WSUSPEND;
1.52 ad 287: lwp_need_userret(t);
288: lwp_unlock(t);
289: break;
1.2 thorpej 290:
1.52 ad 291: case LSSLEEP:
1.56 pavel 292: t->l_flag |= LW_WSUSPEND;
1.2 thorpej 293:
294: /*
1.52 ad 295: * Kick the LWP and try to get it to the kernel boundary
296: * so that it will release any locks that it holds.
297: * setrunnable() will release the lock.
1.2 thorpej 298: */
1.56 pavel 299: if ((t->l_flag & LW_SINTR) != 0)
1.52 ad 300: setrunnable(t);
301: else
302: lwp_unlock(t);
303: break;
1.2 thorpej 304:
1.52 ad 305: case LSSUSPENDED:
306: lwp_unlock(t);
307: break;
1.17 manu 308:
1.52 ad 309: case LSSTOP:
1.56 pavel 310: t->l_flag |= LW_WSUSPEND;
1.52 ad 311: setrunnable(t);
312: break;
1.2 thorpej 313:
1.52 ad 314: case LSIDL:
315: case LSZOMB:
316: error = EINTR; /* It's what Solaris does..... */
317: lwp_unlock(t);
318: break;
1.2 thorpej 319: }
320:
1.69 rmind 321: return (error);
1.2 thorpej 322: }
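/*
 * A minimal sketch of the calling convention above; the helper name is
 * hypothetical and error handling is reduced to the essentials.  Hold
 * p_smutex across the walk, lock each target, and rely on lwp_suspend()
 * to unlock it.
 */
#if 0
static int
suspend_others(struct lwp *curl)
{
	struct proc *p = curl->l_proc;
	struct lwp *t;
	int error = 0;

	mutex_enter(&p->p_smutex);
	LIST_FOREACH(t, &p->p_lwps, l_sibling) {
		if (t == curl)
			continue;
		lwp_lock(t);
		if ((error = lwp_suspend(curl, t)) != 0)	/* unlocks t */
			break;
	}
	mutex_exit(&p->p_smutex);
	return error;
}
#endif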
323:
1.52 ad 324: /*
325: * Restart a suspended LWP.
326: *
327: * Must be called with p_smutex held, and the LWP locked. Will unlock the
328: * LWP before return.
329: */
1.2 thorpej 330: void
331: lwp_continue(struct lwp *l)
332: {
333:
1.63 ad 334: KASSERT(mutex_owned(&l->l_proc->p_smutex));
335: KASSERT(lwp_locked(l, NULL));
1.52 ad 336:
1.2 thorpej 337: DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
338: l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
339: l->l_wchan));
340:
1.52 ad 341: /* If rebooting or not suspended, then just bail out. */
1.56 pavel 342: if ((l->l_flag & LW_WREBOOT) != 0) {
1.52 ad 343: lwp_unlock(l);
1.2 thorpej 344: return;
1.10 fvdl 345: }
1.2 thorpej 346:
1.56 pavel 347: l->l_flag &= ~LW_WSUSPEND;
1.2 thorpej 348:
1.52 ad 349: if (l->l_stat != LSSUSPENDED) {
350: lwp_unlock(l);
351: return;
1.2 thorpej 352: }
353:
1.52 ad 354: /* setrunnable() will release the lock. */
355: setrunnable(l);
1.2 thorpej 356: }
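/*
 * Correspondingly, a hedged sketch of resuming a suspended LWP 't'
 * under the same protocol (the surrounding lookup is omitted):
 *
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(t);
 *	lwp_continue(t);		releases the LWP lock
 *	mutex_exit(&p->p_smutex);
 */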
357:
1.52 ad 358: /*
359: * Wait for an LWP within the current process to exit. If 'lid' is
360: * non-zero, we are waiting for a specific LWP.
361: *
362: * Must be called with p->p_smutex held.
363: */
1.2 thorpej 364: int
365: lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
366: {
367: struct proc *p = l->l_proc;
1.52 ad 368: struct lwp *l2;
369: int nfound, error;
1.63 ad 370: lwpid_t curlid;
371: bool exiting;
1.2 thorpej 372:
373: DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
374: p->p_pid, l->l_lid, lid));
375:
1.63 ad 376: KASSERT(mutex_owned(&p->p_smutex));
1.52 ad 377:
378: p->p_nlwpwait++;
1.63 ad 379: l->l_waitingfor = lid;
380: curlid = l->l_lid;
381: exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
1.52 ad 382:
383: for (;;) {
384: /*
385: * Avoid a race between exit1() and sigexit(): if the
386: * process is dumping core, then we need to bail out: call
387: * into lwp_userret() where we will be suspended until the
388: * deed is done.
389: */
390: if ((p->p_sflag & PS_WCORE) != 0) {
391: mutex_exit(&p->p_smutex);
392: lwp_userret(l);
393: #ifdef DIAGNOSTIC
394: panic("lwp_wait1");
395: #endif
396: /* NOTREACHED */
397: }
398:
399: /*
400: * First off, drain any detached LWP that is waiting to be
401: * reaped.
402: */
403: while ((l2 = p->p_zomblwp) != NULL) {
404: p->p_zomblwp = NULL;
1.63 ad 405: 			lwp_free(l2, false, false); /* releases proc mutex */
1.52 ad 406: mutex_enter(&p->p_smutex);
407: }
408:
409: /*
410: * Now look for an LWP to collect. If the whole process is
411: * exiting, count detached LWPs as eligible to be collected,
412: * but don't drain them here.
413: */
414: nfound = 0;
1.63 ad 415: error = 0;
1.52 ad 416: LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
1.63 ad 417: /*
418: * If a specific wait and the target is waiting on
419: * us, then avoid deadlock. This also traps LWPs
420: * that try to wait on themselves.
421: *
422: * Note that this does not handle more complicated
423: * cycles, like: t1 -> t2 -> t3 -> t1. The process
424: * can still be killed so it is not a major problem.
425: */
426: if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
427: error = EDEADLK;
428: break;
429: }
430: if (l2 == l)
1.52 ad 431: continue;
432: if ((l2->l_prflag & LPR_DETACHED) != 0) {
1.63 ad 433: nfound += exiting;
434: continue;
435: }
436: if (lid != 0) {
437: if (l2->l_lid != lid)
438: continue;
439: /*
440: * Mark this LWP as the first waiter, if there
441: * is no other.
442: */
443: if (l2->l_waiter == 0)
444: l2->l_waiter = curlid;
445: } else if (l2->l_waiter != 0) {
446: /*
447: * It already has a waiter - so don't
448: * collect it. If the waiter doesn't
449: * grab it we'll get another chance
450: * later.
451: */
452: nfound++;
1.52 ad 453: continue;
454: }
455: nfound++;
1.2 thorpej 456:
1.52 ad 457: /* No need to lock the LWP in order to see LSZOMB. */
458: if (l2->l_stat != LSZOMB)
459: continue;
1.2 thorpej 460:
1.63 ad 461: /*
462: * We're no longer waiting. Reset the "first waiter"
463: * pointer on the target, in case it was us.
464: */
465: l->l_waitingfor = 0;
466: l2->l_waiter = 0;
467: p->p_nlwpwait--;
1.2 thorpej 468: if (departed)
469: *departed = l2->l_lid;
1.63 ad 470:
471: /* lwp_free() releases the proc lock. */
472: lwp_free(l2, false, false);
1.52 ad 473: mutex_enter(&p->p_smutex);
474: return 0;
475: }
1.2 thorpej 476:
1.63 ad 477: if (error != 0)
478: break;
1.52 ad 479: if (nfound == 0) {
480: error = ESRCH;
481: break;
482: }
1.63 ad 483:
484: /*
485: * The kernel is careful to ensure that it cannot deadlock
486: * when exiting - just keep waiting.
487: */
488: if (exiting) {
1.52 ad 489: KASSERT(p->p_nlwps > 1);
490: cv_wait(&p->p_lwpcv, &p->p_smutex);
491: continue;
492: }
1.63 ad 493:
494: /*
495: * If all other LWPs are waiting for exits or suspends
496: * and the supply of zombies and potential zombies is
497: * exhausted, then we are about to deadlock.
498: *
499: * If the process is exiting (and this LWP is not the one
500: * that is coordinating the exit) then bail out now.
501: */
1.52 ad 502: if ((p->p_sflag & PS_WEXIT) != 0 ||
1.63 ad 503: p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
1.52 ad 504: error = EDEADLK;
505: break;
1.2 thorpej 506: }
1.63 ad 507:
508: /*
509: * Sit around and wait for something to happen. We'll be
510: * awoken if any of the conditions examined change: if an
511: * LWP exits, is collected, or is detached.
512: */
1.52 ad 513: if ((error = cv_wait_sig(&p->p_lwpcv, &p->p_smutex)) != 0)
514: break;
1.2 thorpej 515: }
516:
1.63 ad 517: /*
518: * We didn't find any LWPs to collect, we may have received a
519: * signal, or some other condition has caused us to bail out.
520: *
521: * If waiting on a specific LWP, clear the waiters marker: some
522: * other LWP may want it. Then, kick all the remaining waiters
523: * so that they can re-check for zombies and for deadlock.
524: */
525: if (lid != 0) {
526: LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
527: if (l2->l_lid == lid) {
528: if (l2->l_waiter == curlid)
529: l2->l_waiter = 0;
530: break;
531: }
532: }
533: }
1.52 ad 534: p->p_nlwpwait--;
1.63 ad 535: l->l_waitingfor = 0;
536: cv_broadcast(&p->p_lwpcv);
537:
1.52 ad 538: return error;
1.2 thorpej 539: }
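/*
 * A minimal sketch (hypothetical wrapper, purely illustrative) of the
 * calling convention for lwp_wait1(): the caller provides and reclaims
 * p_smutex, which lwp_wait1() may drop and retake internally.
 */
#if 0
static int
wait_for_lwp(struct lwp *l, lwpid_t lid, lwpid_t *departedp)
{
	struct proc *p = l->l_proc;
	int error;

	mutex_enter(&p->p_smutex);
	error = lwp_wait1(l, lid, departedp, 0);	/* 0: not exiting */
	mutex_exit(&p->p_smutex);
	return error;
}
#endif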
540:
1.52 ad 541: /*
542: * Create a new LWP within process 'p2', using LWP 'l1' as a template.
543: * The new LWP is created in state LSIDL and must be set running,
544: * suspended, or stopped by the caller.
545: */
1.2 thorpej 546: int
1.59 thorpej 547: newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, bool inmem,
1.2 thorpej 548: int flags, void *stack, size_t stacksize,
549: void (*func)(void *), void *arg, struct lwp **rnewlwpp)
550: {
1.52 ad 551: struct lwp *l2, *isfree;
552: turnstile_t *ts;
1.2 thorpej 553:
1.52 ad 554: /*
555: * First off, reap any detached LWP waiting to be collected.
556: * We can re-use its LWP structure and turnstile.
557: */
558: isfree = NULL;
559: if (p2->p_zomblwp != NULL) {
560: mutex_enter(&p2->p_smutex);
561: if ((isfree = p2->p_zomblwp) != NULL) {
562: p2->p_zomblwp = NULL;
1.63 ad 563: 			lwp_free(isfree, true, false); /* releases proc mutex */
1.52 ad 564: } else
565: mutex_exit(&p2->p_smutex);
566: }
567: if (isfree == NULL) {
568: l2 = pool_get(&lwp_pool, PR_WAITOK);
569: memset(l2, 0, sizeof(*l2));
570: l2->l_ts = pool_cache_get(&turnstile_cache, PR_WAITOK);
1.60 yamt 571: SLIST_INIT(&l2->l_pi_lenders);
1.52 ad 572: } else {
573: l2 = isfree;
574: ts = l2->l_ts;
1.60 yamt 575: KASSERT(l2->l_inheritedprio == MAXPRI);
576: KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
1.52 ad 577: memset(l2, 0, sizeof(*l2));
578: l2->l_ts = ts;
579: }
1.2 thorpej 580:
581: l2->l_stat = LSIDL;
582: l2->l_proc = p2;
1.52 ad 583: l2->l_refcnt = 1;
584: l2->l_priority = l1->l_priority;
585: l2->l_usrpri = l1->l_usrpri;
1.60 yamt 586: l2->l_inheritedprio = MAXPRI;
1.64 yamt 587: l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
1.52 ad 588: l2->l_cpu = l1->l_cpu;
1.56 pavel 589: l2->l_flag = inmem ? LW_INMEM : 0;
1.42 christos 590: lwp_initspecific(l2);
1.64 yamt 591: sched_lwp_fork(l2);
1.41 thorpej 592:
1.56 pavel 593: if (p2->p_flag & PK_SYSTEM) {
1.52 ad 594: /*
595: * Mark it as a system LWP and not a candidate for
596: * swapping.
597: */
1.56 pavel 598: l2->l_flag |= LW_SYSTEM;
1.52 ad 599: }
1.2 thorpej 600:
1.37 ad 601: lwp_update_creds(l2);
1.70 ad 602: callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
603: callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
1.65 ad 604: mutex_init(&l2->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
1.52 ad 605: cv_init(&l2->l_sigcv, "sigwait");
606: l2->l_syncobj = &sched_syncobj;
1.2 thorpej 607:
608: if (rnewlwpp != NULL)
609: *rnewlwpp = l2;
610:
1.36 yamt 611: l2->l_addr = UAREA_TO_USER(uaddr);
1.2 thorpej 612: uvm_lwp_fork(l1, l2, stack, stacksize, func,
613: (arg != NULL) ? arg : l2);
614:
1.52 ad 615: mutex_enter(&p2->p_smutex);
616:
617: if ((flags & LWP_DETACHED) != 0) {
618: l2->l_prflag = LPR_DETACHED;
619: p2->p_ndlwps++;
620: } else
621: l2->l_prflag = 0;
622:
623: l2->l_sigmask = l1->l_sigmask;
624: CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
625: sigemptyset(&l2->l_sigpend.sp_set);
626:
1.53 yamt 627: p2->p_nlwpid++;
628: if (p2->p_nlwpid == 0)
629: p2->p_nlwpid++;
630: l2->l_lid = p2->p_nlwpid;
1.2 thorpej 631: LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
632: p2->p_nlwps++;
633:
1.52 ad 634: mutex_exit(&p2->p_smutex);
635:
1.65 ad 636: mutex_enter(&proclist_lock);
1.52 ad 637: mutex_enter(&proclist_mutex);
1.2 thorpej 638: LIST_INSERT_HEAD(&alllwp, l2, l_list);
1.52 ad 639: mutex_exit(&proclist_mutex);
1.65 ad 640: mutex_exit(&proclist_lock);
1.2 thorpej 641:
1.57 dsl 642: SYSCALL_TIME_LWP_INIT(l2);
643:
1.16 manu 644: if (p2->p_emul->e_lwp_fork)
645: (*p2->p_emul->e_lwp_fork)(l1, l2);
646:
1.2 thorpej 647: return (0);
648: }
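/*
 * A hedged sketch of driving newlwp(): the new LWP comes back in LSIDL
 * and the caller must transition it.  The simplest transition per the
 * state diagram above is IDL -> SUSPENDED; making it runnable instead
 * goes through setrunnable() and the scheduler.  The helper name, the
 * 'true' (inmem) u-area assumption, and the exact bookkeeping are
 * illustrative only.
 */
#if 0
static int
create_suspended_lwp(struct lwp *l1, struct proc *p, vaddr_t uaddr,
    void (*func)(void *), void *arg, struct lwp **newp)
{
	struct lwp *l2;
	int error;

	error = newlwp(l1, p, uaddr, true, 0, NULL, 0, func, arg, &l2);
	if (error != 0)
		return error;

	/* newlwp() returned the LWP in LSIDL; park it suspended. */
	mutex_enter(&p->p_smutex);
	lwp_lock(l2);
	l2->l_stat = LSSUSPENDED;
	lwp_unlock(l2);
	mutex_exit(&p->p_smutex);

	*newp = l2;
	return 0;
}
#endif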
649:
650: /*
1.64 yamt 651: * Called by MD code when a new LWP begins execution. Must be called
652: * with the previous LWP locked (so at splsched), or if there is no
653: * previous LWP, at splsched.
654: */
655: void
656: lwp_startup(struct lwp *prev, struct lwp *new)
657: {
658:
659: if (prev != NULL) {
660: lwp_unlock(prev);
661: }
662: spl0();
663: pmap_activate(new);
664: LOCKDEBUG_BARRIER(NULL, 0);
1.65 ad 665: if ((new->l_pflag & LP_MPSAFE) == 0) {
666: KERNEL_LOCK(1, new);
667: }
1.64 yamt 668: }
669:
670: /*
1.65 ad 671: * Exit an LWP.
1.2 thorpej 672: */
673: void
674: lwp_exit(struct lwp *l)
675: {
676: struct proc *p = l->l_proc;
1.52 ad 677: struct lwp *l2;
1.65 ad 678: bool current;
679:
680: current = (l == curlwp);
1.2 thorpej 681:
682: DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
1.52 ad 683: DPRINTF((" nlwps: %d nzlwps: %d\n", p->p_nlwps, p->p_nzlwps));
1.65 ad 684: KASSERT(current || l->l_stat == LSIDL);
1.2 thorpej 685:
1.52 ad 686: /*
687: * Verify that we hold no locks other than the kernel lock.
688: */
689: #ifdef MULTIPROCESSOR
690: LOCKDEBUG_BARRIER(&kernel_lock, 0);
691: #else
692: LOCKDEBUG_BARRIER(NULL, 0);
693: #endif
1.16 manu 694:
1.2 thorpej 695: /*
1.52 ad 696: * If we are the last live LWP in a process, we need to exit the
697: * entire process. We do so with an exit status of zero, because
698: * it's a "controlled" exit, and because that's what Solaris does.
699: *
700: * We are not quite a zombie yet, but for accounting purposes we
701: * must increment the count of zombies here.
1.45 thorpej 702: *
703: * Note: the last LWP's specificdata will be deleted here.
1.2 thorpej 704: */
1.52 ad 705: mutex_enter(&p->p_smutex);
706: if (p->p_nlwps - p->p_nzlwps == 1) {
1.65 ad 707: KASSERT(current == true);
1.2 thorpej 708: DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
709: p->p_pid, l->l_lid));
710: exit1(l, 0);
1.19 jdolecek 711: /* NOTREACHED */
1.2 thorpej 712: }
1.52 ad 713: p->p_nzlwps++;
714: mutex_exit(&p->p_smutex);
715:
716: if (p->p_emul->e_lwp_exit)
717: (*p->p_emul->e_lwp_exit)(l);
1.2 thorpej 718:
1.45 thorpej 719: /* Delete the specificdata while it's still safe to sleep. */
720: specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
721:
1.52 ad 722: /*
723: * Release our cached credentials.
724: */
1.37 ad 725: kauth_cred_free(l->l_cred);
1.70 ad 726: callout_destroy(&l->l_timeout_ch);
1.65 ad 727:
728: /*
729: * While we can still block, mark the LWP as unswappable to
730: * prevent conflicts with the swapper.
731: */
732: if (current)
733: uvm_lwp_hold(l);
1.37 ad 734:
1.52 ad 735: /*
736: * Remove the LWP from the global list.
737: */
1.65 ad 738: mutex_enter(&proclist_lock);
1.52 ad 739: mutex_enter(&proclist_mutex);
740: LIST_REMOVE(l, l_list);
741: mutex_exit(&proclist_mutex);
1.65 ad 742: mutex_exit(&proclist_lock);
1.19 jdolecek 743:
1.52 ad 744: /*
745: * Get rid of all references to the LWP that others (e.g. procfs)
746: * may have, and mark the LWP as a zombie. If the LWP is detached,
747: * mark it waiting for collection in the proc structure. Note that
748: * before we can do that, we need to free any other dead, detached
749: * LWP waiting to meet its maker.
750: *
751: * XXXSMP disable preemption.
752: */
753: mutex_enter(&p->p_smutex);
754: lwp_drainrefs(l);
1.31 yamt 755:
1.52 ad 756: if ((l->l_prflag & LPR_DETACHED) != 0) {
757: while ((l2 = p->p_zomblwp) != NULL) {
758: p->p_zomblwp = NULL;
1.63 ad 759: 			lwp_free(l2, false, false); /* releases proc mutex */
1.52 ad 760: mutex_enter(&p->p_smutex);
761: }
762: p->p_zomblwp = l;
763: }
1.31 yamt 764:
1.52 ad 765: /*
766: * If we find a pending signal for the process and we have been
767: * asked to check for signals, then we lose: arrange to have
768: * all other LWPs in the process check for signals.
769: */
1.56 pavel 770: if ((l->l_flag & LW_PENDSIG) != 0 &&
1.52 ad 771: firstsig(&p->p_sigpend.sp_set) != 0) {
772: LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
773: lwp_lock(l2);
1.56 pavel 774: l2->l_flag |= LW_PENDSIG;
1.52 ad 775: lwp_unlock(l2);
776: }
1.31 yamt 777: }
778:
1.52 ad 779: lwp_lock(l);
780: l->l_stat = LSZOMB;
781: lwp_unlock(l);
1.2 thorpej 782: p->p_nrlwps--;
1.52 ad 783: cv_broadcast(&p->p_lwpcv);
784: mutex_exit(&p->p_smutex);
785:
786: /*
787: * We can no longer block. At this point, lwp_free() may already
788: * be gunning for us. On a multi-CPU system, we may be off p_lwps.
789: *
790: * Free MD LWP resources.
791: */
792: #ifndef __NO_CPU_LWP_FREE
793: cpu_lwp_free(l, 0);
794: #endif
1.2 thorpej 795:
1.65 ad 796: if (current) {
797: pmap_deactivate(l);
798:
799: /*
800: * Release the kernel lock, and switch away into
801: * oblivion.
802: */
1.52 ad 803: #ifdef notyet
1.65 ad 804: /* XXXSMP hold in lwp_userret() */
805: KERNEL_UNLOCK_LAST(l);
1.52 ad 806: #else
1.65 ad 807: KERNEL_UNLOCK_ALL(l, NULL);
1.52 ad 808: #endif
1.65 ad 809: lwp_exit_switchaway(l);
810: }
1.2 thorpej 811: }
812:
813: void
1.64 yamt 814: lwp_exit_switchaway(struct lwp *l)
1.2 thorpej 815: {
1.64 yamt 816: struct cpu_info *ci;
817: struct lwp *idlelwp;
818:
819: 	/* Unlocked, but this is for statistics only. */
820: uvmexp.swtch++;
821:
822: (void)splsched();
823: l->l_flag &= ~LW_RUNNING;
824: ci = curcpu();
825: idlelwp = ci->ci_data.cpu_idlelwp;
826: idlelwp->l_stat = LSONPROC;
827: cpu_switchto(NULL, idlelwp);
1.52 ad 828: }
829:
830: /*
831: * Free a dead LWP's remaining resources.
832: *
833: * XXXLWP limits.
834: */
835: void
1.63 ad 836: lwp_free(struct lwp *l, bool recycle, bool last)
1.52 ad 837: {
838: struct proc *p = l->l_proc;
839: ksiginfoq_t kq;
840:
841: /*
842: * If this was not the last LWP in the process, then adjust
843: * counters and unlock.
844: */
845: if (!last) {
846: /*
847: * Add the LWP's run time to the process' base value.
848: * This needs to co-incide with coming off p_lwps.
849: */
850: timeradd(&l->l_rtime, &p->p_rtime, &p->p_rtime);
1.64 yamt 851: p->p_pctcpu += l->l_pctcpu;
1.52 ad 852: LIST_REMOVE(l, l_sibling);
853: p->p_nlwps--;
854: p->p_nzlwps--;
855: if ((l->l_prflag & LPR_DETACHED) != 0)
856: p->p_ndlwps--;
1.63 ad 857:
858: /*
859: * Have any LWPs sleeping in lwp_wait() recheck for
860: * deadlock.
861: */
862: cv_broadcast(&p->p_lwpcv);
1.52 ad 863: mutex_exit(&p->p_smutex);
1.63 ad 864: }
1.52 ad 865:
866: #ifdef MULTIPROCESSOR
1.63 ad 867: /*
868: * In the unlikely event that the LWP is still on the CPU,
869: * spin until it has switched away. We need to release
870: * all locks to avoid deadlock against interrupt handlers on
871: * the target CPU.
872: */
1.64 yamt 873: if ((l->l_flag & LW_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
1.63 ad 874: int count;
1.64 yamt 875: (void)count; /* XXXgcc */
1.63 ad 876: KERNEL_UNLOCK_ALL(curlwp, &count);
1.64 yamt 877: while ((l->l_flag & LW_RUNNING) != 0 ||
878: l->l_cpu->ci_curlwp == l)
1.63 ad 879: SPINLOCK_BACKOFF_HOOK;
880: KERNEL_LOCK(count, curlwp);
881: }
1.52 ad 882: #endif
883:
884: /*
885: * Destroy the LWP's remaining signal information.
886: */
887: ksiginfo_queue_init(&kq);
888: sigclear(&l->l_sigpend, NULL, &kq);
889: ksiginfo_queue_drain(&kq);
890: cv_destroy(&l->l_sigcv);
1.65 ad 891: mutex_destroy(&l->l_swaplock);
1.2 thorpej 892:
1.19 jdolecek 893: /*
1.52 ad 894: * Free the LWP's turnstile and the LWP structure itself unless the
1.64 yamt 895: * caller wants to recycle them. Also, free the scheduler-specific data.
1.52 ad 896: *
897: * We can't return turnstile0 to the pool (it didn't come from it),
898: * so if it comes up just drop it quietly and move on.
899: *
900: * We don't recycle the VM resources at this time.
1.19 jdolecek 901: */
1.55 ad 902: KERNEL_LOCK(1, curlwp); /* XXXSMP */
1.64 yamt 903:
904: sched_lwp_exit(l);
905:
1.52 ad 906: if (!recycle && l->l_ts != &turnstile0)
907: pool_cache_put(&turnstile_cache, l->l_ts);
908: #ifndef __NO_CPU_LWP_FREE
909: cpu_lwp_free2(l);
910: #endif
1.19 jdolecek 911: uvm_lwp_exit(l);
1.60 yamt 912: KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
913: KASSERT(l->l_inheritedprio == MAXPRI);
1.52 ad 914: if (!recycle)
1.19 jdolecek 915: pool_put(&lwp_pool, l);
1.55 ad 916: KERNEL_UNLOCK_ONE(curlwp); /* XXXSMP */
1.2 thorpej 917: }
918:
919: /*
920: * Pick an LWP to represent the process for those operations which
921: * want information about a "process" that is actually associated
922: * with an LWP.
1.52 ad 923: *
924: * If 'locking' is false, no locking or lock checks are performed.
925: * This is intended for use by DDB.
926: *
927: * We don't bother locking the LWP here, since code that uses this
928: * interface is broken by design and an exact match is not required.
1.2 thorpej 929: */
930: struct lwp *
1.52 ad 931: proc_representative_lwp(struct proc *p, int *nrlwps, int locking)
1.2 thorpej 932: {
933: struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
1.27 matt 934: struct lwp *signalled;
1.52 ad 935: int cnt;
936:
937: if (locking) {
1.63 ad 938: KASSERT(mutex_owned(&p->p_smutex));
1.52 ad 939: }
1.2 thorpej 940:
941: /* Trivial case: only one LWP */
1.52 ad 942: if (p->p_nlwps == 1) {
943: l = LIST_FIRST(&p->p_lwps);
944: if (nrlwps)
1.68 tnn 945: *nrlwps = (l->l_stat == LSONPROC || l->l_stat == LSRUN);
1.52 ad 946: return l;
947: }
1.2 thorpej 948:
1.52 ad 949: cnt = 0;
1.2 thorpej 950: switch (p->p_stat) {
951: case SSTOP:
952: case SACTIVE:
953: /* Pick the most live LWP */
954: onproc = running = sleeping = stopped = suspended = NULL;
1.27 matt 955: signalled = NULL;
1.2 thorpej 956: LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1.64 yamt 957: if ((l->l_flag & LW_IDLE) != 0) {
958: continue;
959: }
1.27 matt 960: if (l->l_lid == p->p_sigctx.ps_lwp)
961: signalled = l;
1.2 thorpej 962: switch (l->l_stat) {
963: case LSONPROC:
964: onproc = l;
1.52 ad 965: cnt++;
1.2 thorpej 966: break;
967: case LSRUN:
968: running = l;
1.52 ad 969: cnt++;
1.2 thorpej 970: break;
971: case LSSLEEP:
972: sleeping = l;
973: break;
974: case LSSTOP:
975: stopped = l;
976: break;
977: case LSSUSPENDED:
978: suspended = l;
979: break;
980: }
981: }
1.52 ad 982: if (nrlwps)
983: *nrlwps = cnt;
1.27 matt 984: if (signalled)
1.52 ad 985: l = signalled;
986: else if (onproc)
987: l = onproc;
988: else if (running)
989: l = running;
990: else if (sleeping)
991: l = sleeping;
992: else if (stopped)
993: l = stopped;
994: else if (suspended)
995: l = suspended;
996: else
997: break;
998: return l;
1.2 thorpej 999: #ifdef DIAGNOSTIC
1000: case SIDL:
1.52 ad 1001: case SZOMB:
1002: case SDYING:
1003: case SDEAD:
1004: if (locking)
1005: mutex_exit(&p->p_smutex);
1.2 thorpej 1006: /* We have more than one LWP and we're in SIDL?
1007: * How'd that happen?
1008: */
1.52 ad 1009: panic("Too many LWPs in idle/dying process %d (%s) stat = %d",
1010: p->p_pid, p->p_comm, p->p_stat);
1011: break;
1.2 thorpej 1012: default:
1.52 ad 1013: if (locking)
1014: mutex_exit(&p->p_smutex);
1.2 thorpej 1015: panic("Process %d (%s) in unknown state %d",
1016: p->p_pid, p->p_comm, p->p_stat);
1017: #endif
1018: }
1019:
1.52 ad 1020: if (locking)
1021: mutex_exit(&p->p_smutex);
1.2 thorpej 1022: panic("proc_representative_lwp: couldn't find a lwp for process"
1023: " %d (%s)", p->p_pid, p->p_comm);
1024: /* NOTREACHED */
1025: return NULL;
1026: }
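/*
 * Illustrative usage (hedged, caller names assumed): a reporting path
 * might take a snapshot this way, tolerating the inexactness that the
 * comment above documents:
 *
 *	mutex_enter(&p->p_smutex);
 *	l = proc_representative_lwp(p, &nrlwps, 1);
 *	mutex_exit(&p->p_smutex);
 */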
1.37 ad 1027:
1028: /*
1.52 ad 1029: * Look up a live LWP within the specified process, and return it locked.
1030: *
1031: * Must be called with p->p_smutex held.
1032: */
1033: struct lwp *
1034: lwp_find(struct proc *p, int id)
1035: {
1036: struct lwp *l;
1037:
1.63 ad 1038: KASSERT(mutex_owned(&p->p_smutex));
1.52 ad 1039:
1040: LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1041: if (l->l_lid == id)
1042: break;
1043: }
1044:
1045: /*
1046: * No need to lock - all of these conditions will
1047: * be visible with the process level mutex held.
1048: */
1049: if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
1050: l = NULL;
1051:
1052: return l;
1053: }
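/*
 * A hedged sketch of the expected lookup pattern, in the style of a
 * _lwp_suspend()-like caller; the helper name is illustrative:
 */
#if 0
static int
suspend_by_lid(struct lwp *l, lwpid_t lid)
{
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(&p->p_smutex);
	if ((t = lwp_find(p, lid)) == NULL) {
		mutex_exit(&p->p_smutex);
		return ESRCH;
	}
	lwp_lock(t);
	error = lwp_suspend(l, t);	/* unlocks t */
	mutex_exit(&p->p_smutex);
	return error;
}
#endif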
1054:
1055: /*
1.37 ad 1056: * Update an LWP's cached credentials to mirror the process' master copy.
1057: *
1058: * This happens early in the syscall path, on user trap, and on LWP
1059: * creation. A long-running LWP can also voluntarily choose to update
1060: * its credentials by calling this routine. This may be called from
1061: * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
1062: */
1063: void
1064: lwp_update_creds(struct lwp *l)
1065: {
1066: kauth_cred_t oc;
1067: struct proc *p;
1068:
1069: p = l->l_proc;
1070: oc = l->l_cred;
1071:
1.52 ad 1072: mutex_enter(&p->p_mutex);
1.37 ad 1073: kauth_cred_hold(p->p_cred);
1074: l->l_cred = p->p_cred;
1.52 ad 1075: mutex_exit(&p->p_mutex);
1076: if (oc != NULL) {
1077: KERNEL_LOCK(1, l); /* XXXSMP */
1.37 ad 1078: kauth_cred_free(oc);
1.52 ad 1079: KERNEL_UNLOCK_ONE(l); /* XXXSMP */
1080: }
1081: }
1082:
1083: /*
1084: * Verify that an LWP is locked, and optionally verify that the lock matches
1085: * one we specify.
1086: */
1087: int
1088: lwp_locked(struct lwp *l, kmutex_t *mtx)
1089: {
1090: kmutex_t *cur = l->l_mutex;
1091:
1092: return mutex_owned(cur) && (mtx == cur || mtx == NULL);
1093: }
1094:
1095: /*
1096: * Lock an LWP.
1097: */
1098: void
1099: lwp_lock_retry(struct lwp *l, kmutex_t *old)
1100: {
1101:
1102: /*
1103: * XXXgcc ignoring kmutex_t * volatile on i386
1104: *
1105: * gcc version 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
1106: */
1107: #if 1
1108: while (l->l_mutex != old) {
1109: #else
1110: for (;;) {
1111: #endif
1112: mutex_spin_exit(old);
1113: old = l->l_mutex;
1114: mutex_spin_enter(old);
1115:
1116: /*
1117: * mutex_enter() will have posted a read barrier. Re-test
1118: * l->l_mutex. If it has changed, we need to try again.
1119: */
1120: #if 1
1121: }
1122: #else
1123: } while (__predict_false(l->l_mutex != old));
1124: #endif
1125: }
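/*
 * For context, a hedged sketch of how the lwp_lock() fast path (an
 * inline expected to live in sys/lwp.h) pairs with lwp_lock_retry():
 *
 *	kmutex_t *old = l->l_mutex;
 *
 *	mutex_spin_enter(old);
 *	if (__predict_false(l->l_mutex != old))
 *		lwp_lock_retry(l, old);
 */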
1126:
1127: /*
1128: * Lend a new mutex to an LWP. The old mutex must be held.
1129: */
1130: void
1131: lwp_setlock(struct lwp *l, kmutex_t *new)
1132: {
1133:
1.63 ad 1134: KASSERT(mutex_owned(l->l_mutex));
1.52 ad 1135:
1136: mb_write();
1137: l->l_mutex = new;
1138: }
1139:
1140: /*
1141: * Lend a new mutex to an LWP, and release the old mutex. The old mutex
1142: * must be held.
1143: */
1144: void
1145: lwp_unlock_to(struct lwp *l, kmutex_t *new)
1146: {
1147: kmutex_t *old;
1148:
1.63 ad 1149: KASSERT(mutex_owned(l->l_mutex));
1.52 ad 1150:
1151: old = l->l_mutex;
1152: mb_write();
1153: l->l_mutex = new;
1154: mutex_spin_exit(old);
1155: }
1156:
1157: /*
1158: * Acquire a new mutex, and donate it to an LWP. The LWP must already be
1159: * locked.
1160: */
1161: void
1162: lwp_relock(struct lwp *l, kmutex_t *new)
1163: {
1164: kmutex_t *old;
1165:
1.63 ad 1166: KASSERT(mutex_owned(l->l_mutex));
1.52 ad 1167:
1168: old = l->l_mutex;
1169: if (old != new) {
1170: mutex_spin_enter(new);
1171: l->l_mutex = new;
1172: mutex_spin_exit(old);
1173: }
1174: }
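/*
 * A hedged illustration of lock lending: when an LWP is placed on a
 * sleep queue 'sq', the queue's lock is donated so that the LWP is then
 * covered by sq_mutex, matching the LSSLEEP rule in the overview:
 *
 *	KASSERT(lwp_locked(l, NULL));
 *	l->l_sleepq = sq;
 *	lwp_unlock_to(l, sq->sq_mutex);		now l_mutex == sq_mutex
 */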
1175:
1.60 yamt 1176: int
1177: lwp_trylock(struct lwp *l)
1178: {
1179: kmutex_t *old;
1180:
1181: for (;;) {
1182: if (!mutex_tryenter(old = l->l_mutex))
1183: return 0;
1184: if (__predict_true(l->l_mutex == old))
1185: return 1;
1186: mutex_spin_exit(old);
1187: }
1188: }
1189:
1.52 ad 1190: /*
1.56 pavel 1191: * Handle exceptions for mi_userret(). Called if a member of LW_USERRET is
1.52 ad 1192: * set.
1193: */
1194: void
1195: lwp_userret(struct lwp *l)
1196: {
1197: struct proc *p;
1.54 ad 1198: void (*hook)(void);
1.52 ad 1199: int sig;
1200:
1201: p = l->l_proc;
1202:
1203: /*
1204: * It should be safe to do this read unlocked on a multiprocessor
1205: * system.
1206: */
1.56 pavel 1207: while ((l->l_flag & LW_USERRET) != 0) {
1.52 ad 1208: /*
1209: * Process pending signals first, unless the process
1.61 ad 1210: * is dumping core or exiting, where we will instead
1211: * enter the L_WSUSPEND case below.
1.52 ad 1212: */
1.61 ad 1213: if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
1214: LW_PENDSIG) {
1.52 ad 1215: KERNEL_LOCK(1, l); /* XXXSMP pool_put() below */
1216: mutex_enter(&p->p_smutex);
1217: while ((sig = issignal(l)) != 0)
1218: postsig(sig);
1219: mutex_exit(&p->p_smutex);
1220: KERNEL_UNLOCK_LAST(l); /* XXXSMP */
1221: }
1222:
1223: /*
1224: * Core-dump or suspend pending.
1225: *
1226: * In case of core dump, suspend ourselves, so that the
1227: * kernel stack and therefore the userland registers saved
1228: * in the trapframe are around for coredump() to write them
1229: * out. We issue a wakeup on p->p_lwpcv so that sigexit()
1230: * will write the core file out once all other LWPs are
1231: * suspended.
1232: */
1.56 pavel 1233: if ((l->l_flag & LW_WSUSPEND) != 0) {
1.52 ad 1234: mutex_enter(&p->p_smutex);
1235: p->p_nrlwps--;
1236: cv_broadcast(&p->p_lwpcv);
1237: lwp_lock(l);
1238: l->l_stat = LSSUSPENDED;
1239: mutex_exit(&p->p_smutex);
1.64 yamt 1240: mi_switch(l);
1.52 ad 1241: }
1242:
1243: /* Process is exiting. */
1.56 pavel 1244: if ((l->l_flag & LW_WEXIT) != 0) {
1.52 ad 1245: KERNEL_LOCK(1, l);
1246: lwp_exit(l);
1247: KASSERT(0);
1248: /* NOTREACHED */
1249: }
1.54 ad 1250:
1251: /* Call userret hook; used by Linux emulation. */
1.56 pavel 1252: if ((l->l_flag & LW_WUSERRET) != 0) {
1.54 ad 1253: lwp_lock(l);
1.56 pavel 1254: l->l_flag &= ~LW_WUSERRET;
1.54 ad 1255: lwp_unlock(l);
1256: hook = p->p_userret;
1257: p->p_userret = NULL;
1258: (*hook)();
1259: }
1.52 ad 1260: }
1261: }
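/*
 * A hedged sketch of the caller's side: the MI return-to-user path is
 * expected to test the LW_USERRET bits and divert here, e.g.:
 *
 *	if (__predict_false((l->l_flag & LW_USERRET) != 0))
 *		lwp_userret(l);
 */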
1262:
1263: /*
1264: * Force an LWP to enter the kernel, to take a trip through lwp_userret().
1265: */
1266: void
1267: lwp_need_userret(struct lwp *l)
1268: {
1.63 ad 1269: KASSERT(lwp_locked(l, NULL));
1.52 ad 1270:
1271: /*
1272: * Since the tests in lwp_userret() are done unlocked, make sure
1273: * that the condition will be seen before forcing the LWP to enter
1274: * kernel mode.
1275: */
1276: mb_write();
1277: cpu_signotify(l);
1278: }
1279:
1280: /*
1281: * Add one reference to an LWP. This will prevent the LWP from
1282: * exiting, thus keeping the lwp structure and PCB around to inspect.
1283: */
1284: void
1285: lwp_addref(struct lwp *l)
1286: {
1287:
1.63 ad 1288: KASSERT(mutex_owned(&l->l_proc->p_smutex));
1.52 ad 1289: KASSERT(l->l_stat != LSZOMB);
1290: KASSERT(l->l_refcnt != 0);
1291:
1292: l->l_refcnt++;
1293: }
1294:
1295: /*
1296: * Remove one reference to an LWP. If this is the last reference,
1297: * then we must finalize the LWP's death.
1298: */
1299: void
1300: lwp_delref(struct lwp *l)
1301: {
1302: struct proc *p = l->l_proc;
1303:
1304: mutex_enter(&p->p_smutex);
1305: if (--l->l_refcnt == 0)
1306: cv_broadcast(&p->p_refcv);
1307: mutex_exit(&p->p_smutex);
1308: }
1309:
1310: /*
1311: * Drain all references to the current LWP.
1312: */
1313: void
1314: lwp_drainrefs(struct lwp *l)
1315: {
1316: struct proc *p = l->l_proc;
1317:
1.63 ad 1318: KASSERT(mutex_owned(&p->p_smutex));
1.52 ad 1319: KASSERT(l->l_refcnt != 0);
1320:
1321: l->l_refcnt--;
1322: while (l->l_refcnt != 0)
1323: cv_wait(&p->p_refcv, &p->p_smutex);
1.37 ad 1324: }
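/*
 * A hedged sketch (names illustrative) of the reference interface as an
 * inspector such as procfs might use it: pin the LWP under p_smutex,
 * drop the mutex while examining it, then release the reference.
 */
#if 0
static void
inspect_lwp(struct proc *p, lwpid_t lid)
{
	struct lwp *l;

	mutex_enter(&p->p_smutex);
	if ((l = lwp_find(p, lid)) == NULL) {
		mutex_exit(&p->p_smutex);
		return;
	}
	lwp_addref(l);		/* LWP cannot exit until lwp_delref() */
	mutex_exit(&p->p_smutex);

	/* ... examine the PCB via l->l_addr, etc ... */

	lwp_delref(l);
}
#endif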
1.41 thorpej 1325:
1326: /*
1327: * lwp_specific_key_create --
1328: * Create a key for subsystem lwp-specific data.
1329: */
1330: int
1331: lwp_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
1332: {
1333:
1.45 thorpej 1334: return (specificdata_key_create(lwp_specificdata_domain, keyp, dtor));
1.41 thorpej 1335: }
1336:
1337: /*
1338: * lwp_specific_key_delete --
1339: * Delete a key for subsystem lwp-specific data.
1340: */
1341: void
1342: lwp_specific_key_delete(specificdata_key_t key)
1343: {
1344:
1345: specificdata_key_delete(lwp_specificdata_domain, key);
1346: }
1347:
1.45 thorpej 1348: /*
1349: * lwp_initspecific --
1350: * Initialize an LWP's specificdata container.
1351: */
1.42 christos 1352: void
1353: lwp_initspecific(struct lwp *l)
1354: {
1355: int error;
1.45 thorpej 1356:
1.42 christos 1357: error = specificdata_init(lwp_specificdata_domain, &l->l_specdataref);
1358: KASSERT(error == 0);
1359: }
1360:
1.41 thorpej 1361: /*
1.45 thorpej 1362: * lwp_finispecific --
1363: * Finalize an LWP's specificdata container.
1364: */
1365: void
1366: lwp_finispecific(struct lwp *l)
1367: {
1368:
1369: specificdata_fini(lwp_specificdata_domain, &l->l_specdataref);
1370: }
1371:
1372: /*
1.41 thorpej 1373: * lwp_getspecific --
1374: * Return lwp-specific data corresponding to the specified key.
1375: *
1376: * Note: LWP specific data is NOT INTERLOCKED. An LWP should access
1377: * only its OWN SPECIFIC DATA. If it is necessary to access another
1378: * LWP's specific data, care must be taken to ensure that doing so
1379: * would not cause internal data structure inconsistency (i.e. caller
1380: * can guarantee that the target LWP is not inside an lwp_getspecific()
1381: * or lwp_setspecific() call).
1382: */
1383: void *
1.44 thorpej 1384: lwp_getspecific(specificdata_key_t key)
1.41 thorpej 1385: {
1386:
1387: return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1.44 thorpej 1388: &curlwp->l_specdataref, key));
1.41 thorpej 1389: }
1390:
1.47 hannken 1391: void *
1392: _lwp_getspecific_by_lwp(struct lwp *l, specificdata_key_t key)
1393: {
1394:
1395: return (specificdata_getspecific_unlocked(lwp_specificdata_domain,
1396: &l->l_specdataref, key));
1397: }
1398:
1.41 thorpej 1399: /*
1400: * lwp_setspecific --
1401: * Set lwp-specific data corresponding to the specified key.
1402: */
1403: void
1.45 thorpej 1404: lwp_setspecific(specificdata_key_t key, void *data)
1.41 thorpej 1405: {
1406:
1407: specificdata_setspecific(lwp_specificdata_domain,
1.44 thorpej 1408: &curlwp->l_specdataref, key, data);
1.41 thorpej 1409: }
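
/*
 * A hedged end-to-end sketch of the lwp-specific data API; the key and
 * payload here are purely illustrative.
 */
#if 0
static specificdata_key_t example_key;

static void
example_init(void)
{
	int error;

	error = lwp_specific_key_create(&example_key, NULL);
	KASSERT(error == 0);
}

static void
example_use(void)
{
	static int datum;

	lwp_setspecific(example_key, &datum);
	KASSERT(lwp_getspecific(example_key) == &datum);
}
#endif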