
Annotation of src/sys/kern/kern_lwp.c, Revision 1.106.2.7

1.106.2.7! yamt        1: /*     $NetBSD: kern_lwp.c,v 1.106.2.6 2010/03/11 15:04:16 yamt Exp $  */
1.2       thorpej     2:
                      3: /*-
1.106.2.2  yamt        4:  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
1.2       thorpej     5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
1.52      ad          8:  * by Nathan J. Williams, and Andrew Doran.
1.2       thorpej     9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
1.9       lukem      31:
1.52      ad         32: /*
                     33:  * Overview
                     34:  *
1.66      ad         35:  *     Lightweight processes (LWPs) are the basic unit or thread of
1.52      ad         36:  *     execution within the kernel.  The core state of an LWP is described
1.66      ad         37:  *     by "struct lwp", also known as lwp_t.
1.52      ad         38:  *
                      39:  *     Each LWP is contained within a process (described by "struct proc").
                     40:  *     Every process contains at least one LWP, but may contain more.  The
                     41:  *     process describes attributes shared among all of its LWPs such as a
                     42:  *     private address space, global execution state (stopped, active,
                     43:  *     zombie, ...), signal disposition and so on.  On a multiprocessor
1.66      ad         44:  *     machine, multiple LWPs may be executing concurrently in the kernel.
1.52      ad         45:  *
                     46:  * Execution states
                     47:  *
                     48:  *     At any given time, an LWP has overall state that is described by
                     49:  *     lwp::l_stat.  The states are broken into two sets below.  The first
                     50:  *     set is guaranteed to represent the absolute, current state of the
                     51:  *     LWP:
1.101     rmind      52:  *
                     53:  *     LSONPROC
                     54:  *
                     55:  *             On processor: the LWP is executing on a CPU, either in the
                     56:  *             kernel or in user space.
                     57:  *
                     58:  *     LSRUN
                     59:  *
                     60:  *             Runnable: the LWP is parked on a run queue, and may soon be
                     61:  *             chosen to run by an idle processor, or by a processor that
                      62:  *             has been asked to preempt a currently running but lower
1.106.2.6  yamt       63:  *             priority LWP.
1.101     rmind      64:  *
                     65:  *     LSIDL
                     66:  *
                     67:  *             Idle: the LWP has been created but has not yet executed,
1.66      ad         68:  *             or it has ceased executing a unit of work and is waiting
                     69:  *             to be started again.
1.101     rmind      70:  *
                      71:  *     LSSUSPENDED
                     72:  *
                     73:  *             Suspended: the LWP has had its execution suspended by
1.52      ad         74:  *             another LWP in the same process using the _lwp_suspend()
                     75:  *             system call.  User-level LWPs also enter the suspended
                     76:  *             state when the system is shutting down.
                     77:  *
                      78:  *     The second set represents a "statement of intent" on behalf of the
                      79:  *     LWP.  The LWP may in fact be executing on a processor, or may be
1.66      ad         80:  *     sleeping or idle.  It is expected to take the necessary action to
1.101     rmind      81:  *     stop executing or become "running" again within a short timeframe.
1.106.2.2  yamt       82:  *     The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
1.101     rmind      83:  *     Importantly, it indicates that its state is tied to a CPU.
                     84:  *
                     85:  *     LSZOMB:
                     86:  *
                     87:  *             Dead or dying: the LWP has released most of its resources
1.106.2.2  yamt       88:  *             and is about to switch away into oblivion, or has already
1.66      ad         89:  *             switched away.  When it switches away, its few remaining
                     90:  *             resources can be collected.
1.101     rmind      91:  *
                     92:  *     LSSLEEP:
                     93:  *
                     94:  *             Sleeping: the LWP has entered itself onto a sleep queue, and
                     95:  *             has switched away or will switch away shortly to allow other
1.66      ad         96:  *             LWPs to run on the CPU.
1.101     rmind      97:  *
                     98:  *     LSSTOP:
                     99:  *
                    100:  *             Stopped: the LWP has been stopped as a result of a job
                    101:  *             control signal, or as a result of the ptrace() interface.
                    102:  *
                    103:  *             Stopped LWPs may run briefly within the kernel to handle
                    104:  *             signals that they receive, but will not return to user space
                    105:  *             until their process' state is changed away from stopped.
                    106:  *
                     107:  *             Individual LWPs within a process cannot be stopped
                     108:  *             selectively: all actions that can stop or continue LWPs
                    109:  *             occur at the process level.
                    110:  *
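                          *     As a minimal illustrative sketch (not a verbatim kernel
                          *     routine, and assuming the caller also holds the process's
                          *     p_lock, which setrunnable() requires), code inspecting
                          *     l_stat must hold the LWP locked, since the state may
                          *     change the moment the lock is dropped.  Note that
                          *     setrunnable() releases the LWP lock:
                          *
                          *             lwp_lock(l);
                          *             if (l->l_stat == LSSLEEP &&
                          *                 (l->l_flag & LW_SINTR) != 0)
                          *                     setrunnable(l);
                          *             else
                          *                     lwp_unlock(l);
                          *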
1.52      ad        111:  * State transitions
                    112:  *
1.66      ad        113:  *     Note that the LSSTOP state may only be set when returning to
                     114:  *     user space in userret(), or when sleeping interruptibly.  The
                    115:  *     LSSUSPENDED state may only be set in userret().  Before setting
                    116:  *     those states, we try to ensure that the LWPs will release all
                    117:  *     locks that they hold, and at a minimum try to ensure that the
                    118:  *     LWP can be set runnable again by a signal.
1.52      ad        119:  *
                    120:  *     LWPs may transition states in the following ways:
                    121:  *
                    122:  *      RUN -------> ONPROC            ONPROC -----> RUN
1.106.2.2  yamt      123:  *                                                 > SLEEP
                    124:  *                                                 > STOPPED
1.52      ad        125:  *                                                 > SUSPENDED
                    126:  *                                                 > ZOMB
1.106.2.2  yamt      127:  *                                                 > IDL (special cases)
1.52      ad        128:  *
                    129:  *      STOPPED ---> RUN               SUSPENDED --> RUN
1.106.2.2  yamt      130:  *                 > SLEEP
1.52      ad        131:  *
                    132:  *      SLEEP -----> ONPROC            IDL --------> RUN
1.101     rmind     133:  *                 > RUN                           > SUSPENDED
                    134:  *                 > STOPPED                       > STOPPED
1.106.2.2  yamt      135:  *                                                 > ONPROC (special cases)
1.52      ad        136:  *
1.106.2.2  yamt      137:  *     Some state transitions are only possible with kernel threads (e.g.
                    138:  *     ONPROC -> IDL) and happen under tightly controlled circumstances
                    139:  *     free of unwanted side effects.
                    140:  *
                    141:  * Migration
                    142:  *
                     143:  *     Migration of threads from one CPU to another may be performed
                     144:  *     internally by the scheduler via the sched_takecpu() or
                     145:  *     sched_catchlwp() functions.  The universal lwp_migrate() function
                     146:  *     should be used for any other cases.  Subsystems must be aware
                     147:  *     that an LWP's CPU may change while the LWP is not locked.
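                          *
                          *     As a hedged illustration (the target CPU "ci" is an
                          *     assumption of this sketch), forcing an LWP onto a chosen
                          *     CPU goes through the universal interface; lwp_migrate()
                          *     expects the LWP locked and drops the lock itself:
                          *
                          *             lwp_lock(l);
                          *             lwp_migrate(l, ci);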
1.66      ad        148:  *
1.52      ad        149:  * Locking
                    150:  *
                    151:  *     The majority of fields in 'struct lwp' are covered by a single,
1.66      ad        152:  *     general spin lock pointed to by lwp::l_mutex.  The locks covering
1.52      ad        153:  *     each field are documented in sys/lwp.h.
                    154:  *
1.66      ad        155:  *     State transitions must be made with the LWP's general lock held,
1.101     rmind     156:  *     and may cause the LWP's lock pointer to change.  Manipulation of
1.66      ad        157:  *     the general lock is not performed directly, but through calls to
                    158:  *     lwp_lock(), lwp_relock() and similar.
1.52      ad        159:  *
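                          *     A sketch of the usual pattern (illustrative only):
                          *     lwp_lock() follows the l_mutex pointer, which a state
                          *     transition may redirect, so fields are stable only while
                          *     the lock is held:
                          *
                          *             lwp_lock(l);
                          *             ... examine or update fields covered by l_mutex ...
                          *             lwp_unlock(l);
                          *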
                    160:  *     States and their associated locks:
                    161:  *
1.74      rmind     162:  *     LSONPROC, LSZOMB:
1.52      ad        163:  *
1.64      yamt      164:  *             Always covered by spc_lwplock, which protects running LWPs.
1.106.2.2  yamt      165:  *             This is a per-CPU lock and matches lwp::l_cpu.
1.52      ad        166:  *
1.74      rmind     167:  *     LSIDL, LSRUN:
1.52      ad        168:  *
1.64      yamt      169:  *             Always covered by spc_mutex, which protects the run queues.
1.106.2.2  yamt      170:  *             This is a per-CPU lock and matches lwp::l_cpu.
1.52      ad        171:  *
                    172:  *     LSSLEEP:
                    173:  *
1.66      ad        174:  *             Covered by a lock associated with the sleep queue that the
1.106.2.2  yamt      175:  *             LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
1.52      ad        176:  *
                    177:  *     LSSTOP, LSSUSPENDED:
1.101     rmind     178:  *
1.52      ad        179:  *             If the LWP was previously sleeping (l_wchan != NULL), then
1.66      ad        180:  *             l_mutex references the sleep queue lock.  If the LWP was
1.52      ad        181:  *             runnable or on the CPU when halted, or has been removed from
1.66      ad        182:  *             the sleep queue since halted, then the lock is spc_lwplock.
1.52      ad        183:  *
                    184:  *     The lock order is as follows:
                    185:  *
1.64      yamt      186:  *             spc::spc_lwplock ->
1.106.2.2  yamt      187:  *                 sleeptab::st_mutex ->
1.64      yamt      188:  *                     tschain_t::tc_mutex ->
                    189:  *                         spc::spc_mutex
1.52      ad        190:  *
1.103     ad        191:  *     Each process has a scheduler state lock (proc::p_lock), and a
1.52      ad        192:  *     number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
                    193:  *     so on.  When an LWP is to be entered into or removed from one of the
1.103     ad        194:  *     following states, p_lock must be held and the process wide counters
1.52      ad        195:  *     adjusted:
                    196:  *
                    197:  *             LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
                    198:  *
1.106.2.2  yamt      199:  *     (But not always for kernel threads.  There are some special cases
                    200:  *     as mentioned above.  See kern_softint.c.)
                    201:  *
1.52      ad        202:  *     Note that an LWP is considered running or likely to run soon if in
                    203:  *     one of the following states.  This affects the value of p_nrlwps:
                    204:  *
                    205:  *             LSRUN, LSONPROC, LSSLEEP
                    206:  *
1.103     ad        207:  *     p_lock does not need to be held when transitioning among these
1.106.2.2  yamt      208:  *     three states, hence p_lock is rarely taken for state transitions.
1.52      ad        209:  */
                    210:
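                         /*
                          * Illustrative sketch (simplified from the LW_WSUSPEND handling
                          * in lwp_userret(); not part of the original logic): entering a
                          * counted state such as LSSUSPENDED is done with p_lock held
                          * and the process-wide counters adjusted, per the rules above.
                          *
                          *      mutex_enter(p->p_lock);
                          *      p->p_nrlwps--;
                          *      lwp_lock(l);
                          *      l->l_stat = LSSUSPENDED;
                          *      lwp_unlock(l);
                          *      mutex_exit(p->p_lock);
                          */
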
1.9       lukem     211: #include <sys/cdefs.h>
1.106.2.7! yamt      212: __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.106.2.6 2010/03/11 15:04:16 yamt Exp $");
1.8       martin    213:
1.84      yamt      214: #include "opt_ddb.h"
1.52      ad        215: #include "opt_lockdebug.h"
1.106.2.2  yamt      216: #include "opt_sa.h"
1.106.2.6  yamt      217: #include "opt_dtrace.h"
1.2       thorpej   218:
1.47      hannken   219: #define _LWP_API_PRIVATE
                    220:
1.2       thorpej   221: #include <sys/param.h>
                    222: #include <sys/systm.h>
1.64      yamt      223: #include <sys/cpu.h>
1.2       thorpej   224: #include <sys/pool.h>
                    225: #include <sys/proc.h>
1.106.2.2  yamt      226: #include <sys/sa.h>
                    227: #include <sys/savar.h>
1.2       thorpej   228: #include <sys/syscallargs.h>
1.57      dsl       229: #include <sys/syscall_stats.h>
1.37      ad        230: #include <sys/kauth.h>
1.52      ad        231: #include <sys/sleepq.h>
                    232: #include <sys/lockdebug.h>
                    233: #include <sys/kmem.h>
1.91      rmind     234: #include <sys/pset.h>
1.75      ad        235: #include <sys/intr.h>
1.78      ad        236: #include <sys/lwpctl.h>
1.81      ad        237: #include <sys/atomic.h>
1.106.2.3  yamt      238: #include <sys/filedesc.h>
1.106.2.6  yamt      239: #include <sys/dtrace_bsd.h>
                    240: #include <sys/sdt.h>
1.2       thorpej   241:
                    242: #include <uvm/uvm_extern.h>
1.80      skrll     243: #include <uvm/uvm_object.h>
1.2       thorpej   244:
1.106.2.7! yamt      245: struct lwplist         alllwp = LIST_HEAD_INITIALIZER(alllwp);
        !           246: static pool_cache_t    lwp_cache;
1.41      thorpej   247:
1.106.2.6  yamt      248: /* DTrace proc provider probes */
                    249: SDT_PROBE_DEFINE(proc,,,lwp_create,
                    250:        "struct lwp *", NULL,
                    251:        NULL, NULL, NULL, NULL,
                    252:        NULL, NULL, NULL, NULL);
                    253: SDT_PROBE_DEFINE(proc,,,lwp_start,
                    254:        "struct lwp *", NULL,
                    255:        NULL, NULL, NULL, NULL,
                    256:        NULL, NULL, NULL, NULL);
                    257: SDT_PROBE_DEFINE(proc,,,lwp_exit,
                    258:        "struct lwp *", NULL,
                    259:        NULL, NULL, NULL, NULL,
                    260:        NULL, NULL, NULL, NULL);
                    261:
1.106.2.7! yamt      262: struct turnstile turnstile0;
        !           263: struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
        !           264: #ifdef LWP0_CPU_INFO
        !           265:        .l_cpu = LWP0_CPU_INFO,
        !           266: #endif
        !           267:        .l_proc = &proc0,
        !           268:        .l_lid = 1,
        !           269:        .l_flag = LW_SYSTEM,
        !           270:        .l_stat = LSONPROC,
        !           271:        .l_ts = &turnstile0,
        !           272:        .l_syncobj = &sched_syncobj,
        !           273:        .l_refcnt = 1,
        !           274:        .l_priority = PRI_USER + NPRI_USER - 1,
        !           275:        .l_inheritedprio = -1,
        !           276:        .l_class = SCHED_OTHER,
        !           277:        .l_psid = PS_NONE,
        !           278:        .l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
        !           279:        .l_name = __UNCONST("swapper"),
        !           280:        .l_fd = &filedesc0,
        !           281: };
        !           282:
1.41      thorpej   283: void
                    284: lwpinit(void)
                    285: {
                    286:
1.106.2.7! yamt      287:        lwpinit_specificdata();
1.52      ad        288:        lwp_sys_init();
1.87      ad        289:        lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
                    290:            "lwppl", NULL, IPL_NONE, NULL, NULL, NULL);
1.41      thorpej   291: }
                    292:
1.106.2.7! yamt      293: void
        !           294: lwp0_init(void)
        !           295: {
        !           296:        struct lwp *l = &lwp0;
        !           297:
        !           298:        KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
        !           299:        KASSERT(l->l_lid == proc0.p_nlwpid);
        !           300:
        !           301:        LIST_INSERT_HEAD(&alllwp, l, l_list);
        !           302:
        !           303:        callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
        !           304:        callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
        !           305:        cv_init(&l->l_sigcv, "sigwait");
        !           306:
        !           307:        kauth_cred_hold(proc0.p_cred);
        !           308:        l->l_cred = proc0.p_cred;
        !           309:
        !           310:        lwp_initspecific(l);
        !           311:
        !           312:        SYSCALL_TIME_LWP_INIT(l);
        !           313: }
        !           314:
1.52      ad        315: /*
                     316:  * Set an LWP suspended.
                    317:  *
1.103     ad        318:  * Must be called with p_lock held, and the LWP locked.  Will unlock the
1.52      ad        319:  * LWP before return.
                    320:  */
1.2       thorpej   321: int
1.52      ad        322: lwp_suspend(struct lwp *curl, struct lwp *t)
1.2       thorpej   323: {
1.52      ad        324:        int error;
1.2       thorpej   325:
1.103     ad        326:        KASSERT(mutex_owned(t->l_proc->p_lock));
1.63      ad        327:        KASSERT(lwp_locked(t, NULL));
1.33      chs       328:
1.52      ad        329:        KASSERT(curl != t || curl->l_stat == LSONPROC);
1.2       thorpej   330:
1.52      ad        331:        /*
                    332:         * If the current LWP has been told to exit, we must not suspend anyone
                    333:         * else or deadlock could occur.  We won't return to userspace.
1.2       thorpej   334:         */
1.106.2.1  yamt      335:        if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
1.52      ad        336:                lwp_unlock(t);
                    337:                return (EDEADLK);
1.2       thorpej   338:        }
                    339:
1.52      ad        340:        error = 0;
1.2       thorpej   341:
1.52      ad        342:        switch (t->l_stat) {
                    343:        case LSRUN:
                    344:        case LSONPROC:
1.56      pavel     345:                t->l_flag |= LW_WSUSPEND;
1.52      ad        346:                lwp_need_userret(t);
                    347:                lwp_unlock(t);
                    348:                break;
1.2       thorpej   349:
1.52      ad        350:        case LSSLEEP:
1.56      pavel     351:                t->l_flag |= LW_WSUSPEND;
1.2       thorpej   352:
                    353:                /*
1.52      ad        354:                 * Kick the LWP and try to get it to the kernel boundary
                    355:                 * so that it will release any locks that it holds.
                    356:                 * setrunnable() will release the lock.
1.2       thorpej   357:                 */
1.56      pavel     358:                if ((t->l_flag & LW_SINTR) != 0)
1.52      ad        359:                        setrunnable(t);
                    360:                else
                    361:                        lwp_unlock(t);
                    362:                break;
1.2       thorpej   363:
1.52      ad        364:        case LSSUSPENDED:
                    365:                lwp_unlock(t);
                    366:                break;
1.17      manu      367:
1.52      ad        368:        case LSSTOP:
1.56      pavel     369:                t->l_flag |= LW_WSUSPEND;
1.52      ad        370:                setrunnable(t);
                    371:                break;
1.2       thorpej   372:
1.52      ad        373:        case LSIDL:
                    374:        case LSZOMB:
                    375:                error = EINTR; /* It's what Solaris does..... */
                    376:                lwp_unlock(t);
                    377:                break;
1.2       thorpej   378:        }
                    379:
1.69      rmind     380:        return (error);
1.2       thorpej   381: }
                    382:
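                         /*
                          * Example caller (a hedged sketch patterned on the
                          * _lwp_suspend() system call; target lookup and error handling
                          * elided).  Note that lwp_suspend() unlocks "t" but leaves
                          * p_lock held:
                          *
                          *      mutex_enter(p->p_lock);
                          *      lwp_lock(t);
                          *      error = lwp_suspend(curlwp, t);
                          *      mutex_exit(p->p_lock);
                          */
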
1.52      ad        383: /*
                    384:  * Restart a suspended LWP.
                    385:  *
1.103     ad        386:  * Must be called with p_lock held, and the LWP locked.  Will unlock the
1.52      ad        387:  * LWP before return.
                    388:  */
1.2       thorpej   389: void
                    390: lwp_continue(struct lwp *l)
                    391: {
                    392:
1.103     ad        393:        KASSERT(mutex_owned(l->l_proc->p_lock));
1.63      ad        394:        KASSERT(lwp_locked(l, NULL));
1.52      ad        395:
                    396:        /* If rebooting or not suspended, then just bail out. */
1.56      pavel     397:        if ((l->l_flag & LW_WREBOOT) != 0) {
1.52      ad        398:                lwp_unlock(l);
1.2       thorpej   399:                return;
1.10      fvdl      400:        }
1.2       thorpej   401:
1.56      pavel     402:        l->l_flag &= ~LW_WSUSPEND;
1.2       thorpej   403:
1.52      ad        404:        if (l->l_stat != LSSUSPENDED) {
                    405:                lwp_unlock(l);
                    406:                return;
1.2       thorpej   407:        }
                    408:
1.52      ad        409:        /* setrunnable() will release the lock. */
                    410:        setrunnable(l);
1.2       thorpej   411: }
                    412:
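                         /*
                          * Example caller (a hedged sketch patterned on the
                          * _lwp_continue() system call): p_lock is held across the call
                          * and the target is locked going in; lwp_continue() drops the
                          * LWP lock:
                          *
                          *      mutex_enter(p->p_lock);
                          *      lwp_lock(t);
                          *      lwp_continue(t);
                          *      mutex_exit(p->p_lock);
                          */
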
1.52      ad        413: /*
1.106.2.7! yamt      414:  * Restart a stopped LWP.
        !           415:  *
        !           416:  * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
        !           417:  * LWP before return.
        !           418:  */
        !           419: void
        !           420: lwp_unstop(struct lwp *l)
        !           421: {
        !           422:        struct proc *p = l->l_proc;
        !           423:
        !           424:        KASSERT(mutex_owned(proc_lock));
        !           425:        KASSERT(mutex_owned(p->p_lock));
        !           426:
        !           427:        lwp_lock(l);
        !           428:
        !           429:        /* If not stopped, then just bail out. */
        !           430:        if (l->l_stat != LSSTOP) {
        !           431:                lwp_unlock(l);
        !           432:                return;
        !           433:        }
        !           434:
        !           435:        p->p_stat = SACTIVE;
        !           436:        p->p_sflag &= ~PS_STOPPING;
        !           437:
        !           438:        if (!p->p_waited)
        !           439:                p->p_pptr->p_nstopchild--;
        !           440:
        !           441:        if (l->l_wchan == NULL) {
        !           442:                /* setrunnable() will release the lock. */
        !           443:                setrunnable(l);
        !           444:        } else {
        !           445:                l->l_stat = LSSLEEP;
        !           446:                p->p_nrlwps++;
        !           447:                lwp_unlock(l);
        !           448:        }
        !           449: }
        !           450:
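                         /*
                          * Hedged usage sketch for lwp_unstop() (the caller's context is
                          * an assumption of this example): both proc_lock and the
                          * process lock are held, and the LWP is not locked, since
                          * lwp_unstop() takes and releases the LWP lock itself:
                          *
                          *      mutex_enter(proc_lock);
                          *      mutex_enter(p->p_lock);
                          *      lwp_unstop(l);
                          *      mutex_exit(p->p_lock);
                          *      mutex_exit(proc_lock);
                          */
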
        !           451: /*
1.52      ad        452:  * Wait for an LWP within the current process to exit.  If 'lid' is
                    453:  * non-zero, we are waiting for a specific LWP.
                    454:  *
1.103     ad        455:  * Must be called with p->p_lock held.
1.52      ad        456:  */
1.2       thorpej   457: int
                    458: lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
                    459: {
                    460:        struct proc *p = l->l_proc;
1.52      ad        461:        struct lwp *l2;
                    462:        int nfound, error;
1.63      ad        463:        lwpid_t curlid;
                    464:        bool exiting;
1.2       thorpej   465:
1.103     ad        466:        KASSERT(mutex_owned(p->p_lock));
1.52      ad        467:
                    468:        p->p_nlwpwait++;
1.63      ad        469:        l->l_waitingfor = lid;
                    470:        curlid = l->l_lid;
                    471:        exiting = ((flags & LWPWAIT_EXITCONTROL) != 0);
1.52      ad        472:
                    473:        for (;;) {
                    474:                /*
                    475:                 * Avoid a race between exit1() and sigexit(): if the
                    476:                 * process is dumping core, then we need to bail out: call
                    477:                 * into lwp_userret() where we will be suspended until the
                    478:                 * deed is done.
                    479:                 */
                    480:                if ((p->p_sflag & PS_WCORE) != 0) {
1.103     ad        481:                        mutex_exit(p->p_lock);
1.52      ad        482:                        lwp_userret(l);
                    483: #ifdef DIAGNOSTIC
                    484:                        panic("lwp_wait1");
                    485: #endif
                    486:                        /* NOTREACHED */
                    487:                }
                    488:
                    489:                /*
                    490:                 * First off, drain any detached LWP that is waiting to be
                    491:                 * reaped.
                    492:                 */
                    493:                while ((l2 = p->p_zomblwp) != NULL) {
                    494:                        p->p_zomblwp = NULL;
1.63      ad        495:                        lwp_free(l2, false, false);/* releases proc mutex */
1.103     ad        496:                        mutex_enter(p->p_lock);
1.52      ad        497:                }
                    498:
                    499:                /*
                    500:                 * Now look for an LWP to collect.  If the whole process is
                    501:                 * exiting, count detached LWPs as eligible to be collected,
                    502:                 * but don't drain them here.
                    503:                 */
                    504:                nfound = 0;
1.63      ad        505:                error = 0;
1.52      ad        506:                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
1.63      ad        507:                        /*
                    508:                         * If a specific wait and the target is waiting on
                    509:                         * us, then avoid deadlock.  This also traps LWPs
                    510:                         * that try to wait on themselves.
                    511:                         *
                    512:                         * Note that this does not handle more complicated
                    513:                         * cycles, like: t1 -> t2 -> t3 -> t1.  The process
                    514:                         * can still be killed so it is not a major problem.
                    515:                         */
                    516:                        if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
                    517:                                error = EDEADLK;
                    518:                                break;
                    519:                        }
                    520:                        if (l2 == l)
1.52      ad        521:                                continue;
                    522:                        if ((l2->l_prflag & LPR_DETACHED) != 0) {
1.63      ad        523:                                nfound += exiting;
                    524:                                continue;
                    525:                        }
                    526:                        if (lid != 0) {
                    527:                                if (l2->l_lid != lid)
                    528:                                        continue;
                    529:                                /*
                    530:                                 * Mark this LWP as the first waiter, if there
                    531:                                 * is no other.
                    532:                                 */
                    533:                                if (l2->l_waiter == 0)
                    534:                                        l2->l_waiter = curlid;
                    535:                        } else if (l2->l_waiter != 0) {
                    536:                                /*
                    537:                                 * It already has a waiter - so don't
                    538:                                 * collect it.  If the waiter doesn't
                    539:                                 * grab it we'll get another chance
                    540:                                 * later.
                    541:                                 */
                    542:                                nfound++;
1.52      ad        543:                                continue;
                    544:                        }
                    545:                        nfound++;
1.2       thorpej   546:
1.52      ad        547:                        /* No need to lock the LWP in order to see LSZOMB. */
                    548:                        if (l2->l_stat != LSZOMB)
                    549:                                continue;
1.2       thorpej   550:
1.63      ad        551:                        /*
                    552:                         * We're no longer waiting.  Reset the "first waiter"
                    553:                         * pointer on the target, in case it was us.
                    554:                         */
                    555:                        l->l_waitingfor = 0;
                    556:                        l2->l_waiter = 0;
                    557:                        p->p_nlwpwait--;
1.2       thorpej   558:                        if (departed)
                    559:                                *departed = l2->l_lid;
1.75      ad        560:                        sched_lwp_collect(l2);
1.63      ad        561:
                    562:                        /* lwp_free() releases the proc lock. */
                    563:                        lwp_free(l2, false, false);
1.103     ad        564:                        mutex_enter(p->p_lock);
1.52      ad        565:                        return 0;
                    566:                }
1.2       thorpej   567:
1.63      ad        568:                if (error != 0)
                    569:                        break;
1.52      ad        570:                if (nfound == 0) {
                    571:                        error = ESRCH;
                    572:                        break;
                    573:                }
1.63      ad        574:
                    575:                /*
                    576:                 * The kernel is careful to ensure that it cannot deadlock
                    577:                 * when exiting - just keep waiting.
                    578:                 */
                    579:                if (exiting) {
1.52      ad        580:                        KASSERT(p->p_nlwps > 1);
1.103     ad        581:                        cv_wait(&p->p_lwpcv, p->p_lock);
1.52      ad        582:                        continue;
                    583:                }
1.63      ad        584:
                    585:                /*
                    586:                 * If all other LWPs are waiting for exits or suspends
                    587:                 * and the supply of zombies and potential zombies is
                    588:                 * exhausted, then we are about to deadlock.
                    589:                 *
                    590:                 * If the process is exiting (and this LWP is not the one
                    591:                 * that is coordinating the exit) then bail out now.
                    592:                 */
1.52      ad        593:                if ((p->p_sflag & PS_WEXIT) != 0 ||
1.63      ad        594:                    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
1.52      ad        595:                        error = EDEADLK;
                    596:                        break;
1.2       thorpej   597:                }
1.63      ad        598:
                    599:                /*
                    600:                 * Sit around and wait for something to happen.  We'll be
                    601:                 * awoken if any of the conditions examined change: if an
                    602:                 * LWP exits, is collected, or is detached.
                    603:                 */
1.103     ad        604:                if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
1.52      ad        605:                        break;
1.2       thorpej   606:        }
                    607:
1.63      ad        608:        /*
                    609:         * We didn't find any LWPs to collect; we may have received a
                    610:         * signal, or some other condition has caused us to bail out.
                    611:         *
                    612:         * If waiting on a specific LWP, clear the waiters marker: some
                    613:         * other LWP may want it.  Then, kick all the remaining waiters
                    614:         * so that they can re-check for zombies and for deadlock.
                    615:         */
                    616:        if (lid != 0) {
                    617:                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                    618:                        if (l2->l_lid == lid) {
                    619:                                if (l2->l_waiter == curlid)
                    620:                                        l2->l_waiter = 0;
                    621:                                break;
                    622:                        }
                    623:                }
                    624:        }
1.52      ad        625:        p->p_nlwpwait--;
1.63      ad        626:        l->l_waitingfor = 0;
                    627:        cv_broadcast(&p->p_lwpcv);
                    628:
1.52      ad        629:        return error;
1.2       thorpej   630: }
                    631:
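                         /*
                          * Illustrative caller of lwp_wait1() (a sketch in the style of
                          * the _lwp_wait() system call, argument handling elided; p_lock
                          * is held across the call and is still held on return):
                          *
                          *      mutex_enter(p->p_lock);
                          *      error = lwp_wait1(l, lid, &departed, 0);
                          *      mutex_exit(p->p_lock);
                          */
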
1.52      ad        632: /*
                    633:  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
                    634:  * The new LWP is created in state LSIDL and must be set running,
                    635:  * suspended, or stopped by the caller.
                    636:  */
1.2       thorpej   637: int
1.106.2.6  yamt      638: lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
1.75      ad        639:           void *stack, size_t stacksize, void (*func)(void *), void *arg,
                    640:           lwp_t **rnewlwpp, int sclass)
1.2       thorpej   641: {
1.52      ad        642:        struct lwp *l2, *isfree;
                    643:        turnstile_t *ts;
1.106.2.7! yamt      644:        lwpid_t lid;
1.2       thorpej   645:
1.106.2.1  yamt      646:        KASSERT(l1 == curlwp || l1->l_proc == &proc0);
                    647:
1.52      ad        648:        /*
                    649:         * First off, reap any detached LWP waiting to be collected.
                    650:         * We can re-use its LWP structure and turnstile.
                    651:         */
                    652:        isfree = NULL;
                    653:        if (p2->p_zomblwp != NULL) {
1.103     ad        654:                mutex_enter(p2->p_lock);
1.52      ad        655:                if ((isfree = p2->p_zomblwp) != NULL) {
                    656:                        p2->p_zomblwp = NULL;
1.63      ad        657:                        lwp_free(isfree, true, false);/* releases proc mutex */
1.52      ad        658:                } else
1.103     ad        659:                        mutex_exit(p2->p_lock);
1.52      ad        660:        }
                    661:        if (isfree == NULL) {
1.87      ad        662:                l2 = pool_cache_get(lwp_cache, PR_WAITOK);
1.52      ad        663:                memset(l2, 0, sizeof(*l2));
1.76      ad        664:                l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
1.60      yamt      665:                SLIST_INIT(&l2->l_pi_lenders);
1.52      ad        666:        } else {
                    667:                l2 = isfree;
                    668:                ts = l2->l_ts;
1.75      ad        669:                KASSERT(l2->l_inheritedprio == -1);
1.60      yamt      670:                KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
1.52      ad        671:                memset(l2, 0, sizeof(*l2));
                    672:                l2->l_ts = ts;
                    673:        }
1.2       thorpej   674:
                    675:        l2->l_stat = LSIDL;
                    676:        l2->l_proc = p2;
1.52      ad        677:        l2->l_refcnt = 1;
1.75      ad        678:        l2->l_class = sclass;
1.106.2.2  yamt      679:
                    680:        /*
                    681:         * If vfork(), we want the LWP to run fast and on the same CPU
                    682:         * as its parent, so that it can reuse the VM context and cache
                    683:         * footprint on the local CPU.
                    684:         */
                    685:        l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
1.82      ad        686:        l2->l_kpribase = PRI_KERNEL;
1.52      ad        687:        l2->l_priority = l1->l_priority;
1.75      ad        688:        l2->l_inheritedprio = -1;
1.106.2.6  yamt      689:        l2->l_flag = 0;
1.88      ad        690:        l2->l_pflag = LP_MPSAFE;
1.106.2.1  yamt      691:        TAILQ_INIT(&l2->l_ld_locks);
1.41      thorpej   692:
1.106.2.3  yamt      693:        /*
                    694:         * If not the first LWP in the process, grab a reference to the
                    695:         * descriptor table.
                    696:         */
                    697:        l2->l_fd = p2->p_fd;
                    698:        if (p2->p_nlwps != 0) {
                    699:                KASSERT(l1->l_proc == p2);
1.106.2.6  yamt      700:                fd_hold(l2);
1.106.2.3  yamt      701:        } else {
                    702:                KASSERT(l1->l_proc != p2);
                    703:        }
                    704:
1.56      pavel     705:        if (p2->p_flag & PK_SYSTEM) {
1.106.2.6  yamt      706:                /* Mark it as a system LWP. */
1.56      pavel     707:                l2->l_flag |= LW_SYSTEM;
1.52      ad        708:        }
1.2       thorpej   709:
1.106.2.1  yamt      710:        kpreempt_disable();
                    711:        l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
                    712:        l2->l_cpu = l1->l_cpu;
                    713:        kpreempt_enable();
                    714:
1.106.2.6  yamt      715:        kdtrace_thread_ctor(NULL, l2);
1.73      rmind     716:        lwp_initspecific(l2);
1.75      ad        717:        sched_lwp_fork(l1, l2);
1.37      ad        718:        lwp_update_creds(l2);
1.70      ad        719:        callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
                    720:        callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
1.52      ad        721:        cv_init(&l2->l_sigcv, "sigwait");
                    722:        l2->l_syncobj = &sched_syncobj;
1.2       thorpej   723:
                    724:        if (rnewlwpp != NULL)
                    725:                *rnewlwpp = l2;
                    726:
1.106.2.6  yamt      727:        uvm_lwp_setuarea(l2, uaddr);
1.2       thorpej   728:        uvm_lwp_fork(l1, l2, stack, stacksize, func,
                    729:            (arg != NULL) ? arg : l2);
                    730:
1.106.2.7! yamt      731:        if ((flags & LWP_PIDLID) != 0) {
        !           732:                lid = proc_alloc_pid(p2);
        !           733:                l2->l_pflag |= LP_PIDLID;
        !           734:        } else {
        !           735:                lid = 0;
        !           736:        }
        !           737:
1.103     ad        738:        mutex_enter(p2->p_lock);
1.52      ad        739:
                    740:        if ((flags & LWP_DETACHED) != 0) {
                    741:                l2->l_prflag = LPR_DETACHED;
                    742:                p2->p_ndlwps++;
                    743:        } else
                    744:                l2->l_prflag = 0;
                    745:
                    746:        l2->l_sigmask = l1->l_sigmask;
                    747:        CIRCLEQ_INIT(&l2->l_sigpend.sp_info);
                    748:        sigemptyset(&l2->l_sigpend.sp_set);
                    749:
1.106.2.7! yamt      750:        if (lid == 0) {
1.53      yamt      751:                p2->p_nlwpid++;
1.106.2.7! yamt      752:                if (p2->p_nlwpid == 0)
        !           753:                        p2->p_nlwpid++;
        !           754:                lid = p2->p_nlwpid;
        !           755:        }
        !           756:        l2->l_lid = lid;
1.2       thorpej   757:        LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
                    758:        p2->p_nlwps++;
1.106.2.7! yamt      759:        p2->p_nrlwps++;
1.2       thorpej   760:
1.91      rmind     761:        if ((p2->p_flag & PK_SYSTEM) == 0) {
1.106.2.2  yamt      762:                /* Inherit an affinity */
                    763:                if (l1->l_flag & LW_AFFINITY) {
                    764:                        /*
                    765:                         * Note that we hold the state lock while inheriting
                    766:                         * the affinity to avoid race with sched_setaffinity().
                    767:                         */
                    768:                        lwp_lock(l1);
                    769:                        if (l1->l_flag & LW_AFFINITY) {
                    770:                                kcpuset_use(l1->l_affinity);
                    771:                                l2->l_affinity = l1->l_affinity;
                    772:                                l2->l_flag |= LW_AFFINITY;
                    773:                        }
                    774:                        lwp_unlock(l1);
                    775:                }
1.91      rmind     776:                lwp_lock(l2);
                    777:                /* Inherit a processor-set */
                    778:                l2->l_psid = l1->l_psid;
                    779:                /* Look for a CPU to start */
                    780:                l2->l_cpu = sched_takecpu(l2);
                    781:                lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
                    782:        }
1.106.2.2  yamt      783:        mutex_exit(p2->p_lock);
                    784:
1.106.2.6  yamt      785:        SDT_PROBE(proc,,,lwp_create, l2, 0,0,0,0);
                    786:
1.106.2.2  yamt      787:        mutex_enter(proc_lock);
                    788:        LIST_INSERT_HEAD(&alllwp, l2, l_list);
                    789:        mutex_exit(proc_lock);
1.91      rmind     790:
1.57      dsl       791:        SYSCALL_TIME_LWP_INIT(l2);
                    792:
1.16      manu      793:        if (p2->p_emul->e_lwp_fork)
                    794:                (*p2->p_emul->e_lwp_fork)(l1, l2);
                    795:
1.2       thorpej   796:        return (0);
                    797: }
                    798:
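                         /*
                          * Driving lwp_create() (a hedged sketch in the style of the
                          * _lwp_create() system call; "uaddr", "f" and "arg" are
                          * assumptions of this example).  The new LWP is born in LSIDL
                          * and must be set running explicitly:
                          *
                          *      error = lwp_create(curlwp, p, uaddr, 0, NULL, 0, f, arg,
                          *          &l2, SCHED_OTHER);
                          *      if (error == 0) {
                          *              mutex_enter(p->p_lock);
                          *              lwp_lock(l2);
                          *              l2->l_stat = LSRUN;
                          *              sched_enqueue(l2, false);
                          *              lwp_unlock(l2);
                          *              mutex_exit(p->p_lock);
                          *      }
                          */
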
                    799: /*
1.64      yamt      800:  * Called by MD code when a new LWP begins execution.  Must be called
                    801:  * with the previous LWP locked (so at splsched), or if there is no
                    802:  * previous LWP, at splsched.
                    803:  */
                    804: void
                    805: lwp_startup(struct lwp *prev, struct lwp *new)
                    806: {
                    807:
1.106.2.6  yamt      808:        SDT_PROBE(proc,,,lwp_start, new, 0,0,0,0);
                    809:
1.106.2.1  yamt      810:        KASSERT(kpreempt_disabled());
1.64      yamt      811:        if (prev != NULL) {
1.81      ad        812:                /*
                    813:                 * Normalize the count of the spin-mutexes: it was
                    814:                 * increased in mi_switch().  Unmark the context
                    815:                 * switch state - it is finished for the previous LWP.
                    816:                 */
                    817:                curcpu()->ci_mtx_count++;
                    818:                membar_exit();
                    819:                prev->l_ctxswtch = 0;
1.64      yamt      820:        }
1.106.2.1  yamt      821:        KPREEMPT_DISABLE(new);
1.64      yamt      822:        spl0();
1.106.2.1  yamt      823:        pmap_activate(new);
1.64      yamt      824:        LOCKDEBUG_BARRIER(NULL, 0);
1.106.2.1  yamt      825:        KPREEMPT_ENABLE(new);
1.65      ad        826:        if ((new->l_pflag & LP_MPSAFE) == 0) {
                    827:                KERNEL_LOCK(1, new);
                    828:        }
1.64      yamt      829: }
                    830:
                    831: /*
1.65      ad        832:  * Exit an LWP.
1.2       thorpej   833:  */
                    834: void
                    835: lwp_exit(struct lwp *l)
                    836: {
                    837:        struct proc *p = l->l_proc;
1.52      ad        838:        struct lwp *l2;
1.65      ad        839:        bool current;
                    840:
                    841:        current = (l == curlwp);
1.2       thorpej   842:
1.106.2.2  yamt      843:        KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
1.106.2.3  yamt      844:        KASSERT(p == curproc);
1.2       thorpej   845:
1.106.2.6  yamt      846:        SDT_PROBE(proc,,,lwp_exit, l, 0,0,0,0);
                    847:
1.52      ad        848:        /*
                    849:         * Verify that we hold no locks other than the kernel lock.
                    850:         */
                    851:        LOCKDEBUG_BARRIER(&kernel_lock, 0);
1.16      manu      852:
1.2       thorpej   853:        /*
1.52      ad        854:         * If we are the last live LWP in a process, we need to exit the
                    855:         * entire process.  We do so with an exit status of zero, because
                    856:         * it's a "controlled" exit, and because that's what Solaris does.
                    857:         *
                    858:         * We are not quite a zombie yet, but for accounting purposes we
                    859:         * must increment the count of zombies here.
1.45      thorpej   860:         *
                    861:         * Note: the last LWP's specificdata will be deleted here.
1.2       thorpej   862:         */
1.103     ad        863:        mutex_enter(p->p_lock);
1.52      ad        864:        if (p->p_nlwps - p->p_nzlwps == 1) {
1.65      ad        865:                KASSERT(current == true);
1.88      ad        866:                /* XXXSMP kernel_lock not held */
1.2       thorpej   867:                exit1(l, 0);
1.19      jdolecek  868:                /* NOTREACHED */
1.2       thorpej   869:        }
1.52      ad        870:        p->p_nzlwps++;
1.103     ad        871:        mutex_exit(p->p_lock);
1.52      ad        872:
                    873:        if (p->p_emul->e_lwp_exit)
                    874:                (*p->p_emul->e_lwp_exit)(l);
1.2       thorpej   875:
1.106.2.3  yamt      876:        /* Drop filedesc reference. */
                    877:        fd_free();
                    878:
1.45      thorpej   879:        /* Delete the specificdata while it's still safe to sleep. */
1.106.2.7! yamt      880:        lwp_finispecific(l);
1.45      thorpej   881:
1.52      ad        882:        /*
                    883:         * Release our cached credentials.
                    884:         */
1.37      ad        885:        kauth_cred_free(l->l_cred);
1.70      ad        886:        callout_destroy(&l->l_timeout_ch);
1.65      ad        887:
                    888:        /*
1.52      ad        889:         * Remove the LWP from the global list.
1.106.2.7! yamt      890:         * Free its LID from the PID namespace if needed.
1.52      ad        891:         */
1.102     ad        892:        mutex_enter(proc_lock);
1.52      ad        893:        LIST_REMOVE(l, l_list);
1.106.2.7! yamt      894:        if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
        !           895:                proc_free_pid(l->l_lid);
        !           896:        }
1.102     ad        897:        mutex_exit(proc_lock);
1.19      jdolecek  898:
1.52      ad        899:        /*
                    900:         * Get rid of all references to the LWP that others (e.g. procfs)
                    901:         * may have, and mark the LWP as a zombie.  If the LWP is detached,
                    902:         * mark it waiting for collection in the proc structure.  Note that
                    903:         * before we can do that, we need to free any other dead, deatched
                     904:  * before we can do that, we need to free any other dead, detached
                    905:         */
1.103     ad        906:        mutex_enter(p->p_lock);
1.52      ad        907:        lwp_drainrefs(l);
1.31      yamt      908:
1.52      ad        909:        if ((l->l_prflag & LPR_DETACHED) != 0) {
                    910:                while ((l2 = p->p_zomblwp) != NULL) {
                    911:                        p->p_zomblwp = NULL;
1.63      ad        912:                        lwp_free(l2, false, false);/* releases proc mutex */
1.103     ad        913:                        mutex_enter(p->p_lock);
1.72      ad        914:                        l->l_refcnt++;
                    915:                        lwp_drainrefs(l);
1.52      ad        916:                }
                    917:                p->p_zomblwp = l;
                    918:        }
1.31      yamt      919:
1.52      ad        920:        /*
                    921:         * If we find a pending signal for the process and we have been
1.106.2.7! yamt      922:         * asked to check for signals, then we lose: arrange to have
1.52      ad        923:         * all other LWPs in the process check for signals.
                    924:         */
1.56      pavel     925:        if ((l->l_flag & LW_PENDSIG) != 0 &&
1.52      ad        926:            firstsig(&p->p_sigpend.sp_set) != 0) {
                    927:                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                    928:                        lwp_lock(l2);
1.56      pavel     929:                        l2->l_flag |= LW_PENDSIG;
1.52      ad        930:                        lwp_unlock(l2);
                    931:                }
1.31      yamt      932:        }
                    933:
1.52      ad        934:        lwp_lock(l);
                    935:        l->l_stat = LSZOMB;
1.90      ad        936:        if (l->l_name != NULL)
                    937:                strcpy(l->l_name, "(zombie)");
1.106.2.2  yamt      938:        if (l->l_flag & LW_AFFINITY) {
                    939:                l->l_flag &= ~LW_AFFINITY;
                    940:        } else {
                    941:                KASSERT(l->l_affinity == NULL);
                    942:        }
1.52      ad        943:        lwp_unlock(l);
1.2       thorpej   944:        p->p_nrlwps--;
1.52      ad        945:        cv_broadcast(&p->p_lwpcv);
1.78      ad        946:        if (l->l_lwpctl != NULL)
                    947:                l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
1.103     ad        948:        mutex_exit(p->p_lock);
1.52      ad        949:
1.106.2.2  yamt      950:        /* Safe without lock since LWP is in zombie state */
                    951:        if (l->l_affinity) {
                    952:                kcpuset_unuse(l->l_affinity, NULL);
                    953:                l->l_affinity = NULL;
                    954:        }
                    955:
1.52      ad        956:        /*
                    957:         * We can no longer block.  At this point, lwp_free() may already
                    958:         * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
                    959:         *
                    960:         * Free MD LWP resources.
                    961:         */
                    962:        cpu_lwp_free(l, 0);
1.2       thorpej   963:
1.65      ad        964:        if (current) {
                    965:                pmap_deactivate(l);
                    966:
                    967:                /*
                    968:                 * Release the kernel lock, and switch away into
                    969:                 * oblivion.
                    970:                 */
1.52      ad        971: #ifdef notyet
1.65      ad        972:                /* XXXSMP hold in lwp_userret() */
                    973:                KERNEL_UNLOCK_LAST(l);
1.52      ad        974: #else
1.65      ad        975:                KERNEL_UNLOCK_ALL(l, NULL);
1.52      ad        976: #endif
1.65      ad        977:                lwp_exit_switchaway(l);
                    978:        }
1.2       thorpej   979: }
                    980:
1.52      ad        981: /*
                    982:  * Free a dead LWP's remaining resources.
                    983:  *
                    984:  * XXXLWP limits.
                    985:  */
                    986: void
1.63      ad        987: lwp_free(struct lwp *l, bool recycle, bool last)
1.52      ad        988: {
                    989:        struct proc *p = l->l_proc;
1.100     ad        990:        struct rusage *ru;
1.52      ad        991:        ksiginfoq_t kq;
                    992:
1.92      yamt      993:        KASSERT(l != curlwp);
                    994:
1.52      ad        995:        /*
                    996:         * If this was not the last LWP in the process, then adjust
                    997:         * counters and unlock.
                    998:         */
                    999:        if (!last) {
                   1000:                /*
                   1001:                 * Add the LWP's run time to the process' base value.
                    1002:         * This needs to coincide with coming off p_lwps.
                   1003:                 */
1.86      yamt     1004:                bintime_add(&p->p_rtime, &l->l_rtime);
1.64      yamt     1005:                p->p_pctcpu += l->l_pctcpu;
1.100     ad       1006:                ru = &p->p_stats->p_ru;
                   1007:                ruadd(ru, &l->l_ru);
                   1008:                ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
                   1009:                ru->ru_nivcsw += l->l_nivcsw;
1.52      ad       1010:                LIST_REMOVE(l, l_sibling);
                   1011:                p->p_nlwps--;
                   1012:                p->p_nzlwps--;
                   1013:                if ((l->l_prflag & LPR_DETACHED) != 0)
                   1014:                        p->p_ndlwps--;
1.63      ad       1015:
                   1016:                /*
                   1017:                 * Have any LWPs sleeping in lwp_wait() recheck for
                   1018:                 * deadlock.
                   1019:                 */
                   1020:                cv_broadcast(&p->p_lwpcv);
1.103     ad       1021:                mutex_exit(p->p_lock);
1.63      ad       1022:        }
1.52      ad       1023:
                   1024: #ifdef MULTIPROCESSOR
1.63      ad       1025:        /*
                   1026:         * In the unlikely event that the LWP is still on the CPU,
                    1027:         * spin until it has switched away.  We need to release
                   1028:         * all locks to avoid deadlock against interrupt handlers on
                   1029:         * the target CPU.
                   1030:         */
1.106.2.2  yamt     1031:        if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
1.63      ad       1032:                int count;
1.64      yamt     1033:                (void)count; /* XXXgcc */
1.63      ad       1034:                KERNEL_UNLOCK_ALL(curlwp, &count);
1.106.2.2  yamt     1035:                while ((l->l_pflag & LP_RUNNING) != 0 ||
1.64      yamt     1036:                    l->l_cpu->ci_curlwp == l)
1.63      ad       1037:                        SPINLOCK_BACKOFF_HOOK;
                   1038:                KERNEL_LOCK(count, curlwp);
                   1039:        }
1.52      ad       1040: #endif
                   1041:
                   1042:        /*
                   1043:         * Destroy the LWP's remaining signal information.
                   1044:         */
                   1045:        ksiginfo_queue_init(&kq);
                   1046:        sigclear(&l->l_sigpend, NULL, &kq);
                   1047:        ksiginfo_queue_drain(&kq);
                   1048:        cv_destroy(&l->l_sigcv);
1.2       thorpej  1049:
1.19      jdolecek 1050:        /*
1.52      ad       1051:         * Free the LWP's turnstile and the LWP structure itself unless the
1.93      yamt     1052:         * caller wants to recycle them.  Also, free the scheduler specific
                   1053:         * data.
1.52      ad       1054:         *
                   1055:         * We can't return turnstile0 to the pool (it didn't come from it),
                   1056:         * so if it comes up just drop it quietly and move on.
                   1057:         *
                   1058:         * We don't recycle the VM resources at this time.
1.19      jdolecek 1059:         */
1.78      ad       1060:        if (l->l_lwpctl != NULL)
                   1061:                lwp_ctl_free(l);
1.64      yamt     1062:
1.52      ad       1063:        if (!recycle && l->l_ts != &turnstile0)
1.76      ad       1064:                pool_cache_put(turnstile_cache, l->l_ts);
1.90      ad       1065:        if (l->l_name != NULL)
                   1066:                kmem_free(l->l_name, MAXCOMLEN);
1.106.2.6  yamt     1067:
1.52      ad       1068:        cpu_lwp_free2(l);
1.19      jdolecek 1069:        uvm_lwp_exit(l);
1.106.2.6  yamt     1070:
1.60      yamt     1071:        KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
1.75      ad       1072:        KASSERT(l->l_inheritedprio == -1);
1.106.2.6  yamt     1073:        kdtrace_thread_dtor(NULL, l);
1.52      ad       1074:        if (!recycle)
1.87      ad       1075:                pool_cache_put(lwp_cache, l);
1.2       thorpej  1076: }
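
                          /*
                           * Caller sketch (illustrative): reapers call lwp_free() with
                           * p->p_lock held; when "last" is false the lock is dropped by
                           * lwp_free() itself, as in the zombie-recycling path in
                           * lwp_exit() above:
                           *
                           *      mutex_enter(p->p_lock);
                           *      ...
                           *      lwp_free(l2, false, false);     (releases p->p_lock)
                           */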
                   1077:
                   1078: /*
1.91      rmind    1079:  * Migrate the LWP to another CPU.  Unlocks the LWP.
                   1080:  */
                   1081: void
1.106.2.2  yamt     1082: lwp_migrate(lwp_t *l, struct cpu_info *tci)
1.91      rmind    1083: {
1.106.2.2  yamt     1084:        struct schedstate_percpu *tspc;
                   1085:        int lstat = l->l_stat;
                   1086:
1.91      rmind    1087:        KASSERT(lwp_locked(l, NULL));
1.106.2.2  yamt     1088:        KASSERT(tci != NULL);
                   1089:
                   1090:        /* If LWP is still on the CPU, it must be handled like LSONPROC */
                   1091:        if ((l->l_pflag & LP_RUNNING) != 0) {
                   1092:                lstat = LSONPROC;
                   1093:        }
1.91      rmind    1094:
1.106.2.2  yamt     1095:        /*
                    1096:         * The destination CPU may change again before a previously
                    1097:         * requested migration has completed.
                   1098:         */
                   1099:        if (l->l_target_cpu != NULL) {
                   1100:                l->l_target_cpu = tci;
1.91      rmind    1101:                lwp_unlock(l);
                   1102:                return;
                   1103:        }
                   1104:
1.106.2.2  yamt     1105:        /* Nothing to do if trying to migrate to the same CPU */
                   1106:        if (l->l_cpu == tci) {
                   1107:                lwp_unlock(l);
                   1108:                return;
                   1109:        }
                   1110:
                   1111:        KASSERT(l->l_target_cpu == NULL);
                   1112:        tspc = &tci->ci_schedstate;
                   1113:        switch (lstat) {
1.91      rmind    1114:        case LSRUN:
1.106.2.6  yamt     1115:                l->l_target_cpu = tci;
                   1116:                break;
1.91      rmind    1117:        case LSIDL:
1.106.2.2  yamt     1118:                l->l_cpu = tci;
                   1119:                lwp_unlock_to(l, tspc->spc_mutex);
1.91      rmind    1120:                return;
                   1121:        case LSSLEEP:
1.106.2.2  yamt     1122:                l->l_cpu = tci;
1.91      rmind    1123:                break;
                   1124:        case LSSTOP:
                   1125:        case LSSUSPENDED:
1.106.2.2  yamt     1126:                l->l_cpu = tci;
                   1127:                if (l->l_wchan == NULL) {
                   1128:                        lwp_unlock_to(l, tspc->spc_lwplock);
                   1129:                        return;
1.91      rmind    1130:                }
1.106.2.2  yamt     1131:                break;
1.91      rmind    1132:        case LSONPROC:
1.106.2.2  yamt     1133:                l->l_target_cpu = tci;
                   1134:                spc_lock(l->l_cpu);
                   1135:                cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
                   1136:                spc_unlock(l->l_cpu);
1.91      rmind    1137:                break;
                   1138:        }
                   1139:        lwp_unlock(l);
                   1140: }
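
                          /*
                           * Usage sketch (illustrative): the caller must hold the LWP
                           * locked, and the lock is always released on return:
                           *
                           *      lwp_lock(l);
                           *      lwp_migrate(l, tci);            (l is now unlocked)
                           */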
                   1141:
                   1142: /*
1.94      rmind    1143:  * Find an LWP in the process.  Either argument may be zero, in which
                    1144:  * case the calling process and/or the first LWP in the list is used.
1.103     ad       1145:  * On success, the containing proc is returned locked.
1.91      rmind    1146:  */
                   1147: struct lwp *
                   1148: lwp_find2(pid_t pid, lwpid_t lid)
                   1149: {
                   1150:        proc_t *p;
                   1151:        lwp_t *l;
                   1152:
1.106.2.7! yamt     1153:        /* Find the process. */
1.94      rmind    1154:        if (pid != 0) {
1.106.2.7! yamt     1155:                mutex_enter(proc_lock);
        !          1156:                p = proc_find(pid);
        !          1157:                if (p == NULL) {
        !          1158:                        mutex_exit(proc_lock);
        !          1159:                        return NULL;
        !          1160:                }
        !          1161:                mutex_enter(p->p_lock);
1.102     ad       1162:                mutex_exit(proc_lock);
1.106.2.7! yamt     1163:        } else {
        !          1164:                p = curlwp->l_proc;
        !          1165:                mutex_enter(p->p_lock);
        !          1166:        }
        !          1167:        /* Find the thread. */
        !          1168:        if (lid != 0) {
        !          1169:                l = lwp_find(p, lid);
        !          1170:        } else {
        !          1171:                l = LIST_FIRST(&p->p_lwps);
1.94      rmind    1172:        }
1.103     ad       1173:        if (l == NULL) {
                   1174:                mutex_exit(p->p_lock);
                   1175:        }
1.91      rmind    1176:        return l;
                   1177: }
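
                          /*
                           * Example (sketch): look up thread "lid" of process "pid" and
                           * release the proc lock that lwp_find2() returns held:
                           *
                           *      l = lwp_find2(pid, lid);
                           *      if (l != NULL) {
                           *              ...inspect l...
                           *              mutex_exit(l->l_proc->p_lock);
                           *      }
                           */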
                   1178:
                   1179: /*
1.106.2.7! yamt     1180:  * Look up a live LWP within the specified process, and return it locked.
1.52      ad       1181:  *
1.103     ad       1182:  * Must be called with p->p_lock held.
1.52      ad       1183:  */
                   1184: struct lwp *
1.106.2.7! yamt     1185: lwp_find(struct proc *p, lwpid_t id)
1.52      ad       1186: {
                   1187:        struct lwp *l;
                   1188:
1.103     ad       1189:        KASSERT(mutex_owned(p->p_lock));
1.52      ad       1190:
                   1191:        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                   1192:                if (l->l_lid == id)
                   1193:                        break;
                   1194:        }
                   1195:
                   1196:        /*
                   1197:         * No need to lock - all of these conditions will
                   1198:         * be visible with the process level mutex held.
                   1199:         */
                   1200:        if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
                   1201:                l = NULL;
                   1202:
                   1203:        return l;
                   1204: }
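
                          /*
                           * Example (sketch): lwp_find() only searches; the caller
                           * provides the locking:
                           *
                           *      mutex_enter(p->p_lock);
                           *      if ((l = lwp_find(p, lid)) != NULL)
                           *              ...l is live (not LSIDL/LSZOMB) under p_lock...
                           *      mutex_exit(p->p_lock);
                           */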
                   1205:
                   1206: /*
1.37      ad       1207:  * Update an LWP's cached credentials to mirror the process' master copy.
                   1208:  *
                   1209:  * This happens early in the syscall path, on user trap, and on LWP
                   1210:  * creation.  A long-running LWP can also voluntarily choose to update
                    1211:  * its credentials by calling this routine.  This may be called from
                   1212:  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
                   1213:  */
                   1214: void
                   1215: lwp_update_creds(struct lwp *l)
                   1216: {
                   1217:        kauth_cred_t oc;
                   1218:        struct proc *p;
                   1219:
                   1220:        p = l->l_proc;
                   1221:        oc = l->l_cred;
                   1222:
1.103     ad       1223:        mutex_enter(p->p_lock);
1.37      ad       1224:        kauth_cred_hold(p->p_cred);
                   1225:        l->l_cred = p->p_cred;
1.98      ad       1226:        l->l_prflag &= ~LPR_CRMOD;
1.103     ad       1227:        mutex_exit(p->p_lock);
1.88      ad       1228:        if (oc != NULL)
1.37      ad       1229:                kauth_cred_free(oc);
1.52      ad       1230: }
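
                          /*
                           * Sketch of the fast-path test mentioned above; roughly what
                           * LWP_CACHE_CREDS() amounts to (see sys/lwp.h for the
                           * authoritative definition):
                           *
                           *      if (__predict_false(l->l_cred != p->p_cred))
                           *              lwp_update_creds(l);
                           */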
                   1231:
                   1232: /*
                   1233:  * Verify that an LWP is locked, and optionally verify that the lock matches
                   1234:  * one we specify.
                   1235:  */
                   1236: int
                   1237: lwp_locked(struct lwp *l, kmutex_t *mtx)
                   1238: {
                   1239:        kmutex_t *cur = l->l_mutex;
                   1240:
                   1241:        return mutex_owned(cur) && (mtx == cur || mtx == NULL);
                   1242: }
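
                          /*
                           * Typical use is in assertions, as elsewhere in this file,
                           * e.g.:
                           *
                           *      KASSERT(lwp_locked(l, NULL));           (any lock)
                           *      KASSERT(lwp_locked(l, spc->spc_mutex)); (specific lock)
                           */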
                   1243:
                   1244: /*
                   1245:  * Lock an LWP.
                   1246:  */
1.106.2.2  yamt     1247: kmutex_t *
1.52      ad       1248: lwp_lock_retry(struct lwp *l, kmutex_t *old)
                   1249: {
                   1250:
                   1251:        /*
                    1252:         * XXXgcc: gcc 4.1.2 20061021 prerelease (NetBSD nb1 20061021)
                    1253:         * ignores "kmutex_t * volatile" on i386, so the loop below
                    1254:         * re-tests l->l_mutex explicitly rather than trusting volatile.
                   1255:         */
                    1257:        while (l->l_mutex != old) {
                   1261:                mutex_spin_exit(old);
                   1262:                old = l->l_mutex;
                   1263:                mutex_spin_enter(old);
                   1264:
                   1265:                /*
                   1266:                 * mutex_enter() will have posted a read barrier.  Re-test
                   1267:                 * l->l_mutex.  If it has changed, we need to try again.
                   1268:                 */
                    1270:        }
1.106.2.2  yamt     1274:
                   1275:        return old;
1.52      ad       1276: }
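
                          /*
                           * Sketch of the fast path backing onto this routine: the
                           * lwp_lock() inline (see sys/lwp.h) speculatively takes the
                           * current mutex and retries here if it changed meanwhile,
                           * roughly:
                           *
                           *      kmutex_t *old = l->l_mutex;
                           *      mutex_spin_enter(old);
                           *      if (__predict_false(l->l_mutex != old))
                           *              lwp_lock_retry(l, old);
                           */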
                   1277:
                   1278: /*
                   1279:  * Lend a new mutex to an LWP.  The old mutex must be held.
                   1280:  */
                   1281: void
                   1282: lwp_setlock(struct lwp *l, kmutex_t *new)
                   1283: {
                   1284:
1.63      ad       1285:        KASSERT(mutex_owned(l->l_mutex));
1.52      ad       1286:
1.106.2.1  yamt     1287:        membar_exit();
1.52      ad       1288:        l->l_mutex = new;
                   1289: }
                   1290:
                   1291: /*
                   1292:  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
                   1293:  * must be held.
                   1294:  */
                   1295: void
                   1296: lwp_unlock_to(struct lwp *l, kmutex_t *new)
                   1297: {
                   1298:        kmutex_t *old;
                   1299:
1.63      ad       1300:        KASSERT(mutex_owned(l->l_mutex));
1.52      ad       1301:
                   1302:        old = l->l_mutex;
1.106.2.1  yamt     1303:        membar_exit();
1.52      ad       1304:        l->l_mutex = new;
                   1305:        mutex_spin_exit(old);
                   1306: }
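
                          /*
                           * Example (from the LSIDL case in lwp_migrate() above): hand
                           * the LWP over to another CPU's scheduler mutex:
                           *
                           *      l->l_cpu = tci;
                           *      lwp_unlock_to(l, tci->ci_schedstate.spc_mutex);
                           */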
                   1307:
                   1308: /*
                   1309:  * Acquire a new mutex, and donate it to an LWP.  The LWP must already be
                   1310:  * locked.
                   1311:  */
                   1312: void
                   1313: lwp_relock(struct lwp *l, kmutex_t *new)
                   1314: {
                   1315:        kmutex_t *old;
                   1316:
1.63      ad       1317:        KASSERT(mutex_owned(l->l_mutex));
1.52      ad       1318:
                   1319:        old = l->l_mutex;
                   1320:        if (old != new) {
                   1321:                mutex_spin_enter(new);
                   1322:                l->l_mutex = new;
                   1323:                mutex_spin_exit(old);
                   1324:        }
                   1325: }
                   1326:
1.60      yamt     1327: int
                   1328: lwp_trylock(struct lwp *l)
                   1329: {
                   1330:        kmutex_t *old;
                   1331:
                   1332:        for (;;) {
                   1333:                if (!mutex_tryenter(old = l->l_mutex))
                   1334:                        return 0;
                   1335:                if (__predict_true(l->l_mutex == old))
                   1336:                        return 1;
                   1337:                mutex_spin_exit(old);
                   1338:        }
                   1339: }
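
                          /*
                           * Example (sketch): poll for the lock without risking a spin
                           * against a mutex that may change underfoot:
                           *
                           *      if (lwp_trylock(l)) {
                           *              ...operate on l...
                           *              lwp_unlock(l);
                           *      }
                           */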
                   1340:
1.106.2.6  yamt     1341: void
1.96      ad       1342: lwp_unsleep(lwp_t *l, bool cleanup)
                   1343: {
                   1344:
                   1345:        KASSERT(mutex_owned(l->l_mutex));
1.106.2.6  yamt     1346:        (*l->l_syncobj->sobj_unsleep)(l, cleanup);
1.96      ad       1347: }
                   1348:
                   1349:
1.52      ad       1350: /*
1.56      pavel    1351:  * Handle exceptional events for mi_userret().  Called if any of the
1.52      ad       1352:  * flags in the LW_USERRET mask is set.
                   1353:  */
                   1354: void
                   1355: lwp_userret(struct lwp *l)
                   1356: {
                   1357:        struct proc *p;
                   1358:        int sig;
                   1359:
1.106.2.2  yamt     1360:        KASSERT(l == curlwp);
                   1361:        KASSERT(l->l_stat == LSONPROC);
1.52      ad       1362:        p = l->l_proc;
                   1363:
1.75      ad       1364: #ifndef __HAVE_FAST_SOFTINTS
                   1365:        /* Run pending soft interrupts. */
                   1366:        if (l->l_cpu->ci_data.cpu_softints != 0)
                   1367:                softint_overlay();
                   1368: #endif
                   1369:
1.106.2.2  yamt     1370: #ifdef KERN_SA
                   1371:        /* Generate UNBLOCKED upcall if needed */
                   1372:        if (l->l_flag & LW_SA_BLOCKING) {
                   1373:                sa_unblock_userret(l);
                   1374:                /* NOTREACHED */
                   1375:        }
                   1376: #endif
                   1377:
1.52      ad       1378:        /*
                   1379:         * It should be safe to do this read unlocked on a multiprocessor
                    1380:         * system.
1.106.2.2  yamt     1381:         *
                   1382:         * LW_SA_UPCALL will be handled after the while() loop, so don't
                   1383:         * consider it now.
1.52      ad       1384:         */
1.106.2.2  yamt     1385:        while ((l->l_flag & (LW_USERRET & ~(LW_SA_UPCALL))) != 0) {
1.52      ad       1386:                /*
                   1387:                 * Process pending signals first, unless the process
1.61      ad       1388:                 * is dumping core or exiting, where we will instead
1.101     rmind    1389:                 * enter the LW_WSUSPEND case below.
1.52      ad       1390:                 */
1.61      ad       1391:                if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
                   1392:                    LW_PENDSIG) {
1.103     ad       1393:                        mutex_enter(p->p_lock);
1.52      ad       1394:                        while ((sig = issignal(l)) != 0)
                   1395:                                postsig(sig);
1.103     ad       1396:                        mutex_exit(p->p_lock);
1.52      ad       1397:                }
                   1398:
                   1399:                /*
                   1400:                 * Core-dump or suspend pending.
                   1401:                 *
                   1402:                 * In case of core dump, suspend ourselves, so that the
                   1403:                 * kernel stack and therefore the userland registers saved
                   1404:                 * in the trapframe are around for coredump() to write them
                   1405:                 * out.  We issue a wakeup on p->p_lwpcv so that sigexit()
                   1406:                 * will write the core file out once all other LWPs are
                   1407:                 * suspended.
                   1408:                 */
1.56      pavel    1409:                if ((l->l_flag & LW_WSUSPEND) != 0) {
1.103     ad       1410:                        mutex_enter(p->p_lock);
1.52      ad       1411:                        p->p_nrlwps--;
                   1412:                        cv_broadcast(&p->p_lwpcv);
                   1413:                        lwp_lock(l);
                   1414:                        l->l_stat = LSSUSPENDED;
1.104     ad       1415:                        lwp_unlock(l);
1.103     ad       1416:                        mutex_exit(p->p_lock);
1.104     ad       1417:                        lwp_lock(l);
1.64      yamt     1418:                        mi_switch(l);
1.52      ad       1419:                }
                   1420:
                   1421:                /* Process is exiting. */
1.56      pavel    1422:                if ((l->l_flag & LW_WEXIT) != 0) {
1.52      ad       1423:                        lwp_exit(l);
                   1424:                        KASSERT(0);
                   1425:                        /* NOTREACHED */
                   1426:                }
                   1427:        }
1.106.2.2  yamt     1428:
                   1429: #ifdef KERN_SA
                   1430:        /*
                   1431:         * Timer events are handled specially.  We only try once to deliver
                    1432:         * pending timer upcalls; if it fails, we can try again on the next
                   1433:         * loop around.  If we need to re-enter lwp_userret(), MD code will
                   1434:         * bounce us back here through the trap path after we return.
                   1435:         */
                   1436:        if (p->p_timerpend)
                   1437:                timerupcall(l);
                   1438:        if (l->l_flag & LW_SA_UPCALL)
                   1439:                sa_upcall_userret(l);
                   1440: #endif /* KERN_SA */
1.52      ad       1441: }
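
                          /*
                           * Invocation sketch: MD return-to-user paths reach this via
                           * mi_userret(), which does (roughly):
                           *
                           *      if (__predict_false((l->l_flag & LW_USERRET) != 0))
                           *              lwp_userret(l);
                           */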
                   1442:
                   1443: /*
                   1444:  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
                   1445:  */
                   1446: void
                   1447: lwp_need_userret(struct lwp *l)
                   1448: {
1.63      ad       1449:        KASSERT(lwp_locked(l, NULL));
1.52      ad       1450:
                   1451:        /*
                   1452:         * Since the tests in lwp_userret() are done unlocked, make sure
                   1453:         * that the condition will be seen before forcing the LWP to enter
                   1454:         * kernel mode.
                   1455:         */
1.81      ad       1456:        membar_producer();
1.52      ad       1457:        cpu_signotify(l);
                   1458: }
                   1459:
                   1460: /*
                   1461:  * Add one reference to an LWP.  This will prevent the LWP from
                    1462:  * exiting, thus keeping the lwp structure and PCB around to inspect.
                   1463:  */
                   1464: void
                   1465: lwp_addref(struct lwp *l)
                   1466: {
                   1467:
1.103     ad       1468:        KASSERT(mutex_owned(l->l_proc->p_lock));
1.52      ad       1469:        KASSERT(l->l_stat != LSZOMB);
                   1470:        KASSERT(l->l_refcnt != 0);
                   1471:
                   1472:        l->l_refcnt++;
                   1473: }
                   1474:
                   1475: /*
                   1476:  * Remove one reference to an LWP.  If this is the last reference,
                   1477:  * then we must finalize the LWP's death.
                   1478:  */
                   1479: void
                   1480: lwp_delref(struct lwp *l)
                   1481: {
                   1482:        struct proc *p = l->l_proc;
                   1483:
1.103     ad       1484:        mutex_enter(p->p_lock);
1.106.2.7! yamt     1485:        lwp_delref2(l);
        !          1486:        mutex_exit(p->p_lock);
        !          1487: }
        !          1488:
        !          1489: /*
        !          1490:  * Remove one reference to an LWP.  If this is the last reference,
        !          1491:  * then we must finalize the LWP's death.  The proc mutex is held
        !          1492:  * on entry.
        !          1493:  */
        !          1494: void
        !          1495: lwp_delref2(struct lwp *l)
        !          1496: {
        !          1497:        struct proc *p = l->l_proc;
        !          1498:
        !          1499:        KASSERT(mutex_owned(p->p_lock));
1.72      ad       1500:        KASSERT(l->l_stat != LSZOMB);
                   1501:        KASSERT(l->l_refcnt > 0);
1.52      ad       1502:        if (--l->l_refcnt == 0)
1.76      ad       1503:                cv_broadcast(&p->p_lwpcv);
1.52      ad       1504: }
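
                          /*
                           * Reference pattern (sketch): pin an LWP for inspection
                           * without holding p_lock throughout:
                           *
                           *      mutex_enter(p->p_lock);
                           *      if ((l = lwp_find(p, lid)) != NULL)
                           *              lwp_addref(l);
                           *      mutex_exit(p->p_lock);
                           *      ...inspect l...
                           *      lwp_delref(l);          (wakes a waiting reaper if last)
                           */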
                   1505:
                   1506: /*
                    1507:  * Drain all references to an LWP: drop the caller's reference and
                          *  wait until all others have been released.
                   1508:  */
                   1509: void
                   1510: lwp_drainrefs(struct lwp *l)
                   1511: {
                   1512:        struct proc *p = l->l_proc;
                   1513:
1.103     ad       1514:        KASSERT(mutex_owned(p->p_lock));
1.52      ad       1515:        KASSERT(l->l_refcnt != 0);
                   1516:
                   1517:        l->l_refcnt--;
                   1518:        while (l->l_refcnt != 0)
1.103     ad       1519:                cv_wait(&p->p_lwpcv, p->p_lock);
1.37      ad       1520: }
1.41      thorpej  1521:
                   1522: /*
1.106.2.2  yamt     1523:  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
                   1524:  * be held.
                   1525:  */
                   1526: bool
                   1527: lwp_alive(lwp_t *l)
                   1528: {
                   1529:
                   1530:        KASSERT(mutex_owned(l->l_proc->p_lock));
                   1531:
                   1532:        switch (l->l_stat) {
                   1533:        case LSSLEEP:
                   1534:        case LSRUN:
                   1535:        case LSONPROC:
                   1536:        case LSSTOP:
                   1537:        case LSSUSPENDED:
                   1538:                return true;
                   1539:        default:
                   1540:                return false;
                   1541:        }
                   1542: }
                   1543:
                   1544: /*
                    1545:  * Return the first live LWP in the process.
                   1546:  */
                   1547: lwp_t *
                   1548: lwp_find_first(proc_t *p)
                   1549: {
                   1550:        lwp_t *l;
                   1551:
                   1552:        KASSERT(mutex_owned(p->p_lock));
                   1553:
                   1554:        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                   1555:                if (lwp_alive(l)) {
                   1556:                        return l;
                   1557:                }
                   1558:        }
                   1559:
                   1560:        return NULL;
                   1561: }
                   1562:
                   1563: /*
1.78      ad       1564:  * Allocate a new lwpctl structure for a user LWP.
                   1565:  */
                   1566: int
                   1567: lwp_ctl_alloc(vaddr_t *uaddr)
                   1568: {
                   1569:        lcproc_t *lp;
                   1570:        u_int bit, i, offset;
                   1571:        struct uvm_object *uao;
                   1572:        int error;
                   1573:        lcpage_t *lcp;
                   1574:        proc_t *p;
                   1575:        lwp_t *l;
                   1576:
                   1577:        l = curlwp;
                   1578:        p = l->l_proc;
                   1579:
1.81      ad       1580:        if (l->l_lcpage != NULL) {
                   1581:                lcp = l->l_lcpage;
                   1582:                *uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1.106.2.7! yamt     1583:                return 0;
1.81      ad       1584:        }
1.78      ad       1585:
                   1586:        /* First time around, allocate header structure for the process. */
                   1587:        if ((lp = p->p_lwpctl) == NULL) {
                   1588:                lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
                   1589:                mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
                   1590:                lp->lp_uao = NULL;
                   1591:                TAILQ_INIT(&lp->lp_pages);
1.103     ad       1592:                mutex_enter(p->p_lock);
1.78      ad       1593:                if (p->p_lwpctl == NULL) {
                   1594:                        p->p_lwpctl = lp;
1.103     ad       1595:                        mutex_exit(p->p_lock);
1.78      ad       1596:                } else {
1.103     ad       1597:                        mutex_exit(p->p_lock);
1.78      ad       1598:                        mutex_destroy(&lp->lp_lock);
                   1599:                        kmem_free(lp, sizeof(*lp));
                   1600:                        lp = p->p_lwpctl;
                   1601:                }
                   1602:        }
                   1603:
                   1604:        /*
                   1605:         * Set up an anonymous memory region to hold the shared pages.
                   1606:         * Map them into the process' address space.  The user vmspace
                   1607:         * gets the first reference on the UAO.
                   1608:         */
                   1609:        mutex_enter(&lp->lp_lock);
                   1610:        if (lp->lp_uao == NULL) {
                   1611:                lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
                   1612:                lp->lp_cur = 0;
                   1613:                lp->lp_max = LWPCTL_UAREA_SZ;
                   1614:                lp->lp_uva = p->p_emul->e_vm_default_addr(p,
                   1615:                     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
                   1616:                error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
                   1617:                    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
                   1618:                    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
                   1619:                if (error != 0) {
                   1620:                        uao_detach(lp->lp_uao);
                   1621:                        lp->lp_uao = NULL;
                   1622:                        mutex_exit(&lp->lp_lock);
                   1623:                        return error;
                   1624:                }
                   1625:        }
                   1626:
                   1627:        /* Get a free block and allocate for this LWP. */
                   1628:        TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
                   1629:                if (lcp->lcp_nfree != 0)
                   1630:                        break;
                   1631:        }
                   1632:        if (lcp == NULL) {
                   1633:                /* Nothing available - try to set up a free page. */
                   1634:                if (lp->lp_cur == lp->lp_max) {
                   1635:                        mutex_exit(&lp->lp_lock);
                   1636:                        return ENOMEM;
                   1637:                }
                   1638:                lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1.79      yamt     1639:                if (lcp == NULL) {
                   1640:                        mutex_exit(&lp->lp_lock);
1.78      ad       1641:                        return ENOMEM;
1.79      yamt     1642:                }
1.78      ad       1643:                /*
                   1644:                 * Wire the next page down in kernel space.  Since this
                   1645:                 * is a new mapping, we must add a reference.
                   1646:                 */
                   1647:                uao = lp->lp_uao;
                   1648:                (*uao->pgops->pgo_reference)(uao);
1.99      ad       1649:                lcp->lcp_kaddr = vm_map_min(kernel_map);
1.78      ad       1650:                error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
                   1651:                    uao, lp->lp_cur, PAGE_SIZE,
                   1652:                    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
                   1653:                    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
                   1654:                if (error != 0) {
                   1655:                        mutex_exit(&lp->lp_lock);
                   1656:                        kmem_free(lcp, LWPCTL_LCPAGE_SZ);
                   1657:                        (*uao->pgops->pgo_detach)(uao);
                   1658:                        return error;
                   1659:                }
1.89      yamt     1660:                error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
                   1661:                    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
                   1662:                if (error != 0) {
                   1663:                        mutex_exit(&lp->lp_lock);
                   1664:                        uvm_unmap(kernel_map, lcp->lcp_kaddr,
                   1665:                            lcp->lcp_kaddr + PAGE_SIZE);
                   1666:                        kmem_free(lcp, LWPCTL_LCPAGE_SZ);
                   1667:                        return error;
                   1668:                }
1.78      ad       1669:                /* Prepare the page descriptor and link into the list. */
                   1670:                lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
                   1671:                lp->lp_cur += PAGE_SIZE;
                   1672:                lcp->lcp_nfree = LWPCTL_PER_PAGE;
                   1673:                lcp->lcp_rotor = 0;
                   1674:                memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
                   1675:                TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
                   1676:        }
                   1677:        for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
                   1678:                if (++i >= LWPCTL_BITMAP_ENTRIES)
                   1679:                        i = 0;
                   1680:        }
                   1681:        bit = ffs(lcp->lcp_bitmap[i]) - 1;
                   1682:        lcp->lcp_bitmap[i] ^= (1 << bit);
                   1683:        lcp->lcp_rotor = i;
                   1684:        lcp->lcp_nfree--;
                   1685:        l->l_lcpage = lcp;
                   1686:        offset = (i << 5) + bit;
                   1687:        l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
                   1688:        *uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
                   1689:        mutex_exit(&lp->lp_lock);
                   1690:
1.106.2.1  yamt     1691:        KPREEMPT_DISABLE(l);
1.106.2.2  yamt     1692:        l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
1.106.2.1  yamt     1693:        KPREEMPT_ENABLE(l);
1.78      ad       1694:
                   1695:        return 0;
                   1696: }
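
                          /*
                           * Layout sketch: each lcp_bitmap[] word tracks 32 slots, so
                           * for word "i" and bit "bit" the slot index is (i << 5) + bit;
                           * the kernel view of the slot is (lwpctl_t *)lcp_kaddr + index
                           * and the user view is lcp_uaddr + index * sizeof(lwpctl_t),
                           * as computed above.
                           */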
                   1697:
                   1698: /*
                   1699:  * Free an lwpctl structure back to the per-process list.
                   1700:  */
                   1701: void
                   1702: lwp_ctl_free(lwp_t *l)
                   1703: {
                   1704:        lcproc_t *lp;
                   1705:        lcpage_t *lcp;
                   1706:        u_int map, offset;
                   1707:
                   1708:        lp = l->l_proc->p_lwpctl;
                   1709:        KASSERT(lp != NULL);
                   1710:
                   1711:        lcp = l->l_lcpage;
                   1712:        offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
                   1713:        KASSERT(offset < LWPCTL_PER_PAGE);
                   1714:
                   1715:        mutex_enter(&lp->lp_lock);
                   1716:        lcp->lcp_nfree++;
                   1717:        map = offset >> 5;
                   1718:        lcp->lcp_bitmap[map] |= (1 << (offset & 31));
                   1719:        if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
                   1720:                lcp->lcp_rotor = map;
                   1721:        if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
                   1722:                TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
                   1723:                TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
                   1724:        }
                   1725:        mutex_exit(&lp->lp_lock);
                   1726: }
                   1727:
                   1728: /*
                   1729:  * Process is exiting; tear down lwpctl state.  This can only be safely
                   1730:  * called by the last LWP in the process.
                   1731:  */
                   1732: void
                   1733: lwp_ctl_exit(void)
                   1734: {
                   1735:        lcpage_t *lcp, *next;
                   1736:        lcproc_t *lp;
                   1737:        proc_t *p;
                   1738:        lwp_t *l;
                   1739:
                   1740:        l = curlwp;
                   1741:        l->l_lwpctl = NULL;
1.95      ad       1742:        l->l_lcpage = NULL;
1.78      ad       1743:        p = l->l_proc;
                   1744:        lp = p->p_lwpctl;
                   1745:
                   1746:        KASSERT(lp != NULL);
                   1747:        KASSERT(p->p_nlwps == 1);
                   1748:
                   1749:        for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
                   1750:                next = TAILQ_NEXT(lcp, lcp_chain);
                   1751:                uvm_unmap(kernel_map, lcp->lcp_kaddr,
                   1752:                    lcp->lcp_kaddr + PAGE_SIZE);
                   1753:                kmem_free(lcp, LWPCTL_LCPAGE_SZ);
                   1754:        }
                   1755:
                   1756:        if (lp->lp_uao != NULL) {
                   1757:                uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
                   1758:                    lp->lp_uva + LWPCTL_UAREA_SZ);
                   1759:        }
                   1760:
                   1761:        mutex_destroy(&lp->lp_lock);
                   1762:        kmem_free(lp, sizeof(*lp));
                   1763:        p->p_lwpctl = NULL;
                   1764: }
1.84      yamt     1765:
1.106.2.3  yamt     1766: /*
                   1767:  * Return the current LWP's "preemption counter".  Used to detect
                   1768:  * preemption across operations that can tolerate preemption without
                   1769:  * crashing, but which may generate incorrect results if preempted.
                   1770:  */
                   1771: uint64_t
                   1772: lwp_pctr(void)
                   1773: {
                   1774:
                   1775:        return curlwp->l_ncsw;
                   1776: }
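
                          /*
                           * Usage sketch: retry an optimistic computation if this LWP
                           * was preempted (or otherwise switched away) partway through:
                           *
                           *      uint64_t pctr;
                           *
                           *      do {
                           *              pctr = lwp_pctr();
                           *              ...preemption-sensitive work...
                           *      } while (pctr != lwp_pctr());
                           */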
                   1777:
1.106.2.7! yamt     1778: /*
        !          1779:  * Set an LWP's private data pointer.
        !          1780:  */
        !          1781: int
        !          1782: lwp_setprivate(struct lwp *l, void *ptr)
        !          1783: {
        !          1784:        int error = 0;
        !          1785:
        !          1786:        l->l_private = ptr;
        !          1787: #ifdef __HAVE_CPU_LWP_SETPRIVATE
        !          1788:        error = cpu_lwp_setprivate(l, ptr);
        !          1789: #endif
        !          1790:        return error;
        !          1791: }
        !          1792:
1.84      yamt     1793: #if defined(DDB)
                   1794: void
                   1795: lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
                   1796: {
                   1797:        lwp_t *l;
                   1798:
                   1799:        LIST_FOREACH(l, &alllwp, l_list) {
                   1800:                uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
                   1801:
                   1802:                if (addr < stack || stack + KSTACK_SIZE <= addr) {
                   1803:                        continue;
                   1804:                }
                   1805:                (*pr)("%p is %p+%zu, LWP %p's stack\n",
                   1806:                    (void *)addr, (void *)stack,
                   1807:                    (size_t)(addr - stack), l);
                   1808:        }
                   1809: }
                   1810: #endif /* defined(DDB) */
