
Annotation of src/sys/kern/kern_lwp.c, Revision 1.180

1.180   ! christos    1: /*     $NetBSD: kern_lwp.c,v 1.179 2014/10/18 08:33:29 snj Exp $       */
1.2       thorpej     2:
                      3: /*-
1.127     ad          4:  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
1.2       thorpej     5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
1.52      ad          8:  * by Nathan J. Williams, and Andrew Doran.
1.2       thorpej     9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
1.9       lukem      31:
1.52      ad         32: /*
                     33:  * Overview
                     34:  *
1.66      ad         35:  *     Lightweight processes (LWPs) are the basic unit or thread of
1.52      ad         36:  *     execution within the kernel.  The core state of an LWP is described
1.66      ad         37:  *     by "struct lwp", also known as lwp_t.
1.52      ad         38:  *
                      39:  *     Each LWP is contained within a process (described by "struct proc").
                     40:  *     Every process contains at least one LWP, but may contain more.  The
                     41:  *     process describes attributes shared among all of its LWPs such as a
                     42:  *     private address space, global execution state (stopped, active,
                     43:  *     zombie, ...), signal disposition and so on.  On a multiprocessor
1.66      ad         44:  *     machine, multiple LWPs may be executing concurrently in the kernel.
1.52      ad         45:  *
                     46:  * Execution states
                     47:  *
                     48:  *     At any given time, an LWP has overall state that is described by
                     49:  *     lwp::l_stat.  The states are broken into two sets below.  The first
                     50:  *     set is guaranteed to represent the absolute, current state of the
                     51:  *     LWP:
1.101     rmind      52:  *
                     53:  *     LSONPROC
                     54:  *
                     55:  *             On processor: the LWP is executing on a CPU, either in the
                     56:  *             kernel or in user space.
                     57:  *
                     58:  *     LSRUN
                     59:  *
                     60:  *             Runnable: the LWP is parked on a run queue, and may soon be
                     61:  *             chosen to run by an idle processor, or by a processor that
                      62:  *             has been asked to preempt a currently running but lower
1.134     rmind      63:  *             priority LWP.
1.101     rmind      64:  *
                     65:  *     LSIDL
                     66:  *
                     67:  *             Idle: the LWP has been created but has not yet executed,
1.66      ad         68:  *             or it has ceased executing a unit of work and is waiting
                     69:  *             to be started again.
1.101     rmind      70:  *
                     71:  *     LSSUSPENDED:
                     72:  *
                     73:  *             Suspended: the LWP has had its execution suspended by
1.52      ad         74:  *             another LWP in the same process using the _lwp_suspend()
                     75:  *             system call.  User-level LWPs also enter the suspended
                     76:  *             state when the system is shutting down.
                     77:  *
                      78:  *     The second set represents a "statement of intent" on behalf of the
                      79:  *     LWP.  The LWP may in fact be executing on a processor, or may be
1.66      ad         80:  *     sleeping or idle.  It is expected to take the necessary action to
1.101     rmind      81:  *     stop executing or become "running" again within a short timeframe.
1.115     ad         82:  *     The LP_RUNNING flag in lwp::l_pflag indicates that an LWP is running.
1.101     rmind      83:  *     Importantly, it indicates that its state is tied to a CPU.
                     84:  *
                     85:  *     LSZOMB:
                     86:  *
                     87:  *             Dead or dying: the LWP has released most of its resources
1.129     ad         88:  *             and is about to switch away into oblivion, or has already
1.66      ad         89:  *             switched away.  When it switches away, its few remaining
                     90:  *             resources can be collected.
1.101     rmind      91:  *
                     92:  *     LSSLEEP:
                     93:  *
                     94:  *             Sleeping: the LWP has entered itself onto a sleep queue, and
                     95:  *             has switched away or will switch away shortly to allow other
1.66      ad         96:  *             LWPs to run on the CPU.
1.101     rmind      97:  *
                     98:  *     LSSTOP:
                     99:  *
                    100:  *             Stopped: the LWP has been stopped as a result of a job
                    101:  *             control signal, or as a result of the ptrace() interface.
                    102:  *
                    103:  *             Stopped LWPs may run briefly within the kernel to handle
                    104:  *             signals that they receive, but will not return to user space
                    105:  *             until their process' state is changed away from stopped.
                    106:  *
                     107:  *             Single LWPs within a process cannot be set stopped
                    108:  *             selectively: all actions that can stop or continue LWPs
                    109:  *             occur at the process level.
                    110:  *
1.52      ad        111:  * State transitions
                    112:  *
1.66      ad        113:  *     Note that the LSSTOP state may only be set when returning to
                     114:  *     user space in userret(), or when sleeping interruptibly.  The
                    115:  *     LSSUSPENDED state may only be set in userret().  Before setting
                    116:  *     those states, we try to ensure that the LWPs will release all
                    117:  *     locks that they hold, and at a minimum try to ensure that the
                    118:  *     LWP can be set runnable again by a signal.
1.52      ad        119:  *
                    120:  *     LWPs may transition states in the following ways:
                    121:  *
                    122:  *      RUN -------> ONPROC            ONPROC -----> RUN
1.129     ad        123:  *                                                 > SLEEP
                    124:  *                                                 > STOPPED
1.52      ad        125:  *                                                 > SUSPENDED
                    126:  *                                                 > ZOMB
1.129     ad        127:  *                                                 > IDL (special cases)
1.52      ad        128:  *
                    129:  *      STOPPED ---> RUN               SUSPENDED --> RUN
1.129     ad        130:  *                 > SLEEP
1.52      ad        131:  *
                    132:  *      SLEEP -----> ONPROC            IDL --------> RUN
1.101     rmind     133:  *                 > RUN                           > SUSPENDED
                    134:  *                 > STOPPED                       > STOPPED
1.129     ad        135:  *                                                 > ONPROC (special cases)
1.52      ad        136:  *
1.129     ad        137:  *     Some state transitions are only possible with kernel threads (e.g.
                    138:  *     ONPROC -> IDL) and happen under tightly controlled circumstances
                    139:  *     free of unwanted side effects.
1.66      ad        140:  *
1.114     rmind     141:  * Migration
                    142:  *
                     143:  *     Migration of threads from one CPU to another may be performed
                     144:  *     internally by the scheduler via the sched_takecpu() or
                     145:  *     sched_catchlwp() functions.  The universal lwp_migrate() function
                     146:  *     should be used for any other cases.  Subsystems in the kernel must
                     147:  *     be aware that the CPU of an LWP may change while it is not locked.
                    148:  *
1.52      ad        149:  * Locking
                    150:  *
                    151:  *     The majority of fields in 'struct lwp' are covered by a single,
1.66      ad        152:  *     general spin lock pointed to by lwp::l_mutex.  The locks covering
1.52      ad        153:  *     each field are documented in sys/lwp.h.
                    154:  *
1.66      ad        155:  *     State transitions must be made with the LWP's general lock held,
1.152     rmind     156:  *     and may cause the LWP's lock pointer to change.  Manipulation of
1.66      ad        157:  *     the general lock is not performed directly, but through calls to
1.152     rmind     158:  *     lwp_lock(), lwp_unlock() and others.  Note that adaptive locks
                     159:  *     must not be released while the LWP's lock is held (unlike other
                     160:  *     spin locks); a minimal sketch of the pattern follows this comment.
1.52      ad        161:  *
                    162:  *     States and their associated locks:
                    163:  *
1.74      rmind     164:  *     LSONPROC, LSZOMB:
1.52      ad        165:  *
1.64      yamt      166:  *             Always covered by spc_lwplock, which protects running LWPs.
1.129     ad        167:  *             This is a per-CPU lock and matches lwp::l_cpu.
1.52      ad        168:  *
1.74      rmind     169:  *     LSIDL, LSRUN:
1.52      ad        170:  *
1.64      yamt      171:  *             Always covered by spc_mutex, which protects the run queues.
1.129     ad        172:  *             This is a per-CPU lock and matches lwp::l_cpu.
1.52      ad        173:  *
                    174:  *     LSSLEEP:
                    175:  *
1.66      ad        176:  *             Covered by a lock associated with the sleep queue that the
1.129     ad        177:  *             LWP resides on.  Matches lwp::l_sleepq::sq_mutex.
1.52      ad        178:  *
                    179:  *     LSSTOP, LSSUSPENDED:
1.101     rmind     180:  *
1.52      ad        181:  *             If the LWP was previously sleeping (l_wchan != NULL), then
1.66      ad        182:  *             l_mutex references the sleep queue lock.  If the LWP was
1.52      ad        183:  *             runnable or on the CPU when halted, or has been removed from
1.66      ad        184:  *             the sleep queue since halted, then the lock is spc_lwplock.
1.52      ad        185:  *
                    186:  *     The lock order is as follows:
                    187:  *
1.64      yamt      188:  *             spc::spc_lwplock ->
1.112     ad        189:  *                 sleeptab::st_mutex ->
1.64      yamt      190:  *                     tschain_t::tc_mutex ->
                    191:  *                         spc::spc_mutex
1.52      ad        192:  *
1.103     ad        193:  *     Each process has a scheduler state lock (proc::p_lock), and a
1.52      ad        194:  *     number of counters on LWPs and their states: p_nzlwps, p_nrlwps, and
                    195:  *     so on.  When an LWP is to be entered into or removed from one of the
1.103     ad        196:  *     following states, p_lock must be held and the process wide counters
1.52      ad        197:  *     adjusted:
                    198:  *
                    199:  *             LSIDL, LSZOMB, LSSTOP, LSSUSPENDED
                    200:  *
1.129     ad        201:  *     (But not always for kernel threads.  There are some special cases
                    202:  *     as mentioned above.  See kern_softint.c.)
                    203:  *
1.52      ad        204:  *     Note that an LWP is considered running or likely to run soon if in
                    205:  *     one of the following states.  This affects the value of p_nrlwps:
                    206:  *
                    207:  *             LSRUN, LSONPROC, LSSLEEP
                    208:  *
1.103     ad        209:  *     p_lock does not need to be held when transitioning among these
1.129     ad        210:  *     three states, hence p_lock is rarely taken for state transitions.
1.52      ad        211:  */
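/*
 * Illustrative sketch (not part of the file): the locking rules above
 * in miniature.  An LWP's l_stat is only stable while its general lock
 * is held, so any inspection or transition brackets the access with
 * lwp_lock()/lwp_unlock().  The helper name is hypothetical; the
 * wrappers are the real ones used throughout this file.
 */
static bool
lwp_is_oncpu(struct lwp *l)
{
        bool oncpu;

        lwp_lock(l);                    /* pins l_stat and the lock pointer */
        oncpu = (l->l_stat == LSONPROC);
        lwp_unlock(l);

        return oncpu;
}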
                    212:
1.9       lukem     213: #include <sys/cdefs.h>
1.180   ! christos  214: __KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.179 2014/10/18 08:33:29 snj Exp $");
1.8       martin    215:
1.84      yamt      216: #include "opt_ddb.h"
1.52      ad        217: #include "opt_lockdebug.h"
1.139     darran    218: #include "opt_dtrace.h"
1.2       thorpej   219:
1.47      hannken   220: #define _LWP_API_PRIVATE
                    221:
1.2       thorpej   222: #include <sys/param.h>
                    223: #include <sys/systm.h>
1.64      yamt      224: #include <sys/cpu.h>
1.2       thorpej   225: #include <sys/pool.h>
                    226: #include <sys/proc.h>
                    227: #include <sys/syscallargs.h>
1.57      dsl       228: #include <sys/syscall_stats.h>
1.37      ad        229: #include <sys/kauth.h>
1.161     christos  230: #include <sys/pserialize.h>
1.52      ad        231: #include <sys/sleepq.h>
                    232: #include <sys/lockdebug.h>
                    233: #include <sys/kmem.h>
1.91      rmind     234: #include <sys/pset.h>
1.75      ad        235: #include <sys/intr.h>
1.78      ad        236: #include <sys/lwpctl.h>
1.81      ad        237: #include <sys/atomic.h>
1.131     ad        238: #include <sys/filedesc.h>
1.138     darran    239: #include <sys/dtrace_bsd.h>
1.141     darran    240: #include <sys/sdt.h>
1.157     rmind     241: #include <sys/xcall.h>
1.169     christos  242: #include <sys/uidinfo.h>
                    243: #include <sys/sysctl.h>
1.138     darran    244:
1.2       thorpej   245: #include <uvm/uvm_extern.h>
1.80      skrll     246: #include <uvm/uvm_object.h>
1.2       thorpej   247:
1.152     rmind     248: static pool_cache_t    lwp_cache       __read_mostly;
                    249: struct lwplist         alllwp          __cacheline_aligned;
1.41      thorpej   250:
1.157     rmind     251: static void            lwp_dtor(void *, void *);
                    252:
1.141     darran    253: /* DTrace proc provider probes */
1.180   ! christos  254: SDT_PROVIDER_DEFINE(proc);
        !           255:
        !           256: SDT_PROBE_DEFINE1(proc, kernel, , lwp__create, "struct lwp *");
        !           257: SDT_PROBE_DEFINE1(proc, kernel, , lwp__start, "struct lwp *");
        !           258: SDT_PROBE_DEFINE1(proc, kernel, , lwp__exit, "struct lwp *");
1.141     darran    259:
1.147     pooka     260: struct turnstile turnstile0;
                    261: struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
                    262: #ifdef LWP0_CPU_INFO
                    263:        .l_cpu = LWP0_CPU_INFO,
                    264: #endif
1.154     matt      265: #ifdef LWP0_MD_INITIALIZER
                    266:        .l_md = LWP0_MD_INITIALIZER,
                    267: #endif
1.147     pooka     268:        .l_proc = &proc0,
                    269:        .l_lid = 1,
                    270:        .l_flag = LW_SYSTEM,
                    271:        .l_stat = LSONPROC,
                    272:        .l_ts = &turnstile0,
                    273:        .l_syncobj = &sched_syncobj,
                    274:        .l_refcnt = 1,
                    275:        .l_priority = PRI_USER + NPRI_USER - 1,
                    276:        .l_inheritedprio = -1,
                    277:        .l_class = SCHED_OTHER,
                    278:        .l_psid = PS_NONE,
                    279:        .l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
                    280:        .l_name = __UNCONST("swapper"),
                    281:        .l_fd = &filedesc0,
                    282: };
                    283:
1.169     christos  284: static int sysctl_kern_maxlwp(SYSCTLFN_PROTO);
                    285:
                    286: /*
                    287:  * sysctl helper routine for kern.maxlwp. Ensures that the new
                    288:  * values are not too low or too high.
                    289:  */
                    290: static int
                    291: sysctl_kern_maxlwp(SYSCTLFN_ARGS)
                    292: {
                    293:        int error, nmaxlwp;
                    294:        struct sysctlnode node;
                    295:
                    296:        nmaxlwp = maxlwp;
                    297:        node = *rnode;
                    298:        node.sysctl_data = &nmaxlwp;
                    299:        error = sysctl_lookup(SYSCTLFN_CALL(&node));
                    300:        if (error || newp == NULL)
                    301:                return error;
                    302:
                    303:        if (nmaxlwp < 0 || nmaxlwp >= 65536)
                    304:                return EINVAL;
                    305:        if (nmaxlwp > cpu_maxlwp())
                    306:                return EINVAL;
                    307:        maxlwp = nmaxlwp;
                    308:
                    309:        return 0;
                    310: }
                    311:
                    312: static void
                    313: sysctl_kern_lwp_setup(void)
                    314: {
                    315:        struct sysctllog *clog = NULL;
                    316:
                    317:        sysctl_createv(&clog, 0, NULL, NULL,
                    318:                       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
                    319:                       CTLTYPE_INT, "maxlwp",
                    320:                       SYSCTL_DESCR("Maximum number of simultaneous threads"),
                    321:                       sysctl_kern_maxlwp, 0, NULL, 0,
                    322:                       CTL_KERN, CTL_CREATE, CTL_EOL);
                    323: }
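/*
 * Illustrative sketch (userland, not part of this file): the node
 * created above is named kern.maxlwp, so the limit can be queried
 * with the standard sysctlbyname(3) interface.
 */
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int maxlwp;
        size_t len = sizeof(maxlwp);

        if (sysctlbyname("kern.maxlwp", &maxlwp, &len, NULL, 0) == -1) {
                perror("sysctlbyname");
                return 1;
        }
        printf("kern.maxlwp = %d\n", maxlwp);
        return 0;
}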
                    324:
1.41      thorpej   325: void
                    326: lwpinit(void)
                    327: {
                    328:
1.152     rmind     329:        LIST_INIT(&alllwp);
1.144     pooka     330:        lwpinit_specificdata();
1.52      ad        331:        lwp_sys_init();
1.87      ad        332:        lwp_cache = pool_cache_init(sizeof(lwp_t), MIN_LWP_ALIGNMENT, 0, 0,
1.157     rmind     333:            "lwppl", NULL, IPL_NONE, NULL, lwp_dtor, NULL);
1.169     christos  334:
                    335:        maxlwp = cpu_maxlwp();
                    336:        sysctl_kern_lwp_setup();
1.41      thorpej   337: }
                    338:
1.147     pooka     339: void
                    340: lwp0_init(void)
                    341: {
                    342:        struct lwp *l = &lwp0;
                    343:
                    344:        KASSERT((void *)uvm_lwp_getuarea(l) != NULL);
1.148     pooka     345:        KASSERT(l->l_lid == proc0.p_nlwpid);
1.147     pooka     346:
                    347:        LIST_INSERT_HEAD(&alllwp, l, l_list);
                    348:
                    349:        callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
                    350:        callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
                    351:        cv_init(&l->l_sigcv, "sigwait");
1.171     rmind     352:        cv_init(&l->l_waitcv, "vfork");
1.147     pooka     353:
                    354:        kauth_cred_hold(proc0.p_cred);
                    355:        l->l_cred = proc0.p_cred;
                    356:
1.164     yamt      357:        kdtrace_thread_ctor(NULL, l);
1.147     pooka     358:        lwp_initspecific(l);
                    359:
                    360:        SYSCALL_TIME_LWP_INIT(l);
                    361: }
                    362:
1.157     rmind     363: static void
                    364: lwp_dtor(void *arg, void *obj)
                    365: {
                    366:        lwp_t *l = obj;
                    367:        uint64_t where;
                    368:        (void)l;
                    369:
                    370:        /*
                    371:         * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
                    372:         * calls will exit before memory of LWP is returned to the pool, where
                    373:         * KVA of LWP structure might be freed and re-used for other purposes.
                    374:         * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
                    375:         * callers, therefore cross-call to all CPUs will do the job.  Also,
                    376:         * the value of l->l_cpu must be still valid at this point.
                    377:         */
                    378:        KASSERT(l->l_cpu != NULL);
                    379:        where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
                    380:        xc_wait(where);
                    381: }
                    382:
1.52      ad        383: /*
                    384:  * Set an LWP suspended.
                    385:  *
1.103     ad        386:  * Must be called with p_lock held, and the LWP locked.  Will unlock the
1.52      ad        387:  * LWP before return.
                    388:  */
1.2       thorpej   389: int
1.52      ad        390: lwp_suspend(struct lwp *curl, struct lwp *t)
1.2       thorpej   391: {
1.52      ad        392:        int error;
1.2       thorpej   393:
1.103     ad        394:        KASSERT(mutex_owned(t->l_proc->p_lock));
1.63      ad        395:        KASSERT(lwp_locked(t, NULL));
1.33      chs       396:
1.52      ad        397:        KASSERT(curl != t || curl->l_stat == LSONPROC);
1.2       thorpej   398:
1.52      ad        399:        /*
                    400:         * If the current LWP has been told to exit, we must not suspend anyone
                    401:         * else or deadlock could occur.  We won't return to userspace.
1.2       thorpej   402:         */
1.109     rmind     403:        if ((curl->l_flag & (LW_WEXIT | LW_WCORE)) != 0) {
1.52      ad        404:                lwp_unlock(t);
                    405:                return (EDEADLK);
1.2       thorpej   406:        }
                    407:
1.52      ad        408:        error = 0;
1.2       thorpej   409:
1.52      ad        410:        switch (t->l_stat) {
                    411:        case LSRUN:
                    412:        case LSONPROC:
1.56      pavel     413:                t->l_flag |= LW_WSUSPEND;
1.52      ad        414:                lwp_need_userret(t);
                    415:                lwp_unlock(t);
                    416:                break;
1.2       thorpej   417:
1.52      ad        418:        case LSSLEEP:
1.56      pavel     419:                t->l_flag |= LW_WSUSPEND;
1.2       thorpej   420:
                    421:                /*
1.52      ad        422:                 * Kick the LWP and try to get it to the kernel boundary
                    423:                 * so that it will release any locks that it holds.
                    424:                 * setrunnable() will release the lock.
1.2       thorpej   425:                 */
1.56      pavel     426:                if ((t->l_flag & LW_SINTR) != 0)
1.52      ad        427:                        setrunnable(t);
                    428:                else
                    429:                        lwp_unlock(t);
                    430:                break;
1.2       thorpej   431:
1.52      ad        432:        case LSSUSPENDED:
                    433:                lwp_unlock(t);
                    434:                break;
1.17      manu      435:
1.52      ad        436:        case LSSTOP:
1.56      pavel     437:                t->l_flag |= LW_WSUSPEND;
1.52      ad        438:                setrunnable(t);
                    439:                break;
1.2       thorpej   440:
1.52      ad        441:        case LSIDL:
                    442:        case LSZOMB:
                    443:                error = EINTR; /* It's what Solaris does..... */
                    444:                lwp_unlock(t);
                    445:                break;
1.2       thorpej   446:        }
                    447:
1.69      rmind     448:        return (error);
1.2       thorpej   449: }
                    450:
1.52      ad        451: /*
                    452:  * Restart a suspended LWP.
                    453:  *
1.103     ad        454:  * Must be called with p_lock held, and the LWP locked.  Will unlock the
1.52      ad        455:  * LWP before return.
                    456:  */
1.2       thorpej   457: void
                    458: lwp_continue(struct lwp *l)
                    459: {
                    460:
1.103     ad        461:        KASSERT(mutex_owned(l->l_proc->p_lock));
1.63      ad        462:        KASSERT(lwp_locked(l, NULL));
1.52      ad        463:
                    464:        /* If rebooting or not suspended, then just bail out. */
1.56      pavel     465:        if ((l->l_flag & LW_WREBOOT) != 0) {
1.52      ad        466:                lwp_unlock(l);
1.2       thorpej   467:                return;
1.10      fvdl      468:        }
1.2       thorpej   469:
1.56      pavel     470:        l->l_flag &= ~LW_WSUSPEND;
1.2       thorpej   471:
1.52      ad        472:        if (l->l_stat != LSSUSPENDED) {
                    473:                lwp_unlock(l);
                    474:                return;
1.2       thorpej   475:        }
                    476:
1.52      ad        477:        /* setrunnable() will release the lock. */
                    478:        setrunnable(l);
1.2       thorpej   479: }
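/*
 * Illustrative sketch (not part of the file): a caller honouring the
 * contract documented above for lwp_suspend() and lwp_continue().
 * Both are entered with p_lock held and the target LWP locked; both
 * release the LWP lock themselves, leaving p_lock to the caller.
 * The helper name and surrounding logic are hypothetical.
 */
static int
example_suspend(struct lwp *curl, struct lwp *t)
{
        struct proc *p = t->l_proc;
        int error;

        mutex_enter(p->p_lock);
        lwp_lock(t);
        error = lwp_suspend(curl, t);   /* unlocks t before returning */
        mutex_exit(p->p_lock);

        return error;
}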
                    480:
1.52      ad        481: /*
1.142     christos  482:  * Restart a stopped LWP.
                    483:  *
                    484:  * Must be called with p_lock held, and the LWP NOT locked.  Will unlock the
                    485:  * LWP before return.
                    486:  */
                    487: void
                    488: lwp_unstop(struct lwp *l)
                    489: {
                    490:        struct proc *p = l->l_proc;
1.167     rmind     491:
1.142     christos  492:        KASSERT(mutex_owned(proc_lock));
                    493:        KASSERT(mutex_owned(p->p_lock));
                    494:
                    495:        lwp_lock(l);
                    496:
                    497:        /* If not stopped, then just bail out. */
                    498:        if (l->l_stat != LSSTOP) {
                    499:                lwp_unlock(l);
                    500:                return;
                    501:        }
                    502:
                    503:        p->p_stat = SACTIVE;
                    504:        p->p_sflag &= ~PS_STOPPING;
                    505:
                    506:        if (!p->p_waited)
                    507:                p->p_pptr->p_nstopchild--;
                    508:
                    509:        if (l->l_wchan == NULL) {
                    510:                /* setrunnable() will release the lock. */
                    511:                setrunnable(l);
1.163     christos  512:        } else if (p->p_xstat && (l->l_flag & LW_SINTR) != 0) {
                    513:                /* setrunnable() so we can receive the signal */
                    514:                setrunnable(l);
1.142     christos  515:        } else {
                    516:                l->l_stat = LSSLEEP;
                    517:                p->p_nrlwps++;
                    518:                lwp_unlock(l);
                    519:        }
                    520: }
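/*
 * Illustrative sketch (not part of the file): lwp_unstop(), by
 * contrast, is entered with proc_lock and p_lock held and the LWP
 * NOT locked; it takes and releases the LWP lock itself.  The
 * helper name is hypothetical.
 */
static void
example_unstop(struct lwp *l)
{
        struct proc *p = l->l_proc;

        mutex_enter(proc_lock);
        mutex_enter(p->p_lock);
        lwp_unstop(l);                  /* locks and unlocks l internally */
        mutex_exit(p->p_lock);
        mutex_exit(proc_lock);
}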
                    521:
                    522: /*
1.52      ad        523:  * Wait for an LWP within the current process to exit.  If 'lid' is
                    524:  * non-zero, we are waiting for a specific LWP.
                    525:  *
1.103     ad        526:  * Must be called with p->p_lock held.
1.52      ad        527:  */
1.2       thorpej   528: int
1.173     rmind     529: lwp_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed, bool exiting)
1.2       thorpej   530: {
1.173     rmind     531:        const lwpid_t curlid = l->l_lid;
                    532:        proc_t *p = l->l_proc;
                    533:        lwp_t *l2;
                    534:        int error;
1.2       thorpej   535:
1.103     ad        536:        KASSERT(mutex_owned(p->p_lock));
1.52      ad        537:
                    538:        p->p_nlwpwait++;
1.63      ad        539:        l->l_waitingfor = lid;
1.52      ad        540:
                    541:        for (;;) {
1.173     rmind     542:                int nfound;
                    543:
1.52      ad        544:                /*
                    545:                 * Avoid a race between exit1() and sigexit(): if the
                    546:                 * process is dumping core, then we need to bail out: call
                    547:                 * into lwp_userret() where we will be suspended until the
                    548:                 * deed is done.
                    549:                 */
                    550:                if ((p->p_sflag & PS_WCORE) != 0) {
1.103     ad        551:                        mutex_exit(p->p_lock);
1.52      ad        552:                        lwp_userret(l);
1.173     rmind     553:                        KASSERT(false);
1.52      ad        554:                }
                    555:
                    556:                /*
                    557:                 * First off, drain any detached LWP that is waiting to be
                    558:                 * reaped.
                    559:                 */
                    560:                while ((l2 = p->p_zomblwp) != NULL) {
                    561:                        p->p_zomblwp = NULL;
1.63      ad        562:                        lwp_free(l2, false, false);/* releases proc mutex */
1.103     ad        563:                        mutex_enter(p->p_lock);
1.52      ad        564:                }
                    565:
                    566:                /*
                    567:                 * Now look for an LWP to collect.  If the whole process is
                    568:                 * exiting, count detached LWPs as eligible to be collected,
                    569:                 * but don't drain them here.
                    570:                 */
                    571:                nfound = 0;
1.63      ad        572:                error = 0;
1.52      ad        573:                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
1.63      ad        574:                        /*
                    575:                         * If a specific wait and the target is waiting on
                    576:                         * us, then avoid deadlock.  This also traps LWPs
                    577:                         * that try to wait on themselves.
                    578:                         *
                    579:                         * Note that this does not handle more complicated
                    580:                         * cycles, like: t1 -> t2 -> t3 -> t1.  The process
                    581:                         * can still be killed so it is not a major problem.
                    582:                         */
                    583:                        if (l2->l_lid == lid && l2->l_waitingfor == curlid) {
                    584:                                error = EDEADLK;
                    585:                                break;
                    586:                        }
                    587:                        if (l2 == l)
1.52      ad        588:                                continue;
                    589:                        if ((l2->l_prflag & LPR_DETACHED) != 0) {
1.63      ad        590:                                nfound += exiting;
                    591:                                continue;
                    592:                        }
                    593:                        if (lid != 0) {
                    594:                                if (l2->l_lid != lid)
                    595:                                        continue;
                    596:                                /*
                    597:                                 * Mark this LWP as the first waiter, if there
                    598:                                 * is no other.
                    599:                                 */
                    600:                                if (l2->l_waiter == 0)
                    601:                                        l2->l_waiter = curlid;
                    602:                        } else if (l2->l_waiter != 0) {
                    603:                                /*
                    604:                                 * It already has a waiter - so don't
                    605:                                 * collect it.  If the waiter doesn't
                    606:                                 * grab it we'll get another chance
                    607:                                 * later.
                    608:                                 */
                    609:                                nfound++;
1.52      ad        610:                                continue;
                    611:                        }
                    612:                        nfound++;
1.2       thorpej   613:
1.52      ad        614:                        /* No need to lock the LWP in order to see LSZOMB. */
                    615:                        if (l2->l_stat != LSZOMB)
                    616:                                continue;
1.2       thorpej   617:
1.63      ad        618:                        /*
                    619:                         * We're no longer waiting.  Reset the "first waiter"
                    620:                         * pointer on the target, in case it was us.
                    621:                         */
                    622:                        l->l_waitingfor = 0;
                    623:                        l2->l_waiter = 0;
                    624:                        p->p_nlwpwait--;
1.2       thorpej   625:                        if (departed)
                    626:                                *departed = l2->l_lid;
1.75      ad        627:                        sched_lwp_collect(l2);
1.63      ad        628:
                    629:                        /* lwp_free() releases the proc lock. */
                    630:                        lwp_free(l2, false, false);
1.103     ad        631:                        mutex_enter(p->p_lock);
1.52      ad        632:                        return 0;
                    633:                }
1.2       thorpej   634:
1.63      ad        635:                if (error != 0)
                    636:                        break;
1.52      ad        637:                if (nfound == 0) {
                    638:                        error = ESRCH;
                    639:                        break;
                    640:                }
1.63      ad        641:
                    642:                /*
1.173     rmind     643:                 * Note: since the lock will be dropped, need to restart on
                    644:                 * wakeup to run all LWPs again, e.g. there may be new LWPs.
1.63      ad        645:                 */
                    646:                if (exiting) {
1.52      ad        647:                        KASSERT(p->p_nlwps > 1);
1.103     ad        648:                        cv_wait(&p->p_lwpcv, p->p_lock);
1.173     rmind     649:                        error = EAGAIN;
                    650:                        break;
1.52      ad        651:                }
1.63      ad        652:
                    653:                /*
                    654:                 * If all other LWPs are waiting for exits or suspends
                    655:                 * and the supply of zombies and potential zombies is
                    656:                 * exhausted, then we are about to deadlock.
                    657:                 *
                    658:                 * If the process is exiting (and this LWP is not the one
                    659:                 * that is coordinating the exit) then bail out now.
                    660:                 */
1.52      ad        661:                if ((p->p_sflag & PS_WEXIT) != 0 ||
1.63      ad        662:                    p->p_nrlwps + p->p_nzlwps - p->p_ndlwps <= p->p_nlwpwait) {
1.52      ad        663:                        error = EDEADLK;
                    664:                        break;
1.2       thorpej   665:                }
1.63      ad        666:
                    667:                /*
                    668:                 * Sit around and wait for something to happen.  We'll be
                    669:                 * awoken if any of the conditions examined change: if an
                    670:                 * LWP exits, is collected, or is detached.
                    671:                 */
1.103     ad        672:                if ((error = cv_wait_sig(&p->p_lwpcv, p->p_lock)) != 0)
1.52      ad        673:                        break;
1.2       thorpej   674:        }
                    675:
1.63      ad        676:        /*
                    677:         * We didn't find any LWPs to collect, we may have received a
                    678:         * signal, or some other condition has caused us to bail out.
                    679:         *
                    680:         * If waiting on a specific LWP, clear the waiters marker: some
                    681:         * other LWP may want it.  Then, kick all the remaining waiters
                    682:         * so that they can re-check for zombies and for deadlock.
                    683:         */
                    684:        if (lid != 0) {
                    685:                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                    686:                        if (l2->l_lid == lid) {
                    687:                                if (l2->l_waiter == curlid)
                    688:                                        l2->l_waiter = 0;
                    689:                                break;
                    690:                        }
                    691:                }
                    692:        }
1.52      ad        693:        p->p_nlwpwait--;
1.63      ad        694:        l->l_waitingfor = 0;
                    695:        cv_broadcast(&p->p_lwpcv);
                    696:
1.52      ad        697:        return error;
1.2       thorpej   698: }
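/*
 * Illustrative sketch (not part of the file): the syscall-style
 * caller pattern for lwp_wait().  p_lock is held across the call;
 * lwp_wait() may drop and retake it internally, but it returns with
 * the lock held, so the release below is the caller's.  The helper
 * name is hypothetical.
 */
static int
example_wait(struct lwp *l, lwpid_t lid, lwpid_t *departed)
{
        struct proc *p = l->l_proc;
        int error;

        mutex_enter(p->p_lock);
        error = lwp_wait(l, lid, departed, false);
        mutex_exit(p->p_lock);

        return error;
}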
                    699:
1.174     dsl       700: static lwpid_t
                    701: lwp_find_free_lid(lwpid_t try_lid, lwp_t * new_lwp, proc_t *p)
                    702: {
                    703:        #define LID_SCAN (1u << 31)
                    704:        lwp_t *scan, *free_before;
                    705:        lwpid_t nxt_lid;
                    706:
                    707:        /*
                    708:  *     We want the first unused lid greater than or equal to
                    709:  *     try_lid (modulo 2^31).
                    710:  *     (If nothing else, ld.elf_so doesn't want lwpids with the top
                    711:  *     bit set.)  We must not return 0, and avoiding 'LID_SCAN - 1'
                    712:  *     makes the outer test easier.
                    713:  *     This would be much easier if the list were sorted in
                    714:  *     increasing order; instead it is kept sorted in decreasing
                    715:  *     order.  This code is only used after a process has generated
                    716:  *     2^31 LWPs.
                    717:  *
                    718:  *     The code assumes it can always find an id.
                    719:         */
                    720:
                    721:        try_lid &= LID_SCAN - 1;
                    722:        if (try_lid <= 1)
                    723:                try_lid = 2;
                    724:
                    725:        free_before = NULL;
                    726:        nxt_lid = LID_SCAN - 1;
                    727:        LIST_FOREACH(scan, &p->p_lwps, l_sibling) {
                    728:                if (scan->l_lid != nxt_lid) {
                    729:                        /* There are available lids before this entry */
                    730:                        free_before = scan;
                    731:                        if (try_lid > scan->l_lid)
                    732:                                break;
                    733:                }
                    734:                if (try_lid == scan->l_lid) {
                    735:                        /* The ideal lid is busy, take a higher one */
                    736:                        if (free_before != NULL) {
                    737:                                try_lid = free_before->l_lid + 1;
                    738:                                break;
                    739:                        }
                    740:                        /* No higher ones, reuse low numbers */
                    741:                        try_lid = 2;
                    742:                }
                    743:
                    744:                nxt_lid = scan->l_lid - 1;
                    745:                if (LIST_NEXT(scan, l_sibling) == NULL) {
                    746:                    /* The value we have is lower than any existing lwp's lid */
                    747:                    LIST_INSERT_AFTER(scan, new_lwp, l_sibling);
                    748:                    return try_lid;
                    749:                }
                    750:        }
                    751:
                    752:        LIST_INSERT_BEFORE(free_before, new_lwp, l_sibling);
                    753:        return try_lid;
                    754: }
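/*
 * Illustrative sketch (standalone, not kernel code): the allocation
 * rule above on a plain array.  ids[] plays the role of the lid list;
 * unlike the single-pass kernel scan, this naive version simply
 * rescans, and wraparound at LID_SCAN is omitted for brevity.
 */
#include <stdbool.h>

static unsigned
find_free_id(const unsigned *ids, int nids, unsigned try)
{
        if (try <= 1)
                try = 2;        /* mirror the kernel: never return 0 or 1 */
        for (;;) {
                bool used = false;
                for (int i = 0; i < nids; i++) {
                        if (ids[i] == try) {
                                used = true;
                                break;
                        }
                }
                if (!used)
                        return try;     /* first unused id >= try */
                try++;
        }
}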
                    755:
1.52      ad        756: /*
                    757:  * Create a new LWP within process 'p2', using LWP 'l1' as a template.
                    758:  * The new LWP is created in state LSIDL and must be set running,
                    759:  * suspended, or stopped by the caller.
                    760:  */
1.2       thorpej   761: int
1.134     rmind     762: lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
1.75      ad        763:           void *stack, size_t stacksize, void (*func)(void *), void *arg,
                    764:           lwp_t **rnewlwpp, int sclass)
1.2       thorpej   765: {
1.52      ad        766:        struct lwp *l2, *isfree;
                    767:        turnstile_t *ts;
1.151     chs       768:        lwpid_t lid;
1.2       thorpej   769:
1.107     ad        770:        KASSERT(l1 == curlwp || l1->l_proc == &proc0);
                    771:
1.52      ad        772:        /*
1.169     christos  773:         * Enforce limits, excluding the first lwp and kthreads.
                    774:         */
                    775:        if (p2->p_nlwps != 0 && p2 != &proc0) {
                    776:                uid_t uid = kauth_cred_getuid(l1->l_cred);
                    777:                int count = chglwpcnt(uid, 1);
                    778:                if (__predict_false(count >
                    779:                    p2->p_rlimit[RLIMIT_NTHR].rlim_cur)) {
                    780:                        if (kauth_authorize_process(l1->l_cred,
                    781:                            KAUTH_PROCESS_RLIMIT, p2,
                    782:                            KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
                    783:                            &p2->p_rlimit[RLIMIT_NTHR], KAUTH_ARG(RLIMIT_NTHR))
                    784:                            != 0) {
1.170     christos  785:                                (void)chglwpcnt(uid, -1);
                    786:                                return EAGAIN;
1.169     christos  787:                        }
                    788:                }
                    789:        }
                    790:
                    791:        /*
1.52      ad        792:         * First off, reap any detached LWP waiting to be collected.
                    793:         * We can re-use its LWP structure and turnstile.
                    794:         */
                    795:        isfree = NULL;
                    796:        if (p2->p_zomblwp != NULL) {
1.103     ad        797:                mutex_enter(p2->p_lock);
1.52      ad        798:                if ((isfree = p2->p_zomblwp) != NULL) {
                    799:                        p2->p_zomblwp = NULL;
1.63      ad        800:                        lwp_free(isfree, true, false);/* releases proc mutex */
1.52      ad        801:                } else
1.103     ad        802:                        mutex_exit(p2->p_lock);
1.52      ad        803:        }
                    804:        if (isfree == NULL) {
1.87      ad        805:                l2 = pool_cache_get(lwp_cache, PR_WAITOK);
1.52      ad        806:                memset(l2, 0, sizeof(*l2));
1.76      ad        807:                l2->l_ts = pool_cache_get(turnstile_cache, PR_WAITOK);
1.60      yamt      808:                SLIST_INIT(&l2->l_pi_lenders);
1.52      ad        809:        } else {
                    810:                l2 = isfree;
                    811:                ts = l2->l_ts;
1.75      ad        812:                KASSERT(l2->l_inheritedprio == -1);
1.60      yamt      813:                KASSERT(SLIST_EMPTY(&l2->l_pi_lenders));
1.52      ad        814:                memset(l2, 0, sizeof(*l2));
                    815:                l2->l_ts = ts;
                    816:        }
1.2       thorpej   817:
                    818:        l2->l_stat = LSIDL;
                    819:        l2->l_proc = p2;
1.52      ad        820:        l2->l_refcnt = 1;
1.75      ad        821:        l2->l_class = sclass;
1.116     ad        822:
                    823:        /*
                    824:         * If vfork(), we want the LWP to run fast and on the same CPU
                    825:         * as its parent, so that it can reuse the VM context and cache
                    826:         * footprint on the local CPU.
                    827:         */
                    828:        l2->l_kpriority = ((flags & LWP_VFORK) ? true : false);
1.82      ad        829:        l2->l_kpribase = PRI_KERNEL;
1.52      ad        830:        l2->l_priority = l1->l_priority;
1.75      ad        831:        l2->l_inheritedprio = -1;
1.134     rmind     832:        l2->l_flag = 0;
1.88      ad        833:        l2->l_pflag = LP_MPSAFE;
1.131     ad        834:        TAILQ_INIT(&l2->l_ld_locks);
                    835:
                    836:        /*
1.156     pooka     837:         * For vfork, borrow parent's lwpctl context if it exists.
                    838:         * This also causes us to return via lwp_userret.
                    839:         */
                    840:        if (flags & LWP_VFORK && l1->l_lwpctl) {
                    841:                l2->l_lwpctl = l1->l_lwpctl;
                    842:                l2->l_flag |= LW_LWPCTL;
                    843:        }
                    844:
                    845:        /*
1.131     ad        846:         * If not the first LWP in the process, grab a reference to the
                    847:         * descriptor table.
                    848:         */
1.97      ad        849:        l2->l_fd = p2->p_fd;
1.131     ad        850:        if (p2->p_nlwps != 0) {
                    851:                KASSERT(l1->l_proc == p2);
1.136     rmind     852:                fd_hold(l2);
1.131     ad        853:        } else {
                    854:                KASSERT(l1->l_proc != p2);
                    855:        }
1.41      thorpej   856:
1.56      pavel     857:        if (p2->p_flag & PK_SYSTEM) {
1.134     rmind     858:                /* Mark it as a system LWP. */
1.56      pavel     859:                l2->l_flag |= LW_SYSTEM;
1.52      ad        860:        }
1.2       thorpej   861:
1.107     ad        862:        kpreempt_disable();
                    863:        l2->l_mutex = l1->l_cpu->ci_schedstate.spc_mutex;
                    864:        l2->l_cpu = l1->l_cpu;
                    865:        kpreempt_enable();
                    866:
1.138     darran    867:        kdtrace_thread_ctor(NULL, l2);
1.73      rmind     868:        lwp_initspecific(l2);
1.75      ad        869:        sched_lwp_fork(l1, l2);
1.37      ad        870:        lwp_update_creds(l2);
1.70      ad        871:        callout_init(&l2->l_timeout_ch, CALLOUT_MPSAFE);
                    872:        callout_setfunc(&l2->l_timeout_ch, sleepq_timeout, l2);
1.52      ad        873:        cv_init(&l2->l_sigcv, "sigwait");
1.171     rmind     874:        cv_init(&l2->l_waitcv, "vfork");
1.52      ad        875:        l2->l_syncobj = &sched_syncobj;
1.2       thorpej   876:
                    877:        if (rnewlwpp != NULL)
                    878:                *rnewlwpp = l2;
                    879:
1.158     matt      880:        /*
                    881:         * PCU state needs to be saved before calling uvm_lwp_fork() so that
                    882:         * the MD cpu_lwp_fork() can copy the saved state to the new LWP.
                    883:         */
                    884:        pcu_save_all(l1);
                    885:
1.137     rmind     886:        uvm_lwp_setuarea(l2, uaddr);
1.2       thorpej   887:        uvm_lwp_fork(l1, l2, stack, stacksize, func,
                    888:            (arg != NULL) ? arg : l2);
                    889:
1.151     chs       890:        if ((flags & LWP_PIDLID) != 0) {
                    891:                lid = proc_alloc_pid(p2);
                    892:                l2->l_pflag |= LP_PIDLID;
                    893:        } else {
                    894:                lid = 0;
                    895:        }
                    896:
1.103     ad        897:        mutex_enter(p2->p_lock);
1.52      ad        898:
                    899:        if ((flags & LWP_DETACHED) != 0) {
                    900:                l2->l_prflag = LPR_DETACHED;
                    901:                p2->p_ndlwps++;
                    902:        } else
                    903:                l2->l_prflag = 0;
                    904:
1.165     jmcneill  905:        l2->l_sigstk = l1->l_sigstk;
1.52      ad        906:        l2->l_sigmask = l1->l_sigmask;
1.176     christos  907:        TAILQ_INIT(&l2->l_sigpend.sp_info);
1.52      ad        908:        sigemptyset(&l2->l_sigpend.sp_set);
                    909:
1.174     dsl       910:        if (__predict_true(lid == 0)) {
                    911:                /*
                    912:                 * XXX: l_lid values are expected to be unique (for a process);
                    913:                 * if LWP_PIDLID is sometimes set this won't be true.
                    914:                 * Once 2^31 threads have been allocated we have to
                    915:                 * scan to ensure we allocate a unique value.
                    916:                 */
                    917:                lid = ++p2->p_nlwpid;
                    918:                if (__predict_false(lid & LID_SCAN)) {
                    919:                        lid = lwp_find_free_lid(lid, l2, p2);
                    920:                        p2->p_nlwpid = lid | LID_SCAN;
                    921:                        /* l2 has been inserted into p_lwps in order */
                    922:                        goto skip_insert;
                    923:                }
                    924:                p2->p_nlwpid = lid;
1.151     chs       925:        }
1.174     dsl       926:        LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
                    927:     skip_insert:
1.151     chs       928:        l2->l_lid = lid;
1.2       thorpej   929:        p2->p_nlwps++;
1.149     yamt      930:        p2->p_nrlwps++;
1.2       thorpej   931:
1.162     rmind     932:        KASSERT(l2->l_affinity == NULL);
                    933:
1.91      rmind     934:        if ((p2->p_flag & PK_SYSTEM) == 0) {
1.162     rmind     935:                /* Inherit the affinity mask. */
                    936:                if (l1->l_affinity) {
1.128     rmind     937:                        /*
                    938:                         * Note that we hold the state lock while inheriting
                    939:                         * the affinity to avoid race with sched_setaffinity().
                    940:                         */
                    941:                        lwp_lock(l1);
1.162     rmind     942:                        if (l1->l_affinity) {
1.122     rmind     943:                                kcpuset_use(l1->l_affinity);
                    944:                                l2->l_affinity = l1->l_affinity;
                    945:                        }
1.128     rmind     946:                        lwp_unlock(l1);
1.117     christos  947:                }
1.128     rmind     948:                lwp_lock(l2);
                    949:                /* Inherit a processor-set */
                    950:                l2->l_psid = l1->l_psid;
1.91      rmind     951:                /* Look for a CPU to start */
                    952:                l2->l_cpu = sched_takecpu(l2);
                    953:                lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_mutex);
                    954:        }
1.128     rmind     955:        mutex_exit(p2->p_lock);
                    956:
1.180   ! christos  957:        SDT_PROBE(proc, kernel, , lwp__create, l2, 0, 0, 0, 0);
1.141     darran    958:
1.128     rmind     959:        mutex_enter(proc_lock);
                    960:        LIST_INSERT_HEAD(&alllwp, l2, l_list);
                    961:        mutex_exit(proc_lock);
1.91      rmind     962:
1.57      dsl       963:        SYSCALL_TIME_LWP_INIT(l2);
                    964:
1.16      manu      965:        if (p2->p_emul->e_lwp_fork)
                    966:                (*p2->p_emul->e_lwp_fork)(l1, l2);
                    967:
1.2       thorpej   968:        return (0);
                    969: }
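
/*
 * Illustrative sketch (not part of the original file): the LID
 * allocation in lwp_create() above hands out sequential IDs from
 * p_nlwpid until the counter reaches the LID_SCAN bit, after which
 * every allocation must search for a unique value.  A simplified,
 * non-compiled model of that policy, assuming LID_SCAN is the 2^31
 * bit and leaving the scan abstract:
 */
#if 0
static lwpid_t
lid_alloc_model(uint32_t *counter)
{
	uint32_t lid;

	lid = ++*counter;			/* common, fast path */
	if (lid & 0x80000000u) {		/* assumed LID_SCAN value */
		lid = scan_for_free_lid();	/* hypothetical helper */
		*counter = lid | 0x80000000u;	/* stay in scan mode */
	}
	return (lwpid_t)lid;
}
#endif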
                    970:
                    971: /*
1.64      yamt      972:  * Called by MD code when a new LWP begins execution.  Must be called
                    973:  * with the previous LWP locked (which implies splsched), or at
                    974:  * splsched if there is no previous LWP.
                    975:  */
                    976: void
1.178     matt      977: lwp_startup(struct lwp *prev, struct lwp *new_lwp)
1.64      yamt      978: {
1.178     matt      979:        KASSERTMSG(new_lwp == curlwp, "l %p curlwp %p prevlwp %p", new_lwp, curlwp, prev);
1.64      yamt      980:
1.180   ! christos  981:        SDT_PROBE(proc, kernel, , lwp__start, new_lwp, 0, 0, 0, 0);
1.141     darran    982:
1.107     ad        983:        KASSERT(kpreempt_disabled());
1.64      yamt      984:        if (prev != NULL) {
1.81      ad        985:                /*
                    986:                 * Normalize the count of the spin-mutexes; it was
                    987:                 * increased in mi_switch().  Clear the context switch
                    988:                 * state - it is finished for the previous LWP.
                    989:                 */
                    990:                curcpu()->ci_mtx_count++;
                    991:                membar_exit();
                    992:                prev->l_ctxswtch = 0;
1.64      yamt      993:        }
1.178     matt      994:        KPREEMPT_DISABLE(new_lwp);
1.107     ad        995:        spl0();
1.178     matt      996:        if (__predict_true(new_lwp->l_proc->p_vmspace))
                    997:                pmap_activate(new_lwp);
1.161     christos  998:
                    999:        /* Note trip through cpu_switchto(). */
                   1000:        pserialize_switchpoint();
                   1001:
1.64      yamt     1002:        LOCKDEBUG_BARRIER(NULL, 0);
1.178     matt     1003:        KPREEMPT_ENABLE(new_lwp);
                   1004:        if ((new_lwp->l_pflag & LP_MPSAFE) == 0) {
                   1005:                KERNEL_LOCK(1, new_lwp);
1.65      ad       1006:        }
1.64      yamt     1007: }
                   1008:
                   1009: /*
1.65      ad       1010:  * Exit an LWP.
1.2       thorpej  1011:  */
                   1012: void
                   1013: lwp_exit(struct lwp *l)
                   1014: {
                   1015:        struct proc *p = l->l_proc;
1.52      ad       1016:        struct lwp *l2;
1.65      ad       1017:        bool current;
                   1018:
                   1019:        current = (l == curlwp);
1.2       thorpej  1020:
1.114     rmind    1021:        KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL));
1.131     ad       1022:        KASSERT(p == curproc);
1.2       thorpej  1023:
1.180   ! christos 1024:        SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
1.141     darran   1025:
1.52      ad       1026:        /*
                   1027:         * Verify that we hold no locks other than the kernel lock.
                   1028:         */
                   1029:        LOCKDEBUG_BARRIER(&kernel_lock, 0);
1.16      manu     1030:
1.2       thorpej  1031:        /*
1.52      ad       1032:         * If we are the last live LWP in a process, we need to exit the
                   1033:         * entire process.  We do so with an exit status of zero, because
                   1034:         * it's a "controlled" exit, and because that's what Solaris does.
                   1035:         *
                   1036:         * We are not quite a zombie yet, but for accounting purposes we
                   1037:         * must increment the count of zombies here.
1.45      thorpej  1038:         *
                   1039:         * Note: the last LWP's specificdata will be deleted here.
1.2       thorpej  1040:         */
1.103     ad       1041:        mutex_enter(p->p_lock);
1.52      ad       1042:        if (p->p_nlwps - p->p_nzlwps == 1) {
1.65      ad       1043:                KASSERT(current == true);
1.172     matt     1044:                KASSERT(p != &proc0);
1.88      ad       1045:                /* XXXSMP kernel_lock not held */
1.2       thorpej  1046:                exit1(l, 0);
1.19      jdolecek 1047:                /* NOTREACHED */
1.2       thorpej  1048:        }
1.52      ad       1049:        p->p_nzlwps++;
1.103     ad       1050:        mutex_exit(p->p_lock);
1.52      ad       1051:
                   1052:        if (p->p_emul->e_lwp_exit)
                   1053:                (*p->p_emul->e_lwp_exit)(l);
1.2       thorpej  1054:
1.131     ad       1055:        /* Drop filedesc reference. */
                   1056:        fd_free();
                   1057:
1.45      thorpej  1058:        /* Delete the specificdata while it's still safe to sleep. */
1.145     pooka    1059:        lwp_finispecific(l);
1.45      thorpej  1060:
1.52      ad       1061:        /*
                   1062:         * Release our cached credentials.
                   1063:         */
1.37      ad       1064:        kauth_cred_free(l->l_cred);
1.70      ad       1065:        callout_destroy(&l->l_timeout_ch);
1.65      ad       1066:
                   1067:        /*
1.52      ad       1068:         * Remove the LWP from the global list.
1.151     chs      1069:         * Free its LID from the PID namespace if needed.
1.52      ad       1070:         */
1.102     ad       1071:        mutex_enter(proc_lock);
1.52      ad       1072:        LIST_REMOVE(l, l_list);
1.151     chs      1073:        if ((l->l_pflag & LP_PIDLID) != 0 && l->l_lid != p->p_pid) {
                   1074:                proc_free_pid(l->l_lid);
                   1075:        }
1.102     ad       1076:        mutex_exit(proc_lock);
1.19      jdolecek 1077:
1.52      ad       1078:        /*
                   1079:         * Get rid of all references to the LWP that others (e.g. procfs)
                   1080:         * may have, and mark the LWP as a zombie.  If the LWP is detached,
                   1081:         * mark it waiting for collection in the proc structure.  Note that
                    1082:         * before we can do that, we need to free any other dead, detached
                   1083:         * LWP waiting to meet its maker.
                   1084:         */
1.103     ad       1085:        mutex_enter(p->p_lock);
1.52      ad       1086:        lwp_drainrefs(l);
1.31      yamt     1087:
1.52      ad       1088:        if ((l->l_prflag & LPR_DETACHED) != 0) {
                   1089:                while ((l2 = p->p_zomblwp) != NULL) {
                   1090:                        p->p_zomblwp = NULL;
1.63      ad       1091:                        lwp_free(l2, false, false);/* releases proc mutex */
1.103     ad       1092:                        mutex_enter(p->p_lock);
1.72      ad       1093:                        l->l_refcnt++;
                   1094:                        lwp_drainrefs(l);
1.52      ad       1095:                }
                   1096:                p->p_zomblwp = l;
                   1097:        }
1.31      yamt     1098:
1.52      ad       1099:        /*
                   1100:         * If we find a pending signal for the process and we have been
1.151     chs      1101:         * asked to check for signals, then we lose: arrange to have
1.52      ad       1102:         * all other LWPs in the process check for signals.
                   1103:         */
1.56      pavel    1104:        if ((l->l_flag & LW_PENDSIG) != 0 &&
1.52      ad       1105:            firstsig(&p->p_sigpend.sp_set) != 0) {
                   1106:                LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                   1107:                        lwp_lock(l2);
1.56      pavel    1108:                        l2->l_flag |= LW_PENDSIG;
1.52      ad       1109:                        lwp_unlock(l2);
                   1110:                }
1.31      yamt     1111:        }
                   1112:
1.158     matt     1113:        /*
                   1114:         * Release any PCU resources before becoming a zombie.
                   1115:         */
                   1116:        pcu_discard_all(l);
                   1117:
1.52      ad       1118:        lwp_lock(l);
                   1119:        l->l_stat = LSZOMB;
1.162     rmind    1120:        if (l->l_name != NULL) {
1.90      ad       1121:                strcpy(l->l_name, "(zombie)");
1.128     rmind    1122:        }
1.52      ad       1123:        lwp_unlock(l);
1.2       thorpej  1124:        p->p_nrlwps--;
1.52      ad       1125:        cv_broadcast(&p->p_lwpcv);
1.78      ad       1126:        if (l->l_lwpctl != NULL)
                   1127:                l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
1.103     ad       1128:        mutex_exit(p->p_lock);
1.52      ad       1129:
                   1130:        /*
                   1131:         * We can no longer block.  At this point, lwp_free() may already
                   1132:         * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
                   1133:         *
                   1134:         * Free MD LWP resources.
                   1135:         */
                   1136:        cpu_lwp_free(l, 0);
1.2       thorpej  1137:
1.65      ad       1138:        if (current) {
                   1139:                pmap_deactivate(l);
                   1140:
                   1141:                /*
                   1142:                 * Release the kernel lock, and switch away into
                   1143:                 * oblivion.
                   1144:                 */
1.52      ad       1145: #ifdef notyet
1.65      ad       1146:                /* XXXSMP hold in lwp_userret() */
                   1147:                KERNEL_UNLOCK_LAST(l);
1.52      ad       1148: #else
1.65      ad       1149:                KERNEL_UNLOCK_ALL(l, NULL);
1.52      ad       1150: #endif
1.65      ad       1151:                lwp_exit_switchaway(l);
                   1152:        }
1.2       thorpej  1153: }
                   1154:
1.52      ad       1155: /*
                   1156:  * Free a dead LWP's remaining resources.
                   1157:  *
                   1158:  * XXXLWP limits.
                   1159:  */
                   1160: void
1.63      ad       1161: lwp_free(struct lwp *l, bool recycle, bool last)
1.52      ad       1162: {
                   1163:        struct proc *p = l->l_proc;
1.100     ad       1164:        struct rusage *ru;
1.52      ad       1165:        ksiginfoq_t kq;
                   1166:
1.92      yamt     1167:        KASSERT(l != curlwp);
1.160     yamt     1168:        KASSERT(last || mutex_owned(p->p_lock));
1.92      yamt     1169:
1.177     christos 1170:        /*
                   1171:         * We use the process credentials instead of the lwp credentials here
                    1172:         * because the lwp credentials may be cached (just after a setuid
                    1173:         * call) and we don't want to pay for syncing, since the lwp is
                    1174:         * going away anyway.
                   1175:         */
1.169     christos 1176:        if (p != &proc0 && p->p_nlwps != 1)
1.177     christos 1177:                (void)chglwpcnt(kauth_cred_getuid(p->p_cred), -1);
1.52      ad       1178:        /*
                   1179:         * If this was not the last LWP in the process, then adjust
                   1180:         * counters and unlock.
                   1181:         */
                   1182:        if (!last) {
                   1183:                /*
                   1184:                 * Add the LWP's run time to the process' base value.
                    1185:                 * This needs to coincide with coming off p_lwps.
                   1186:                 */
1.86      yamt     1187:                bintime_add(&p->p_rtime, &l->l_rtime);
1.64      yamt     1188:                p->p_pctcpu += l->l_pctcpu;
1.100     ad       1189:                ru = &p->p_stats->p_ru;
                   1190:                ruadd(ru, &l->l_ru);
                   1191:                ru->ru_nvcsw += (l->l_ncsw - l->l_nivcsw);
                   1192:                ru->ru_nivcsw += l->l_nivcsw;
1.52      ad       1193:                LIST_REMOVE(l, l_sibling);
                   1194:                p->p_nlwps--;
                   1195:                p->p_nzlwps--;
                   1196:                if ((l->l_prflag & LPR_DETACHED) != 0)
                   1197:                        p->p_ndlwps--;
1.63      ad       1198:
                   1199:                /*
                   1200:                 * Have any LWPs sleeping in lwp_wait() recheck for
                   1201:                 * deadlock.
                   1202:                 */
                   1203:                cv_broadcast(&p->p_lwpcv);
1.103     ad       1204:                mutex_exit(p->p_lock);
1.63      ad       1205:        }
1.52      ad       1206:
                   1207: #ifdef MULTIPROCESSOR
1.63      ad       1208:        /*
                   1209:         * In the unlikely event that the LWP is still on the CPU,
                   1210:         * then spin until it has switched away.  We need to release
                   1211:         * all locks to avoid deadlock against interrupt handlers on
                   1212:         * the target CPU.
                   1213:         */
1.115     ad       1214:        if ((l->l_pflag & LP_RUNNING) != 0 || l->l_cpu->ci_curlwp == l) {
1.63      ad       1215:                int count;
1.64      yamt     1216:                (void)count; /* XXXgcc */
1.63      ad       1217:                KERNEL_UNLOCK_ALL(curlwp, &count);
1.115     ad       1218:                while ((l->l_pflag & LP_RUNNING) != 0 ||
1.64      yamt     1219:                    l->l_cpu->ci_curlwp == l)
1.63      ad       1220:                        SPINLOCK_BACKOFF_HOOK;
                   1221:                KERNEL_LOCK(count, curlwp);
                   1222:        }
1.52      ad       1223: #endif
                   1224:
                   1225:        /*
                   1226:         * Destroy the LWP's remaining signal information.
                   1227:         */
                   1228:        ksiginfo_queue_init(&kq);
                   1229:        sigclear(&l->l_sigpend, NULL, &kq);
                   1230:        ksiginfo_queue_drain(&kq);
                   1231:        cv_destroy(&l->l_sigcv);
1.171     rmind    1232:        cv_destroy(&l->l_waitcv);
1.2       thorpej  1233:
1.19      jdolecek 1234:        /*
1.162     rmind    1235:         * Free lwpctl structure and affinity.
                   1236:         */
                   1237:        if (l->l_lwpctl) {
                   1238:                lwp_ctl_free(l);
                   1239:        }
                   1240:        if (l->l_affinity) {
                   1241:                kcpuset_unuse(l->l_affinity, NULL);
                   1242:                l->l_affinity = NULL;
                   1243:        }
                   1244:
                   1245:        /*
1.52      ad       1246:         * Free the LWP's turnstile and the LWP structure itself unless the
1.93      yamt     1247:         * caller wants to recycle them.  Also, free the scheduler specific
                   1248:         * data.
1.52      ad       1249:         *
                   1250:         * We can't return turnstile0 to the pool (it didn't come from it),
                   1251:         * so if it comes up just drop it quietly and move on.
                   1252:         *
                   1253:         * We don't recycle the VM resources at this time.
1.19      jdolecek 1254:         */
1.64      yamt     1255:
1.52      ad       1256:        if (!recycle && l->l_ts != &turnstile0)
1.76      ad       1257:                pool_cache_put(turnstile_cache, l->l_ts);
1.90      ad       1258:        if (l->l_name != NULL)
                   1259:                kmem_free(l->l_name, MAXCOMLEN);
1.135     rmind    1260:
1.52      ad       1261:        cpu_lwp_free2(l);
1.19      jdolecek 1262:        uvm_lwp_exit(l);
1.134     rmind    1263:
1.60      yamt     1264:        KASSERT(SLIST_EMPTY(&l->l_pi_lenders));
1.75      ad       1265:        KASSERT(l->l_inheritedprio == -1);
1.155     matt     1266:        KASSERT(l->l_blcnt == 0);
1.138     darran   1267:        kdtrace_thread_dtor(NULL, l);
1.52      ad       1268:        if (!recycle)
1.87      ad       1269:                pool_cache_put(lwp_cache, l);
1.2       thorpej  1270: }
                   1271:
                   1272: /*
1.91      rmind    1273:  * Migrate the LWP to another CPU.  Unlocks the LWP.
                   1274:  */
                   1275: void
1.114     rmind    1276: lwp_migrate(lwp_t *l, struct cpu_info *tci)
1.91      rmind    1277: {
1.114     rmind    1278:        struct schedstate_percpu *tspc;
1.121     rmind    1279:        int lstat = l->l_stat;
                   1280:
1.91      rmind    1281:        KASSERT(lwp_locked(l, NULL));
1.114     rmind    1282:        KASSERT(tci != NULL);
                   1283:
1.121     rmind    1284:        /* If LWP is still on the CPU, it must be handled like LSONPROC */
                   1285:        if ((l->l_pflag & LP_RUNNING) != 0) {
                   1286:                lstat = LSONPROC;
                   1287:        }
                   1288:
1.114     rmind    1289:        /*
                    1290:         * A previous migration may still be pending; if so, just
                    1291:         * update the destination CPU and return.
                   1292:         */
1.121     rmind    1293:        if (l->l_target_cpu != NULL) {
1.114     rmind    1294:                l->l_target_cpu = tci;
                   1295:                lwp_unlock(l);
                   1296:                return;
                   1297:        }
1.91      rmind    1298:
1.114     rmind    1299:        /* Nothing to do if trying to migrate to the same CPU */
                   1300:        if (l->l_cpu == tci) {
1.91      rmind    1301:                lwp_unlock(l);
                   1302:                return;
                   1303:        }
                   1304:
1.114     rmind    1305:        KASSERT(l->l_target_cpu == NULL);
                   1306:        tspc = &tci->ci_schedstate;
1.121     rmind    1307:        switch (lstat) {
1.91      rmind    1308:        case LSRUN:
1.134     rmind    1309:                l->l_target_cpu = tci;
                   1310:                break;
1.91      rmind    1311:        case LSIDL:
1.114     rmind    1312:                l->l_cpu = tci;
                   1313:                lwp_unlock_to(l, tspc->spc_mutex);
1.91      rmind    1314:                return;
                   1315:        case LSSLEEP:
1.114     rmind    1316:                l->l_cpu = tci;
1.91      rmind    1317:                break;
                   1318:        case LSSTOP:
                   1319:        case LSSUSPENDED:
1.114     rmind    1320:                l->l_cpu = tci;
                   1321:                if (l->l_wchan == NULL) {
                   1322:                        lwp_unlock_to(l, tspc->spc_lwplock);
                   1323:                        return;
1.91      rmind    1324:                }
1.114     rmind    1325:                break;
1.91      rmind    1326:        case LSONPROC:
1.114     rmind    1327:                l->l_target_cpu = tci;
                   1328:                spc_lock(l->l_cpu);
                   1329:                cpu_need_resched(l->l_cpu, RESCHED_KPREEMPT);
                   1330:                spc_unlock(l->l_cpu);
1.91      rmind    1331:                break;
                   1332:        }
                   1333:        lwp_unlock(l);
                   1334: }
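
/*
 * Illustrative usage sketch (not part of the original file): the caller
 * locks the LWP and lwp_migrate() consumes the lock:
 *
 *	lwp_lock(l);
 *	lwp_migrate(l, target_ci);	(returns with l unlocked)
 */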
                   1335:
                   1336: /*
1.94      rmind    1337:  * Find the LWP in the process.  Arguments may be zero, in such case,
                   1338:  * the calling process and first LWP in the list will be used.
1.103     ad       1339:  * On success - returns proc locked.
1.91      rmind    1340:  */
                   1341: struct lwp *
                   1342: lwp_find2(pid_t pid, lwpid_t lid)
                   1343: {
                   1344:        proc_t *p;
                   1345:        lwp_t *l;
                   1346:
1.150     rmind    1347:        /* Find the process. */
1.94      rmind    1348:        if (pid != 0) {
1.150     rmind    1349:                mutex_enter(proc_lock);
                   1350:                p = proc_find(pid);
                   1351:                if (p == NULL) {
                   1352:                        mutex_exit(proc_lock);
                   1353:                        return NULL;
                   1354:                }
                   1355:                mutex_enter(p->p_lock);
1.102     ad       1356:                mutex_exit(proc_lock);
1.150     rmind    1357:        } else {
                   1358:                p = curlwp->l_proc;
                   1359:                mutex_enter(p->p_lock);
                   1360:        }
                   1361:        /* Find the thread. */
                   1362:        if (lid != 0) {
                   1363:                l = lwp_find(p, lid);
                   1364:        } else {
                   1365:                l = LIST_FIRST(&p->p_lwps);
1.94      rmind    1366:        }
1.103     ad       1367:        if (l == NULL) {
                   1368:                mutex_exit(p->p_lock);
                   1369:        }
1.91      rmind    1370:        return l;
                   1371: }
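
/*
 * Illustrative usage sketch (not part of the original file): on success
 * the process is returned locked, so the caller must drop p_lock when
 * done:
 *
 *	if ((l = lwp_find2(pid, lid)) != NULL) {
 *		...inspect l...
 *		mutex_exit(l->l_proc->p_lock);
 *	}
 */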
                   1372:
                   1373: /*
1.168     yamt     1374:  * Look up a live LWP within the specified process.
1.52      ad       1375:  *
1.103     ad       1376:  * Must be called with p->p_lock held.
1.52      ad       1377:  */
                   1378: struct lwp *
1.151     chs      1379: lwp_find(struct proc *p, lwpid_t id)
1.52      ad       1380: {
                   1381:        struct lwp *l;
                   1382:
1.103     ad       1383:        KASSERT(mutex_owned(p->p_lock));
1.52      ad       1384:
                   1385:        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                   1386:                if (l->l_lid == id)
                   1387:                        break;
                   1388:        }
                   1389:
                   1390:        /*
                   1391:         * No need to lock - all of these conditions will
                   1392:         * be visible with the process level mutex held.
                   1393:         */
                   1394:        if (l != NULL && (l->l_stat == LSIDL || l->l_stat == LSZOMB))
                   1395:                l = NULL;
                   1396:
                   1397:        return l;
                   1398: }
                   1399:
                   1400: /*
1.37      ad       1401:  * Update an LWP's cached credentials to mirror the process' master copy.
                   1402:  *
                   1403:  * This happens early in the syscall path, on user trap, and on LWP
                   1404:  * creation.  A long-running LWP can also voluntarily choose to update
1.179     snj      1405:  * its credentials by calling this routine.  This may be called from
1.37      ad       1406:  * LWP_CACHE_CREDS(), which checks l->l_cred != p->p_cred beforehand.
                   1407:  */
                   1408: void
                   1409: lwp_update_creds(struct lwp *l)
                   1410: {
                   1411:        kauth_cred_t oc;
                   1412:        struct proc *p;
                   1413:
                   1414:        p = l->l_proc;
                   1415:        oc = l->l_cred;
                   1416:
1.103     ad       1417:        mutex_enter(p->p_lock);
1.37      ad       1418:        kauth_cred_hold(p->p_cred);
                   1419:        l->l_cred = p->p_cred;
1.98      ad       1420:        l->l_prflag &= ~LPR_CRMOD;
1.103     ad       1421:        mutex_exit(p->p_lock);
1.88      ad       1422:        if (oc != NULL)
1.37      ad       1423:                kauth_cred_free(oc);
1.52      ad       1424: }
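
/*
 * Illustrative sketch (not part of the original file) of the cheap
 * check the comment above attributes to LWP_CACHE_CREDS(): refresh
 * only when the cached copy has fallen out of sync:
 *
 *	if (l->l_cred != p->p_cred)
 *		lwp_update_creds(l);
 */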
                   1425:
                   1426: /*
                   1427:  * Verify that an LWP is locked, and optionally verify that the lock matches
                   1428:  * one we specify.
                   1429:  */
                   1430: int
                   1431: lwp_locked(struct lwp *l, kmutex_t *mtx)
                   1432: {
                   1433:        kmutex_t *cur = l->l_mutex;
                   1434:
                   1435:        return mutex_owned(cur) && (mtx == cur || mtx == NULL);
                   1436: }
                   1437:
                   1438: /*
                   1439:  * Lend a new mutex to an LWP.  The old mutex must be held.
                   1440:  */
                   1441: void
1.178     matt     1442: lwp_setlock(struct lwp *l, kmutex_t *mtx)
1.52      ad       1443: {
                   1444:
1.63      ad       1445:        KASSERT(mutex_owned(l->l_mutex));
1.52      ad       1446:
1.107     ad       1447:        membar_exit();
1.178     matt     1448:        l->l_mutex = mtx;
1.52      ad       1449: }
                   1450:
                   1451: /*
                   1452:  * Lend a new mutex to an LWP, and release the old mutex.  The old mutex
                   1453:  * must be held.
                   1454:  */
                   1455: void
1.178     matt     1456: lwp_unlock_to(struct lwp *l, kmutex_t *mtx)
1.52      ad       1457: {
                   1458:        kmutex_t *old;
                   1459:
1.152     rmind    1460:        KASSERT(lwp_locked(l, NULL));
1.52      ad       1461:
                   1462:        old = l->l_mutex;
1.107     ad       1463:        membar_exit();
1.178     matt     1464:        l->l_mutex = mtx;
1.52      ad       1465:        mutex_spin_exit(old);
                   1466: }
                   1467:
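/*
 * Try to lock an LWP without blocking.  Since lwp_setlock() can change
 * l_mutex underneath us, re-check it after acquiring; if it moved, drop
 * the stale mutex and retry.
 */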
1.60      yamt     1468: int
                   1469: lwp_trylock(struct lwp *l)
                   1470: {
                   1471:        kmutex_t *old;
                   1472:
                   1473:        for (;;) {
                   1474:                if (!mutex_tryenter(old = l->l_mutex))
                   1475:                        return 0;
                   1476:                if (__predict_true(l->l_mutex == old))
                   1477:                        return 1;
                   1478:                mutex_spin_exit(old);
                   1479:        }
                   1480: }
                   1481:
1.134     rmind    1482: void
1.96      ad       1483: lwp_unsleep(lwp_t *l, bool cleanup)
                   1484: {
                   1485:
                   1486:        KASSERT(mutex_owned(l->l_mutex));
1.134     rmind    1487:        (*l->l_syncobj->sobj_unsleep)(l, cleanup);
1.96      ad       1488: }
                   1489:
1.52      ad       1490: /*
1.56      pavel    1491:  * Handle exceptions for mi_userret().  Called if any flag in the
1.52      ad       1492:  * LW_USERRET mask is set.
                   1493:  */
                   1494: void
                   1495: lwp_userret(struct lwp *l)
                   1496: {
                   1497:        struct proc *p;
                   1498:        int sig;
                   1499:
1.114     rmind    1500:        KASSERT(l == curlwp);
                   1501:        KASSERT(l->l_stat == LSONPROC);
1.52      ad       1502:        p = l->l_proc;
                   1503:
1.75      ad       1504: #ifndef __HAVE_FAST_SOFTINTS
                   1505:        /* Run pending soft interrupts. */
                   1506:        if (l->l_cpu->ci_data.cpu_softints != 0)
                   1507:                softint_overlay();
                   1508: #endif
                   1509:
1.52      ad       1510:        /*
1.167     rmind    1511:         * It is safe to do this read unlocked on an MP system.
1.52      ad       1512:         */
1.167     rmind    1513:        while ((l->l_flag & LW_USERRET) != 0) {
1.52      ad       1514:                /*
                   1515:                 * Process pending signals first, unless the process
1.61      ad       1516:                 * is dumping core or exiting, where we will instead
1.101     rmind    1517:                 * enter the LW_WSUSPEND case below.
1.52      ad       1518:                 */
1.61      ad       1519:                if ((l->l_flag & (LW_PENDSIG | LW_WCORE | LW_WEXIT)) ==
                   1520:                    LW_PENDSIG) {
1.103     ad       1521:                        mutex_enter(p->p_lock);
1.52      ad       1522:                        while ((sig = issignal(l)) != 0)
                   1523:                                postsig(sig);
1.103     ad       1524:                        mutex_exit(p->p_lock);
1.52      ad       1525:                }
                   1526:
                   1527:                /*
                   1528:                 * Core-dump or suspend pending.
                   1529:                 *
1.159     matt     1530:                 * In case of core dump, suspend ourselves, so that the kernel
                   1531:                 * stack and therefore the userland registers saved in the
                   1532:                 * trapframe are around for coredump() to write them out.
                   1533:                 * We also need to save any PCU resources that we have so that
                    1534:                 * they are accessible for coredump().  We issue a wakeup on
                   1535:                 * p->p_lwpcv so that sigexit() will write the core file out
                   1536:                 * once all other LWPs are suspended.
1.52      ad       1537:                 */
1.56      pavel    1538:                if ((l->l_flag & LW_WSUSPEND) != 0) {
1.159     matt     1539:                        pcu_save_all(l);
1.103     ad       1540:                        mutex_enter(p->p_lock);
1.52      ad       1541:                        p->p_nrlwps--;
                   1542:                        cv_broadcast(&p->p_lwpcv);
                   1543:                        lwp_lock(l);
                   1544:                        l->l_stat = LSSUSPENDED;
1.104     ad       1545:                        lwp_unlock(l);
1.103     ad       1546:                        mutex_exit(p->p_lock);
1.104     ad       1547:                        lwp_lock(l);
1.64      yamt     1548:                        mi_switch(l);
1.52      ad       1549:                }
                   1550:
                   1551:                /* Process is exiting. */
1.56      pavel    1552:                if ((l->l_flag & LW_WEXIT) != 0) {
1.52      ad       1553:                        lwp_exit(l);
                   1554:                        KASSERT(0);
                   1555:                        /* NOTREACHED */
                   1556:                }
1.156     pooka    1557:
                   1558:                /* update lwpctl processor (for vfork child_return) */
                   1559:                if (l->l_flag & LW_LWPCTL) {
                   1560:                        lwp_lock(l);
                   1561:                        KASSERT(kpreempt_disabled());
                   1562:                        l->l_lwpctl->lc_curcpu = (int)cpu_index(l->l_cpu);
                   1563:                        l->l_lwpctl->lc_pctr++;
                   1564:                        l->l_flag &= ~LW_LWPCTL;
                   1565:                        lwp_unlock(l);
                   1566:                }
1.52      ad       1567:        }
                   1568: }
                   1569:
                   1570: /*
                   1571:  * Force an LWP to enter the kernel, to take a trip through lwp_userret().
                   1572:  */
                   1573: void
                   1574: lwp_need_userret(struct lwp *l)
                   1575: {
1.63      ad       1576:        KASSERT(lwp_locked(l, NULL));
1.52      ad       1577:
                   1578:        /*
                   1579:         * Since the tests in lwp_userret() are done unlocked, make sure
                   1580:         * that the condition will be seen before forcing the LWP to enter
                   1581:         * kernel mode.
                   1582:         */
1.81      ad       1583:        membar_producer();
1.52      ad       1584:        cpu_signotify(l);
                   1585: }
                   1586:
                   1587: /*
                   1588:  * Add one reference to an LWP.  This will prevent the LWP from
                    1589:  * exiting, thus keeping the lwp structure and PCB around to inspect.
                   1590:  */
                   1591: void
                   1592: lwp_addref(struct lwp *l)
                   1593: {
                   1594:
1.103     ad       1595:        KASSERT(mutex_owned(l->l_proc->p_lock));
1.52      ad       1596:        KASSERT(l->l_stat != LSZOMB);
                   1597:        KASSERT(l->l_refcnt != 0);
                   1598:
                   1599:        l->l_refcnt++;
                   1600: }
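
/*
 * Illustrative usage sketch (not part of the original file): pair
 * lwp_addref() with lwp_delref(), taking the reference under p_lock:
 *
 *	mutex_enter(p->p_lock);
 *	lwp_addref(l);
 *	mutex_exit(p->p_lock);
 *	...inspect the LWP...
 *	lwp_delref(l);		(re-acquires p_lock internally)
 */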
                   1601:
                   1602: /*
                   1603:  * Remove one reference to an LWP.  If this is the last reference,
                   1604:  * then we must finalize the LWP's death.
                   1605:  */
                   1606: void
                   1607: lwp_delref(struct lwp *l)
                   1608: {
                   1609:        struct proc *p = l->l_proc;
                   1610:
1.103     ad       1611:        mutex_enter(p->p_lock);
1.142     christos 1612:        lwp_delref2(l);
                   1613:        mutex_exit(p->p_lock);
                   1614: }
                   1615:
                   1616: /*
                   1617:  * Remove one reference to an LWP.  If this is the last reference,
                   1618:  * then we must finalize the LWP's death.  The proc mutex is held
                   1619:  * on entry.
                   1620:  */
                   1621: void
                   1622: lwp_delref2(struct lwp *l)
                   1623: {
                   1624:        struct proc *p = l->l_proc;
                   1625:
                   1626:        KASSERT(mutex_owned(p->p_lock));
1.72      ad       1627:        KASSERT(l->l_stat != LSZOMB);
                   1628:        KASSERT(l->l_refcnt > 0);
1.52      ad       1629:        if (--l->l_refcnt == 0)
1.76      ad       1630:                cv_broadcast(&p->p_lwpcv);
1.52      ad       1631: }
                   1632:
                   1633: /*
                   1634:  * Drain all references to the current LWP.
                   1635:  */
                   1636: void
                   1637: lwp_drainrefs(struct lwp *l)
                   1638: {
                   1639:        struct proc *p = l->l_proc;
                   1640:
1.103     ad       1641:        KASSERT(mutex_owned(p->p_lock));
1.52      ad       1642:        KASSERT(l->l_refcnt != 0);
                   1643:
                   1644:        l->l_refcnt--;
                   1645:        while (l->l_refcnt != 0)
1.103     ad       1646:                cv_wait(&p->p_lwpcv, p->p_lock);
1.37      ad       1647: }
1.41      thorpej  1648:
                   1649: /*
1.127     ad       1650:  * Return true if the specified LWP is 'alive'.  Only p->p_lock need
                   1651:  * be held.
                   1652:  */
                   1653: bool
                   1654: lwp_alive(lwp_t *l)
                   1655: {
                   1656:
                   1657:        KASSERT(mutex_owned(l->l_proc->p_lock));
                   1658:
                   1659:        switch (l->l_stat) {
                   1660:        case LSSLEEP:
                   1661:        case LSRUN:
                   1662:        case LSONPROC:
                   1663:        case LSSTOP:
                   1664:        case LSSUSPENDED:
                   1665:                return true;
                   1666:        default:
                   1667:                return false;
                   1668:        }
                   1669: }
                   1670:
                   1671: /*
                   1672:  * Return first live LWP in the process.
                   1673:  */
                   1674: lwp_t *
                   1675: lwp_find_first(proc_t *p)
                   1676: {
                   1677:        lwp_t *l;
                   1678:
                   1679:        KASSERT(mutex_owned(p->p_lock));
                   1680:
                   1681:        LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                   1682:                if (lwp_alive(l)) {
                   1683:                        return l;
                   1684:                }
                   1685:        }
                   1686:
                   1687:        return NULL;
                   1688: }
                   1689:
                   1690: /*
1.78      ad       1691:  * Allocate a new lwpctl structure for a user LWP.
                   1692:  */
                   1693: int
                   1694: lwp_ctl_alloc(vaddr_t *uaddr)
                   1695: {
                   1696:        lcproc_t *lp;
                   1697:        u_int bit, i, offset;
                   1698:        struct uvm_object *uao;
                   1699:        int error;
                   1700:        lcpage_t *lcp;
                   1701:        proc_t *p;
                   1702:        lwp_t *l;
                   1703:
                   1704:        l = curlwp;
                   1705:        p = l->l_proc;
                   1706:
1.156     pooka    1707:        /* don't allow a vforked process to create lwp ctls */
                   1708:        if (p->p_lflag & PL_PPWAIT)
                   1709:                return EBUSY;
                   1710:
1.81      ad       1711:        if (l->l_lcpage != NULL) {
                   1712:                lcp = l->l_lcpage;
                   1713:                *uaddr = lcp->lcp_uaddr + (vaddr_t)l->l_lwpctl - lcp->lcp_kaddr;
1.143     njoly    1714:                return 0;
1.81      ad       1715:        }
1.78      ad       1716:
                   1717:        /* First time around, allocate header structure for the process. */
                   1718:        if ((lp = p->p_lwpctl) == NULL) {
                   1719:                lp = kmem_alloc(sizeof(*lp), KM_SLEEP);
                   1720:                mutex_init(&lp->lp_lock, MUTEX_DEFAULT, IPL_NONE);
                   1721:                lp->lp_uao = NULL;
                   1722:                TAILQ_INIT(&lp->lp_pages);
1.103     ad       1723:                mutex_enter(p->p_lock);
1.78      ad       1724:                if (p->p_lwpctl == NULL) {
                   1725:                        p->p_lwpctl = lp;
1.103     ad       1726:                        mutex_exit(p->p_lock);
1.78      ad       1727:                } else {
1.103     ad       1728:                        mutex_exit(p->p_lock);
1.78      ad       1729:                        mutex_destroy(&lp->lp_lock);
                   1730:                        kmem_free(lp, sizeof(*lp));
                   1731:                        lp = p->p_lwpctl;
                   1732:                }
                   1733:        }
                   1734:
                   1735:        /*
                   1736:         * Set up an anonymous memory region to hold the shared pages.
                   1737:         * Map them into the process' address space.  The user vmspace
                   1738:         * gets the first reference on the UAO.
                   1739:         */
                   1740:        mutex_enter(&lp->lp_lock);
                   1741:        if (lp->lp_uao == NULL) {
                   1742:                lp->lp_uao = uao_create(LWPCTL_UAREA_SZ, 0);
                   1743:                lp->lp_cur = 0;
                   1744:                lp->lp_max = LWPCTL_UAREA_SZ;
                   1745:                lp->lp_uva = p->p_emul->e_vm_default_addr(p,
                   1746:                     (vaddr_t)p->p_vmspace->vm_daddr, LWPCTL_UAREA_SZ);
                   1747:                error = uvm_map(&p->p_vmspace->vm_map, &lp->lp_uva,
                   1748:                    LWPCTL_UAREA_SZ, lp->lp_uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
                   1749:                    UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL, 0));
                   1750:                if (error != 0) {
                   1751:                        uao_detach(lp->lp_uao);
                   1752:                        lp->lp_uao = NULL;
                   1753:                        mutex_exit(&lp->lp_lock);
                   1754:                        return error;
                   1755:                }
                   1756:        }
                   1757:
                   1758:        /* Get a free block and allocate for this LWP. */
                   1759:        TAILQ_FOREACH(lcp, &lp->lp_pages, lcp_chain) {
                   1760:                if (lcp->lcp_nfree != 0)
                   1761:                        break;
                   1762:        }
                   1763:        if (lcp == NULL) {
                   1764:                /* Nothing available - try to set up a free page. */
                   1765:                if (lp->lp_cur == lp->lp_max) {
                   1766:                        mutex_exit(&lp->lp_lock);
                   1767:                        return ENOMEM;
                   1768:                }
                   1769:                lcp = kmem_alloc(LWPCTL_LCPAGE_SZ, KM_SLEEP);
1.79      yamt     1770:                if (lcp == NULL) {
                   1771:                        mutex_exit(&lp->lp_lock);
1.78      ad       1772:                        return ENOMEM;
1.79      yamt     1773:                }
1.78      ad       1774:                /*
                   1775:                 * Wire the next page down in kernel space.  Since this
                   1776:                 * is a new mapping, we must add a reference.
                   1777:                 */
                   1778:                uao = lp->lp_uao;
                   1779:                (*uao->pgops->pgo_reference)(uao);
1.99      ad       1780:                lcp->lcp_kaddr = vm_map_min(kernel_map);
1.78      ad       1781:                error = uvm_map(kernel_map, &lcp->lcp_kaddr, PAGE_SIZE,
                   1782:                    uao, lp->lp_cur, PAGE_SIZE,
                   1783:                    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
                   1784:                    UVM_INH_NONE, UVM_ADV_RANDOM, 0));
                   1785:                if (error != 0) {
                   1786:                        mutex_exit(&lp->lp_lock);
                   1787:                        kmem_free(lcp, LWPCTL_LCPAGE_SZ);
                   1788:                        (*uao->pgops->pgo_detach)(uao);
                   1789:                        return error;
                   1790:                }
1.89      yamt     1791:                error = uvm_map_pageable(kernel_map, lcp->lcp_kaddr,
                   1792:                    lcp->lcp_kaddr + PAGE_SIZE, FALSE, 0);
                   1793:                if (error != 0) {
                   1794:                        mutex_exit(&lp->lp_lock);
                   1795:                        uvm_unmap(kernel_map, lcp->lcp_kaddr,
                   1796:                            lcp->lcp_kaddr + PAGE_SIZE);
                   1797:                        kmem_free(lcp, LWPCTL_LCPAGE_SZ);
                   1798:                        return error;
                   1799:                }
1.78      ad       1800:                /* Prepare the page descriptor and link into the list. */
                   1801:                lcp->lcp_uaddr = lp->lp_uva + lp->lp_cur;
                   1802:                lp->lp_cur += PAGE_SIZE;
                   1803:                lcp->lcp_nfree = LWPCTL_PER_PAGE;
                   1804:                lcp->lcp_rotor = 0;
                   1805:                memset(lcp->lcp_bitmap, 0xff, LWPCTL_BITMAP_SZ);
                   1806:                TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
                   1807:        }
                   1808:        for (i = lcp->lcp_rotor; lcp->lcp_bitmap[i] == 0;) {
                   1809:                if (++i >= LWPCTL_BITMAP_ENTRIES)
                   1810:                        i = 0;
                   1811:        }
                   1812:        bit = ffs(lcp->lcp_bitmap[i]) - 1;
                   1813:        lcp->lcp_bitmap[i] ^= (1 << bit);
                   1814:        lcp->lcp_rotor = i;
                   1815:        lcp->lcp_nfree--;
                   1816:        l->l_lcpage = lcp;
                   1817:        offset = (i << 5) + bit;
                   1818:        l->l_lwpctl = (lwpctl_t *)lcp->lcp_kaddr + offset;
                   1819:        *uaddr = lcp->lcp_uaddr + offset * sizeof(lwpctl_t);
                   1820:        mutex_exit(&lp->lp_lock);
                   1821:
1.107     ad       1822:        KPREEMPT_DISABLE(l);
1.111     ad       1823:        l->l_lwpctl->lc_curcpu = (int)curcpu()->ci_data.cpu_index;
1.107     ad       1824:        KPREEMPT_ENABLE(l);
1.78      ad       1825:
                   1826:        return 0;
                   1827: }
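
/*
 * Worked example of the bitmap arithmetic above (not part of the
 * original file; the code assumes 32-bit bitmap words): slot 70 lives
 * in word 70 >> 5 = 2 at bit 70 & 31 = 6, and the allocator recovers
 * the slot index as (2 << 5) + 6 = 70.  lwp_ctl_free() inverts the
 * same mapping via "map = offset >> 5".
 */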
                   1828:
                   1829: /*
                   1830:  * Free an lwpctl structure back to the per-process list.
                   1831:  */
                   1832: void
                   1833: lwp_ctl_free(lwp_t *l)
                   1834: {
1.156     pooka    1835:        struct proc *p = l->l_proc;
1.78      ad       1836:        lcproc_t *lp;
                   1837:        lcpage_t *lcp;
                   1838:        u_int map, offset;
                   1839:
1.156     pooka    1840:        /* don't free a lwp context we borrowed for vfork */
                   1841:        if (p->p_lflag & PL_PPWAIT) {
                   1842:                l->l_lwpctl = NULL;
                   1843:                return;
                   1844:        }
                   1845:
                   1846:        lp = p->p_lwpctl;
1.78      ad       1847:        KASSERT(lp != NULL);
                   1848:
                   1849:        lcp = l->l_lcpage;
                   1850:        offset = (u_int)((lwpctl_t *)l->l_lwpctl - (lwpctl_t *)lcp->lcp_kaddr);
                   1851:        KASSERT(offset < LWPCTL_PER_PAGE);
                   1852:
                   1853:        mutex_enter(&lp->lp_lock);
                   1854:        lcp->lcp_nfree++;
                   1855:        map = offset >> 5;
                   1856:        lcp->lcp_bitmap[map] |= (1 << (offset & 31));
                   1857:        if (lcp->lcp_bitmap[lcp->lcp_rotor] == 0)
                   1858:                lcp->lcp_rotor = map;
                   1859:        if (TAILQ_FIRST(&lp->lp_pages)->lcp_nfree == 0) {
                   1860:                TAILQ_REMOVE(&lp->lp_pages, lcp, lcp_chain);
                   1861:                TAILQ_INSERT_HEAD(&lp->lp_pages, lcp, lcp_chain);
                   1862:        }
                   1863:        mutex_exit(&lp->lp_lock);
                   1864: }
                   1865:
                   1866: /*
                   1867:  * Process is exiting; tear down lwpctl state.  This can only be safely
                   1868:  * called by the last LWP in the process.
                   1869:  */
                   1870: void
                   1871: lwp_ctl_exit(void)
                   1872: {
                   1873:        lcpage_t *lcp, *next;
                   1874:        lcproc_t *lp;
                   1875:        proc_t *p;
                   1876:        lwp_t *l;
                   1877:
                   1878:        l = curlwp;
                   1879:        l->l_lwpctl = NULL;
1.95      ad       1880:        l->l_lcpage = NULL;
1.78      ad       1881:        p = l->l_proc;
                   1882:        lp = p->p_lwpctl;
                   1883:
                   1884:        KASSERT(lp != NULL);
                   1885:        KASSERT(p->p_nlwps == 1);
                   1886:
                   1887:        for (lcp = TAILQ_FIRST(&lp->lp_pages); lcp != NULL; lcp = next) {
                   1888:                next = TAILQ_NEXT(lcp, lcp_chain);
                   1889:                uvm_unmap(kernel_map, lcp->lcp_kaddr,
                   1890:                    lcp->lcp_kaddr + PAGE_SIZE);
                   1891:                kmem_free(lcp, LWPCTL_LCPAGE_SZ);
                   1892:        }
                   1893:
                   1894:        if (lp->lp_uao != NULL) {
                   1895:                uvm_unmap(&p->p_vmspace->vm_map, lp->lp_uva,
                   1896:                    lp->lp_uva + LWPCTL_UAREA_SZ);
                   1897:        }
                   1898:
                   1899:        mutex_destroy(&lp->lp_lock);
                   1900:        kmem_free(lp, sizeof(*lp));
                   1901:        p->p_lwpctl = NULL;
                   1902: }
1.84      yamt     1903:
1.130     ad       1904: /*
                   1905:  * Return the current LWP's "preemption counter".  Used to detect
                   1906:  * preemption across operations that can tolerate preemption without
                   1907:  * crashing, but which may generate incorrect results if preempted.
                   1908:  */
                   1909: uint64_t
                   1910: lwp_pctr(void)
                   1911: {
                   1912:
                   1913:        return curlwp->l_ncsw;
                   1914: }
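
/*
 * Illustrative usage sketch (not part of the original file): sample the
 * counter around an operation that tolerates preemption but may produce
 * stale results if it occurs, and retry on change:
 *
 *	uint64_t pctr = lwp_pctr();
 *	...do the operation...
 *	if (lwp_pctr() != pctr)
 *		...redo or fall back...
 */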
                   1915:
1.151     chs      1916: /*
                   1917:  * Set an LWP's private data pointer.
                   1918:  */
                   1919: int
                   1920: lwp_setprivate(struct lwp *l, void *ptr)
                   1921: {
                   1922:        int error = 0;
                   1923:
                   1924:        l->l_private = ptr;
                   1925: #ifdef __HAVE_CPU_LWP_SETPRIVATE
                   1926:        error = cpu_lwp_setprivate(l, ptr);
                   1927: #endif
                   1928:        return error;
                   1929: }
                   1930:
1.84      yamt     1931: #if defined(DDB)
1.153     rmind    1932: #include <machine/pcb.h>
                   1933:
1.84      yamt     1934: void
                   1935: lwp_whatis(uintptr_t addr, void (*pr)(const char *, ...))
                   1936: {
                   1937:        lwp_t *l;
                   1938:
                   1939:        LIST_FOREACH(l, &alllwp, l_list) {
                   1940:                uintptr_t stack = (uintptr_t)KSTACK_LOWEST_ADDR(l);
                   1941:
                   1942:                if (addr < stack || stack + KSTACK_SIZE <= addr) {
                   1943:                        continue;
                   1944:                }
                   1945:                (*pr)("%p is %p+%zu, LWP %p's stack\n",
                   1946:                    (void *)addr, (void *)stack,
                   1947:                    (size_t)(addr - stack), l);
                   1948:        }
                   1949: }
                   1950: #endif /* defined(DDB) */
