
Annotation of src/sys/sys/lwp.h, Revision 1.89.2.6

1.89.2.6! yamt        1: /*     $NetBSD: lwp.h,v 1.89.2.5 2010/03/11 15:04:42 yamt Exp $        */
1.2       thorpej     2:
                      3: /*-
1.89.2.6! yamt        4:  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
        !             5:  *    The NetBSD Foundation, Inc.
1.2       thorpej     6:  * All rights reserved.
                      7:  *
                      8:  * This code is derived from software contributed to The NetBSD Foundation
1.47      ad          9:  * by Nathan J. Williams and Andrew Doran.
1.2       thorpej    10:  *
                     11:  * Redistribution and use in source and binary forms, with or without
                     12:  * modification, are permitted provided that the following conditions
                     13:  * are met:
                     14:  * 1. Redistributions of source code must retain the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer.
                     16:  * 2. Redistributions in binary form must reproduce the above copyright
                     17:  *    notice, this list of conditions and the following disclaimer in the
                     18:  *    documentation and/or other materials provided with the distribution.
                     19:  *
                     20:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     21:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     22:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     23:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     24:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     25:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     26:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     27:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     28:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     29:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     30:  * POSSIBILITY OF SUCH DAMAGE.
                     31:  */
                     32:
1.31      christos   33: #ifndef _SYS_LWP_H_
                     34: #define _SYS_LWP_H_
1.2       thorpej    35:
1.89.2.6! yamt       36: #include <sys/param.h>
1.47      ad         37: #include <sys/time.h>
                     38: #include <sys/queue.h>
                     39: #include <sys/callout.h>
                     40: #include <sys/mutex.h>
                     41: #include <sys/condvar.h>
                     42: #include <sys/signalvar.h>
1.78      rmind      43: #include <sys/sched.h>
1.47      ad         44: #include <sys/specificdata.h>
1.53      yamt       45: #include <sys/syncobj.h>
1.85      ad         46: #include <sys/resource.h>
1.2       thorpej    47:
                     48: #if defined(_KERNEL)
                     49: #include <machine/cpu.h>               /* curcpu() and cpu_info */
                     50: #endif
1.47      ad         51:
1.2       thorpej    52: #include <machine/proc.h>              /* Machine-dependent proc substruct. */
                     53:
1.47      ad         54: /*
                     55:  * Lightweight process.  Field markings and the corresponding locks:
                     56:  *
1.89.2.5  yamt       57:  * a:  proclist_lock
1.58      ad         58:  * c:  condition variable interlock, passed to cv_wait()
1.47      ad         59:  * l:  *l_mutex
1.88      ad         60:  * p:  l_proc->p_lock
1.60      yamt       61:  * s:  spc_mutex, which may or may not be referenced by l_mutex
1.89.2.6! yamt       62:  * S:  l_selcluster->sc_lock
1.47      ad         63:  * (:  unlocked, stable
1.68      ad         64:  * !:  unlocked, may only be reliably accessed by the LWP itself
1.47      ad         65:  * ?:  undecided
                     66:  *
                     67:  * Fields are clustered together by usage (to increase the likelihood
                     68:  * of cache hits) and by size (to reduce dead space in the structure).
                     69:  */
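/*
 * Illustrative sketch of the locking legend above (assumed usage, not a
 * definitive recipe; the local variable names are hypothetical): "l:"
 * fields of another LWP are read under the per-LWP scheduler lock taken
 * via lwp_lock(), while "p:" fields are covered by l_proc->p_lock.
 *
 *	lwp_lock(l);
 *	stat = l->l_stat;		(an "l:" field)
 *	lwp_unlock(l);
 *
 *	mutex_enter(l->l_proc->p_lock);
 *	refs = l->l_refcnt;		(a "p:" field)
 *	mutex_exit(l->l_proc->p_lock);
 */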
1.89.2.1  yamt       70: struct lockdebug;
1.89.2.2  yamt       71: struct sadata_vp;
                     72: struct sysent;
1.89.2.1  yamt       73:
1.60      yamt       74: struct lwp {
1.47      ad         75:        /* Scheduling and overall state */
1.60      yamt       76:        TAILQ_ENTRY(lwp) l_runq;        /* s: run queue */
1.89.2.2  yamt       77:        union {
                     78:                void *  info;           /* s: scheduler-specific structure */
                     79:                u_int   timeslice;      /* l: time-quantum for SCHED_M2 */
                     80:        } l_sched;
1.47      ad         81:        struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
                     82:        kmutex_t * volatile l_mutex;    /* l: ptr to mutex on sched state */
1.70      ad         83:        int             l_ctxswtch;     /* l: performing a context switch */
1.89.2.5  yamt       84:        struct user     *l_addr;        /* l: PCB address; use lwp_getpcb() */
1.47      ad         85:        struct mdlwp    l_md;           /* l: machine-dependent fields. */
                     86:        int             l_flag;         /* l: misc flag values */
                     87:        int             l_stat;         /* l: overall LWP status */
1.73      yamt       88:        struct bintime  l_rtime;        /* l: real time */
                     89:        struct bintime  l_stime;        /* l: start time (while ONPROC) */
1.47      ad         90:        u_int           l_swtime;       /* l: time swapped in or out */
1.89.2.5  yamt       91:        u_int           _reserved1;
1.86      ad         92:        u_int           l_rticks;       /* l: Saved start time of run */
                     93:        u_int           l_rticksum;     /* l: Sum of ticks spent running */
1.89.2.2  yamt       94:        u_int           l_slpticks;     /* l: Saved start time of sleep */
                     95:        u_int           l_slpticksum;   /* l: Sum of ticks spent sleeping */
1.47      ad         96:        int             l_biglocks;     /* l: biglock count before sleep */
1.68      ad         97:        int             l_class;        /* l: scheduling class */
                     98:        int             l_kpriority;    /* !: has kernel priority boost */
1.71      ad         99:        pri_t           l_kpribase;     /* !: kernel priority base level */
1.68      ad        100:        pri_t           l_priority;     /* l: scheduler priority */
1.54      yamt      101:        pri_t           l_inheritedprio;/* l: inherited priority */
1.52      yamt      102:        SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
1.60      yamt      103:        uint64_t        l_ncsw;         /* l: total context switches */
                    104:        uint64_t        l_nivcsw;       /* l: involuntary context switches */
1.89.2.2  yamt      105:        u_int           l_cpticks;      /* (: Ticks of CPU time */
                    106:        fixpt_t         l_pctcpu;       /* p: %cpu during l_swtime */
1.68      ad        107:        fixpt_t         l_estcpu;       /* l: cpu time for SCHED_4BSD */
1.77      rmind     108:        psetid_t        l_psid;         /* l: assigned processor-set ID */
                    109:        struct cpu_info *l_target_cpu;  /* l: target CPU to migrate */
1.69      ad        110:        struct lwpctl   *l_lwpctl;      /* p: lwpctl block kernel address */
                    111:        struct lcpage   *l_lcpage;      /* p: lwpctl containing page */
1.89.2.2  yamt      112:        kcpuset_t       *l_affinity;    /* l: CPU set for affinity */
                    113:        struct sadata_vp *l_savp;       /* p: SA "virtual processor" */
1.47      ad        114:
                    115:        /* Synchronisation */
                    116:        struct turnstile *l_ts;         /* l: current turnstile */
                    117:        struct syncobj  *l_syncobj;     /* l: sync object operations set */
                    118:        TAILQ_ENTRY(lwp) l_sleepchain;  /* l: sleep queue */
                    119:        wchan_t         l_wchan;        /* l: sleep address */
                    120:        const char      *l_wmesg;       /* l: reason for sleep */
                    121:        struct sleepq   *l_sleepq;      /* l: current sleep queue */
                    122:        int             l_sleeperr;     /* !: error before unblock */
                    123:        u_int           l_slptime;      /* l: time since last blocked */
1.68      ad        124:        callout_t       l_timeout_ch;   /* !: callout for tsleep */
1.89.2.4  yamt      125:        u_int           l_emap_gen;     /* !: emap generation number */
1.47      ad        126:
1.61      ad        127:        /* Process level and global state, misc. */
1.47      ad        128:        LIST_ENTRY(lwp) l_list;         /* a: entry on list of all LWPs */
                    129:        void            *l_ctxlink;     /* p: uc_link {get,set}context */
                    130:        struct proc     *l_proc;        /* p: parent process */
                    131:        LIST_ENTRY(lwp) l_sibling;      /* p: entry on proc's list of LWPs */
1.57      ad        132:        lwpid_t         l_waiter;       /* p: first LWP waiting on us */
                    133:        lwpid_t         l_waitingfor;   /* p: specific LWP we are waiting on */
1.47      ad        134:        int             l_prflag;       /* p: process level flags */
                    135:        u_int           l_refcnt;       /* p: reference count on this LWP */
                    136:        lwpid_t         l_lid;          /* (: LWP identifier; local to proc */
1.61      ad        137:        char            *l_name;        /* (: name, optional */
1.47      ad        138:
1.89.2.6! yamt      139:        /* State of select() or poll() */
        !           140:        int             l_selflag;      /* S: polling state flags */
        !           141:        SLIST_HEAD(,selinfo) l_selwait; /* S: descriptors waited on */
        !           142:        int             l_selret;       /* S: return value of select/poll */
        !           143:        uintptr_t       l_selrec;       /* (: argument for selrecord() */
        !           144:        struct selcluster *l_selcluster;/* (: associated cluster data */
        !           145:        void *          l_selbits;      /* (: select() bit-field */
        !           146:        size_t          l_selni;        /* (: size of a single bit-field */
        !           147:
1.47      ad        148:        /* Signals */
                    149:        int             l_sigrestore;   /* p: need to restore old sig mask */
                    150:        sigset_t        l_sigwaitset;   /* p: signals being waited for */
                    151:        kcondvar_t      l_sigcv;        /* p: for sigsuspend() */
                    152:        struct ksiginfo *l_sigwaited;   /* p: delivered signals from set */
                    153:        sigpend_t       *l_sigpendset;  /* p: XXX issignal()/postsig() baton */
                    154:        LIST_ENTRY(lwp) l_sigwaiter;    /* p: chain on list of waiting LWPs */
                    155:        stack_t         l_sigstk;       /* p: sp & on stack state variable */
                    156:        sigset_t        l_sigmask;      /* p: signal mask */
                    157:        sigpend_t       l_sigpend;      /* p: signals to this LWP */
                    158:        sigset_t        l_sigoldmask;   /* p: mask for sigpause */
1.2       thorpej   159:
1.47      ad        160:        /* Private data */
1.42      thorpej   161:        specificdata_reference
1.47      ad        162:                l_specdataref;          /* !: subsystem lwp-specific data */
1.89.2.2  yamt      163:        struct timespec l_ktrcsw;       /* !: for ktrace CSW trace XXX */
1.47      ad        164:        void            *l_private;     /* !: svr4-style lwp-private data */
1.62      ad        165:        struct lwp      *l_switchto;    /* !: mi_switch: switch to this LWP */
1.47      ad        166:        struct kauth_cred *l_cred;      /* !: cached credentials */
1.81      ad        167:        struct filedesc *l_fd;          /* !: cached copy of proc::p_fd */
1.47      ad        168:        void            *l_emuldata;    /* !: kernel lwp-private data */
1.61      ad        169:        u_int           l_cv_signalled; /* c: restarted by cv_signal() */
1.47      ad        170:        u_short         l_shlocks;      /* !: lockdebug: shared locks held */
                    171:        u_short         l_exlocks;      /* !: lockdebug: excl. locks held */
1.89.2.1  yamt      172:        u_short         l_unused;       /* !: unused */
1.61      ad        173:        u_short         l_blcnt;        /* !: count of kernel_lock held */
1.89.2.1  yamt      174:        int             l_nopreempt;    /* !: don't preempt me! */
                    175:        u_int           l_dopreempt;    /* s: kernel preemption pending */
1.47      ad        176:        int             l_pflag;        /* !: LWP private flags */
                    177:        int             l_dupfd;        /* !: side return from cloning devs XXX */
1.89.2.2  yamt      178:        const struct sysent * volatile l_sysent;/* !: currently active syscall */
1.85      ad        179:        struct rusage   l_ru;           /* !: accounting information */
1.89.2.1  yamt      180:        uint64_t        l_pfailtime;    /* !: for kernel preemption */
                    181:        uintptr_t       l_pfailaddr;    /* !: for kernel preemption */
                    182:        uintptr_t       l_pfaillock;    /* !: for kernel preemption */
                    183:        _TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */
1.89.2.3  yamt      184:        int             l_tcgen;        /* !: for timecounter removal */
                    185:        int             l_unused2;      /* !: for future use */
1.50      dsl       186:
                    187:        /* These are only used by 'options SYSCALL_TIMES' */
                    188:        uint32_t        l_syscall_time; /* !: time epoch for current syscall */
                    189:        uint64_t        *l_syscall_counter; /* !: counter for current process */
1.89.2.5  yamt      190:
                    191:        struct kdtrace_thread *l_dtrace; /* ?: DTrace-specific data. */
1.60      yamt      192: };
1.2       thorpej   193:
1.89.2.3  yamt      194: /*
                    195:  * USER_TO_UAREA/UAREA_TO_USER: macros to convert between
                    196:  * the lowest address of the uarea (UAREA) and lwp::l_addr (USER).
                    197:  *
                    198:  * the default is just a cast.  MD code can modify it by defining
                    199:  * either these macros or UAREA_USER_OFFSET in <machine/proc.h>.
                    200:  */
                    201:
1.37      yamt      202: #if !defined(USER_TO_UAREA)
                    203: #if !defined(UAREA_USER_OFFSET)
                    204: #define        UAREA_USER_OFFSET       0
                    205: #endif /* !defined(UAREA_USER_OFFSET) */
                    206: #define        USER_TO_UAREA(user)     ((vaddr_t)(user) - UAREA_USER_OFFSET)
                    207: #define        UAREA_TO_USER(uarea)    ((struct user *)((uarea) + UAREA_USER_OFFSET))
                     208: #endif /* !defined(USER_TO_UAREA) */
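/*
 * A minimal sketch of the MD hook (assumed example; the offset value is
 * hypothetical): an architecture whose struct user does not sit at the
 * base of the uarea could define, in <machine/proc.h>:
 *
 *	#define	UAREA_USER_OFFSET	(PAGE_SIZE)
 *
 * after which USER_TO_UAREA()/UAREA_TO_USER() adjust by that offset
 * instead of being a plain cast.
 */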
                    209:
1.89.2.5  yamt      210: static __inline void *
                    211: lwp_getpcb(struct lwp *l)
                    212: {
                    213:
                    214:        return &l->l_addr->u_pcb;
                    215: }
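/*
 * Illustrative use (assumed caller context): MD code fetches the PCB of
 * the current LWP through this accessor rather than dereferencing
 * l_addr->u_pcb directly, e.g.
 *
 *	struct pcb *pcb = lwp_getpcb(curlwp);
 */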
                    216:
1.2       thorpej   217: LIST_HEAD(lwplist, lwp);               /* a list of LWPs */
                    218:
1.29      christos  219: #ifdef _KERNEL
1.2       thorpej   220: extern struct lwplist alllwp;          /* List of all LWPs. */
1.58      ad        221: extern lwp_t lwp0;                     /* LWP for proc0 */
1.29      christos  222: #endif
1.2       thorpej   223:
1.47      ad        224: /* These flags are kept in l_flag. */
1.60      yamt      225: #define        LW_IDLE         0x00000001 /* Idle lwp. */
1.49      pavel     226: #define        LW_SINTR        0x00000080 /* Sleep is interruptible. */
1.89.2.2  yamt      227: #define        LW_SA_SWITCHING 0x00000100 /* SA LWP in context switch */
1.49      pavel     228: #define        LW_SYSTEM       0x00000200 /* Kernel thread */
1.89.2.2  yamt      229: #define        LW_SA           0x00000400 /* Scheduler activations LWP */
1.49      pavel     230: #define        LW_WSUSPEND     0x00020000 /* Suspend before return to user */
1.89.2.2  yamt      231: #define        LW_BATCH        0x00040000 /* LWP tends to hog CPU */
1.49      pavel     232: #define        LW_WCORE        0x00080000 /* Stop for core dump on return to user */
                    233: #define        LW_WEXIT        0x00100000 /* Exit before return to user */
1.77      rmind     234: #define        LW_AFFINITY     0x00200000 /* Affinity is assigned to the thread */
1.89.2.2  yamt      235: #define        LW_SA_UPCALL    0x00400000 /* SA upcall is pending */
                    236: #define        LW_SA_BLOCKING  0x00800000 /* Blocking in tsleep() */
1.49      pavel     237: #define        LW_PENDSIG      0x01000000 /* Pending signal for us */
                    238: #define        LW_CANCELLED    0x02000000 /* tsleep should not sleep */
                    239: #define        LW_WREBOOT      0x08000000 /* System is rebooting, please suspend */
1.60      yamt      240: #define        LW_UNPARKED     0x10000000 /* Unpark op pending */
1.89.2.2  yamt      241: #define        LW_SA_YIELD     0x40000000 /* LWP on VP is yielding */
                    242: #define        LW_SA_IDLE      0x80000000 /* VP is idle */
1.47      ad        243:
                    244: /* The second set of flags is kept in l_pflag. */
                    245: #define        LP_KTRACTIVE    0x00000001 /* Executing ktrace operation */
                    246: #define        LP_KTRCSW       0x00000002 /* ktrace context switch marker */
                    247: #define        LP_KTRCSWUSER   0x00000004 /* ktrace context switch marker */
1.89.2.6! yamt      248: #define        LP_PIDLID       0x00000008 /* free LID from PID space on exit */
1.47      ad        249: #define        LP_OWEUPC       0x00000010 /* Owe user profiling tick */
1.61      ad        250: #define        LP_MPSAFE       0x00000020 /* Starts life without kernel_lock */
1.68      ad        251: #define        LP_INTR         0x00000040 /* Soft interrupt handler */
1.76      ad        252: #define        LP_SYSCTLWRITE  0x00000080 /* sysctl write lock held */
1.89.2.2  yamt      253: #define        LP_SA_PAGEFAULT 0x00000200 /* SA LWP in pagefault handler */
                    254: #define        LP_SA_NOBLOCK   0x00000400 /* SA don't upcall on block */
                    255: #define        LP_TIMEINTR     0x00010000 /* Time this soft interrupt */
                    256: #define        LP_RUNNING      0x20000000 /* Active on a CPU */
1.87      ad        257: #define        LP_BOUND        0x80000000 /* Bound to a CPU */
1.47      ad        258:
                    259: /* The third set is kept in l_prflag. */
                    260: #define        LPR_DETACHED    0x00800000 /* Won't be waited for. */
1.82      ad        261: #define        LPR_CRMOD       0x00000100 /* Credentials modified */
1.47      ad        262:
                    263: /*
                    264:  * Mask indicating that there is "exceptional" work to be done on return to
                    265:  * user.
                    266:  */
1.49      pavel     267: #define        LW_USERRET (LW_WEXIT|LW_PENDSIG|LW_WREBOOT|LW_WSUSPEND|LW_WCORE|\
1.89.2.6! yamt      268:                    LW_SA_BLOCKING|LW_SA_UPCALL)
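/*
 * Sketch of the intended use (assumed; the actual check lives on the MD
 * return-to-user path): any set bit means lwp_userret() must run before
 * the LWP re-enters user mode.
 *
 *	if (__predict_false(l->l_flag & LW_USERRET))
 *		lwp_userret(l);
 */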
1.2       thorpej   269:
                    270: /*
                    271:  * Status values.
                    272:  *
                     273:  * A note about SRUN and SONPROC: SRUN indicates that an LWP is
                     274:  * runnable but *not* yet running, i.e. is on a run queue.  SONPROC
                     275:  * indicates that the LWP is actually executing on a CPU, i.e.
                    276:  * it is no longer on a run queue.
                    277:  */
1.47      ad        278: #define        LSIDL           1       /* Process being created by fork. */
                    279: #define        LSRUN           2       /* Currently runnable. */
                    280: #define        LSSLEEP         3       /* Sleeping on an address. */
                    281: #define        LSSTOP          4       /* Process debugging or suspension. */
                    282: #define        LSZOMB          5       /* Awaiting collection by parent. */
1.49      pavel     283: /* unused, for source compatibility with NetBSD 4.0 and earlier. */
                    284: #define        LSDEAD          6       /* Process is almost a zombie. */
1.2       thorpej   285: #define        LSONPROC        7       /* Process is currently on a CPU. */
                    286: #define        LSSUSPENDED     8       /* Not running, not signalable. */
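/*
 * Illustrative check (assumed example, per the note above): LSONPROC
 * means the LWP is executing on l->l_cpu right now, LSRUN means it is
 * merely queued.  l_stat is an "l:" field, hence the lock.
 *
 *	lwp_lock(l);
 *	running = (l->l_stat == LSONPROC);
 *	queued  = (l->l_stat == LSRUN);
 *	lwp_unlock(l);
 */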
                    287:
                    288: #ifdef _KERNEL
1.38      ad        289: #define        LWP_CACHE_CREDS(l, p)                                           \
                    290: do {                                                                   \
1.82      ad        291:        (void)p;                                                        \
                    292:        if (__predict_false((l)->l_prflag & LPR_CRMOD))                 \
1.38      ad        293:                lwp_update_creds(l);                                    \
                    294: } while (/* CONSTCOND */ 0)
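/*
 * Assumed usage sketch: callers invoke this before dereferencing l_cred
 * so that a pending credential change in the process (LPR_CRMOD) is
 * folded into the per-LWP cache, e.g. early on the syscall path:
 *
 *	LWP_CACHE_CREDS(l, l->l_proc);
 *	cred = l->l_cred;
 */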
1.2       thorpej   295:
1.60      yamt      296: void   lwp_startup(lwp_t *, lwp_t *);
1.2       thorpej   297:
1.58      ad        298: int    lwp_locked(lwp_t *, kmutex_t *);
                    299: void   lwp_setlock(lwp_t *, kmutex_t *);
                    300: void   lwp_unlock_to(lwp_t *, kmutex_t *);
1.89.2.2  yamt      301: kmutex_t *lwp_lock_retry(lwp_t *, kmutex_t *);
1.58      ad        302: void   lwp_relock(lwp_t *, kmutex_t *);
                    303: int    lwp_trylock(lwp_t *);
                    304: void   lwp_addref(lwp_t *);
                    305: void   lwp_delref(lwp_t *);
1.89.2.6! yamt      306: void   lwp_delref2(lwp_t *);
1.58      ad        307: void   lwp_drainrefs(lwp_t *);
1.89.2.2  yamt      308: bool   lwp_alive(lwp_t *);
                    309: lwp_t  *lwp_find_first(proc_t *);
1.2       thorpej   310:
                    311: /* Flags for _lwp_wait1 */
                    312: #define LWPWAIT_EXITCONTROL    0x00000001
1.42      thorpej   313: void   lwpinit(void);
1.89.2.6! yamt      314: void   lwp0_init(void);
1.58      ad        315: int    lwp_wait1(lwp_t *, lwpid_t, lwpid_t *, int);
                    316: void   lwp_continue(lwp_t *);
1.89.2.6! yamt      317: void   lwp_unstop(lwp_t *);
1.58      ad        318: void   cpu_setfunc(lwp_t *, void (*)(void *), void *);
1.2       thorpej   319: void   startlwp(void *);
1.58      ad        320: void   upcallret(lwp_t *);
1.89.2.6! yamt      321: void   lwp_exit(lwp_t *);
1.89.2.2  yamt      322: void   lwp_exit_switchaway(lwp_t *) __dead;
1.58      ad        323: int    lwp_suspend(lwp_t *, lwp_t *);
                    324: int    lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
                    325: void   lwp_update_creds(lwp_t *);
1.77      rmind     326: void   lwp_migrate(lwp_t *, struct cpu_info *);
                    327: lwp_t *lwp_find2(pid_t, lwpid_t);
                    328: lwp_t *lwp_find(proc_t *, int);
1.58      ad        329: void   lwp_userret(lwp_t *);
                    330: void   lwp_need_userret(lwp_t *);
                    331: void   lwp_free(lwp_t *, bool, bool);
1.47      ad        332: void   lwp_sys_init(void);
1.89.2.5  yamt      333: void   lwp_unsleep(lwp_t *, bool);
1.89.2.3  yamt      334: uint64_t lwp_pctr(void);
1.89.2.6! yamt      335: int    lwp_setprivate(lwp_t *, void *);
1.42      thorpej   336:
1.89.2.6! yamt      337: void   lwpinit_specificdata(void);
1.42      thorpej   338: int    lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
                    339: void   lwp_specific_key_delete(specificdata_key_t);
1.58      ad        340: void   lwp_initspecific(lwp_t *);
                    341: void   lwp_finispecific(lwp_t *);
                    342: void   *lwp_getspecific(specificdata_key_t);
1.46      hannken   343: #if defined(_LWP_API_PRIVATE)
1.58      ad        344: void   *_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t);
1.46      hannken   345: #endif
1.44      thorpej   346: void   lwp_setspecific(specificdata_key_t, void *);
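/*
 * A minimal sketch of the lwp-specific data API (the key, destructor and
 * data names below are hypothetical).  lwp_setspecific() and
 * lwp_getspecific() operate on the current LWP only:
 *
 *	static specificdata_key_t example_key;
 *
 *	error = lwp_specific_key_create(&example_key, example_dtor);
 *	...
 *	lwp_setspecific(example_key, data);
 *	data = lwp_getspecific(example_key);
 */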
1.47      ad        347:
1.63      ad        348: /* Syscalls */
                    349: int    lwp_park(struct timespec *, const void *);
                    350: int    lwp_unpark(lwpid_t, const void *);
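/* (Kernel halves of the _lwp_park(2)/_lwp_unpark(2) system calls.) */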
                    351:
1.72      yamt      352: /* ddb */
                    353: void lwp_whatis(uintptr_t, void (*)(const char *, ...));
                    354:
1.63      ad        355:
1.47      ad        356: /*
1.89.2.2  yamt      357:  * Lock an LWP. XXX _MODULE
1.47      ad        358:  */
                    359: static inline void
1.58      ad        360: lwp_lock(lwp_t *l)
1.47      ad        361: {
                    362:        kmutex_t *old;
                    363:
                    364:        mutex_spin_enter(old = l->l_mutex);
                    365:
                    366:        /*
                    367:         * mutex_enter() will have posted a read barrier.  Re-test
                    368:         * l->l_mutex.  If it has changed, we need to try again.
                    369:         */
                    370:        if (__predict_false(l->l_mutex != old))
                    371:                lwp_lock_retry(l, old);
                    372: }
                    373:
                    374: /*
1.89.2.2  yamt      375:  * Unlock an LWP. XXX _MODULE
1.47      ad        376:  */
                    377: static inline void
1.58      ad        378: lwp_unlock(lwp_t *l)
1.47      ad        379: {
                    380:        mutex_spin_exit(l->l_mutex);
                    381: }
                    382:
                    383: static inline void
1.58      ad        384: lwp_changepri(lwp_t *l, pri_t pri)
1.47      ad        385: {
1.60      yamt      386:        KASSERT(mutex_owned(l->l_mutex));
1.47      ad        387:
                    388:        (*l->l_syncobj->sobj_changepri)(l, pri);
                    389: }
                    390:
                    391: static inline void
1.58      ad        392: lwp_lendpri(lwp_t *l, pri_t pri)
1.52      yamt      393: {
1.60      yamt      394:        KASSERT(mutex_owned(l->l_mutex));
1.52      yamt      395:
                    396:        if (l->l_inheritedprio == pri)
                    397:                return;
                    398:
                    399:        (*l->l_syncobj->sobj_lendpri)(l, pri);
                    400: }
                    401:
1.68      ad        402: static inline pri_t
1.58      ad        403: lwp_eprio(lwp_t *l)
1.52      yamt      404: {
1.68      ad        405:        pri_t pri;
1.52      yamt      406:
1.68      ad        407:        pri = l->l_priority;
                    408:        if (l->l_kpriority && pri < PRI_KERNEL)
1.71      ad        409:                pri = (pri >> 1) + l->l_kpribase;
1.68      ad        410:        return MAX(l->l_inheritedprio, pri);
1.52      yamt      411: }
                    412:
1.89.2.5  yamt      413: int lwp_create(lwp_t *, struct proc *, vaddr_t, int,
1.68      ad        414:     void *, size_t, void (*)(void *), void *, lwp_t **, int);
1.47      ad        415:
                    416: /*
1.89.2.2  yamt      417:  * XXX _MODULE
                    418:  * We should provide real stubs for the below that modules can use.
1.47      ad        419:  */
                    420:
                    421: static inline void
1.60      yamt      422: spc_lock(struct cpu_info *ci)
1.47      ad        423: {
1.60      yamt      424:        mutex_spin_enter(ci->ci_schedstate.spc_mutex);
1.47      ad        425: }
                    426:
                    427: static inline void
1.60      yamt      428: spc_unlock(struct cpu_info *ci)
1.47      ad        429: {
1.60      yamt      430:        mutex_spin_exit(ci->ci_schedstate.spc_mutex);
1.47      ad        431: }
                    432:
1.77      rmind     433: static inline void
                    434: spc_dlock(struct cpu_info *ci1, struct cpu_info *ci2)
                    435: {
                    436:        struct schedstate_percpu *spc1 = &ci1->ci_schedstate;
                    437:        struct schedstate_percpu *spc2 = &ci2->ci_schedstate;
                    438:
                    439:        KASSERT(ci1 != ci2);
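	/*
	 * Take the two per-CPU scheduler locks in a fixed (address)
	 * order so that concurrent spc_dlock() callers cannot deadlock
	 * against each other.
	 */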
1.86      ad        440:        if (ci1 < ci2) {
1.77      rmind     441:                mutex_spin_enter(spc1->spc_mutex);
                    442:                mutex_spin_enter(spc2->spc_mutex);
                    443:        } else {
                    444:                mutex_spin_enter(spc2->spc_mutex);
                    445:                mutex_spin_enter(spc1->spc_mutex);
                    446:        }
                    447: }
                    448:
1.89      ad        449: /*
1.89.2.1  yamt      450:  * Allow machine-dependent code to override curlwp in <machine/cpu.h> for
                    451:  * its own convenience.  Otherwise, we declare it as appropriate.
                    452:  */
                    453: #if !defined(curlwp)
                    454: #if defined(MULTIPROCESSOR)
                    455: #define        curlwp          curcpu()->ci_curlwp     /* Current running LWP */
                    456: #else
                    457: extern struct lwp      *curlwp;                /* Current running LWP */
                    458: #endif /* MULTIPROCESSOR */
                    459: #endif /* ! curlwp */
                    460: #define        curproc         (curlwp->l_proc)
                    461:
                    462: static inline bool
                    463: CURCPU_IDLE_P(void)
                    464: {
                    465:        struct cpu_info *ci = curcpu();
                    466:        return ci->ci_data.cpu_onproc == ci->ci_data.cpu_idlelwp;
                    467: }
                    468:
                    469: /*
1.89      ad        470:  * Disable and re-enable preemption.  Only for low-level kernel
1.89.2.2  yamt      471:  * use.  Device drivers and anything that could potentially be
                    472:  * compiled as a module should use kpreempt_disable() and
1.89      ad        473:  * kpreempt_enable().
                    474:  */
                    475: static inline void
1.89.2.1  yamt      476: KPREEMPT_DISABLE(lwp_t *l)
1.89      ad        477: {
                    478:
1.89.2.1  yamt      479:        KASSERT(l == curlwp);
                    480:        l->l_nopreempt++;
1.89      ad        481:        __insn_barrier();
                    482: }
                    483:
                    484: static inline void
1.89.2.1  yamt      485: KPREEMPT_ENABLE(lwp_t *l)
1.89      ad        486: {
                    487:
1.89.2.1  yamt      488:        KASSERT(l == curlwp);
                    489:        KASSERT(l->l_nopreempt > 0);
                    490:        __insn_barrier();
                    491:        if (--l->l_nopreempt != 0)
                    492:                return;
                    493:        __insn_barrier();
                    494:        if (__predict_false(l->l_dopreempt))
                    495:                kpreempt(0);
1.89      ad        496:        __insn_barrier();
                    497: }
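/*
 * Assumed usage sketch: the two calls bracket the critical section and
 * are passed curlwp:
 *
 *	lwp_t *l = curlwp;
 *
 *	KPREEMPT_DISABLE(l);
 *	... code that must not be preempted ...
 *	KPREEMPT_ENABLE(l);
 */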
                    498:
1.89.2.1  yamt      499: /* For lwp::l_dopreempt */
                    500: #define        DOPREEMPT_ACTIVE        0x01
                    501: #define        DOPREEMPT_COUNTED       0x02
                    502:
1.60      yamt      503: #endif /* _KERNEL */
1.2       thorpej   504:
                    505: /* Flags for _lwp_create(), as per Solaris. */
                    506: #define LWP_DETACHED    0x00000040
                    507: #define LWP_SUSPENDED   0x00000080
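/*
 * Illustrative userland sketch (assumed; see _lwp_create(2)): create a
 * detached LWP from a previously initialized ucontext_t.  The variable
 * names are hypothetical.
 *
 *	error = _lwp_create(&uc, LWP_DETACHED, &lid);
 */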
1.89.2.6! yamt      508:
        !           509: /* Kernel-internal flags for LWP creation. */
        !           510: #define        LWP_PIDLID      0x40000000
1.89.2.2  yamt      511: #define        LWP_VFORK       0x80000000
1.2       thorpej   512:
                    513: #endif /* !_SYS_LWP_H_ */
