
Annotation of src/sys/sys/lwp.h, Revision 1.151

1.151   ! rmind       1: /*     $NetBSD: lwp.h,v 1.150 2011/03/08 12:39:29 pooka Exp $  */
1.2       thorpej     2:
                      3: /*-
1.132     ad          4:  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
                      5:  *    The NetBSD Foundation, Inc.
1.2       thorpej     6:  * All rights reserved.
                      7:  *
                      8:  * This code is derived from software contributed to The NetBSD Foundation
1.47      ad          9:  * by Nathan J. Williams and Andrew Doran.
1.2       thorpej    10:  *
                     11:  * Redistribution and use in source and binary forms, with or without
                     12:  * modification, are permitted provided that the following conditions
                     13:  * are met:
                     14:  * 1. Redistributions of source code must retain the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer.
                     16:  * 2. Redistributions in binary form must reproduce the above copyright
                     17:  *    notice, this list of conditions and the following disclaimer in the
                     18:  *    documentation and/or other materials provided with the distribution.
                     19:  *
                     20:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     21:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     22:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     23:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     24:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     25:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     26:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     27:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     28:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     29:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     30:  * POSSIBILITY OF SUCH DAMAGE.
                     31:  */
                     32:
1.31      christos   33: #ifndef _SYS_LWP_H_
                     34: #define _SYS_LWP_H_
1.2       thorpej    35:
1.133     dholland   36: #include <sys/param.h>
1.47      ad         37: #include <sys/time.h>
                     38: #include <sys/queue.h>
                     39: #include <sys/callout.h>
                     40: #include <sys/mutex.h>
                     41: #include <sys/condvar.h>
                     42: #include <sys/signalvar.h>
1.78      rmind      43: #include <sys/sched.h>
1.47      ad         44: #include <sys/specificdata.h>
1.53      yamt       45: #include <sys/syncobj.h>
1.85      ad         46: #include <sys/resource.h>
1.2       thorpej    47:
                     48: #if defined(_KERNEL)
                     49: #include <machine/cpu.h>               /* curcpu() and cpu_info */
                     50: #endif
1.47      ad         51:
1.2       thorpej    52: #include <machine/proc.h>              /* Machine-dependent proc substruct. */
                     53:
1.47      ad         54: /*
                     55:  * Lightweight process.  Field markings and the corresponding locks:
                     56:  *
1.122     rmind      57:  * a:  proclist_lock
1.58      ad         58:  * c:  condition variable interlock, passed to cv_wait()
1.47      ad         59:  * l:  *l_mutex
1.88      ad         60:  * p:  l_proc->p_lock
1.60      yamt       61:  * s:  spc_mutex, which may or may not be referenced by l_mutex
1.132     ad         62:  * S:  l_selcluster->sc_lock
1.47      ad         63:  * (:  unlocked, stable
1.68      ad         64:  * !:  unlocked, may only be reliably accessed by the LWP itself
1.47      ad         65:  *
                      66:  * Fields are clustered together by usage (to increase the likelihood
                     67:  * of cache hits) and by size (to reduce dead space in the structure).
                     68:  */
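/*
 * Illustrative sketch, not part of lwp.h: a field marked 'p' above is
 * protected by l_proc->p_lock, so an update of such a field would be
 * bracketed by that mutex, for example:
 *
 *	mutex_enter(l->l_proc->p_lock);
 *	l->l_prflag |= LPR_DETACHED;	(l_prflag is a 'p'-marked field)
 *	mutex_exit(l->l_proc->p_lock);
 */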
1.144     matt       69: #if defined(_KERNEL) || defined(_KMEMUSER)
1.151   ! rmind      70:
        !            71: #include <sys/pcu.h>
        !            72:
1.94      ad         73: struct lockdebug;
1.110     wrstuden   74: struct sadata_vp;
1.113     ad         75: struct sysent;
1.94      ad         76:
1.60      yamt       77: struct lwp {
1.141     rmind      78:        /* Scheduling and overall state. */
1.60      yamt       79:        TAILQ_ENTRY(lwp) l_runq;        /* s: run queue */
1.108     rmind      80:        union {
                     81:                void *  info;           /* s: scheduler-specific structure */
                     82:                u_int   timeslice;      /* l: time-quantum for SCHED_M2 */
                     83:        } l_sched;
1.47      ad         84:        struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
                     85:        kmutex_t * volatile l_mutex;    /* l: ptr to mutex on sched state */
1.70      ad         86:        int             l_ctxswtch;     /* l: performing a context switch */
1.140     rmind      87:        void            *l_addr;        /* l: PCB address; use lwp_getpcb() */
1.47      ad         88:        struct mdlwp    l_md;           /* l: machine-dependent fields. */
                     89:        int             l_flag;         /* l: misc flag values */
                     90:        int             l_stat;         /* l: overall LWP status */
1.73      yamt       91:        struct bintime  l_rtime;        /* l: real time */
                     92:        struct bintime  l_stime;        /* l: start time (while ONPROC) */
1.47      ad         93:        u_int           l_swtime;       /* l: time swapped in or out */
1.121     rmind      94:        u_int           _reserved1;
1.86      ad         95:        u_int           l_rticks;       /* l: Saved start time of run */
                     96:        u_int           l_rticksum;     /* l: Sum of ticks spent running */
1.96      rmind      97:        u_int           l_slpticks;     /* l: Saved start time of sleep */
                     98:        u_int           l_slpticksum;   /* l: Sum of ticks spent sleeping */
1.47      ad         99:        int             l_biglocks;     /* l: biglock count before sleep */
1.68      ad        100:        int             l_class;        /* l: scheduling class */
                    101:        int             l_kpriority;    /* !: has kernel priority boost */
1.71      ad        102:        pri_t           l_kpribase;     /* !: kernel priority base level */
1.68      ad        103:        pri_t           l_priority;     /* l: scheduler priority */
1.54      yamt      104:        pri_t           l_inheritedprio;/* l: inherited priority */
1.52      yamt      105:        SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
1.60      yamt      106:        uint64_t        l_ncsw;         /* l: total context switches */
                    107:        uint64_t        l_nivcsw;       /* l: involuntary context switches */
1.103     rmind     108:        u_int           l_cpticks;      /* (: Ticks of CPU time */
                    109:        fixpt_t         l_pctcpu;       /* p: %cpu during l_swtime */
1.68      ad        110:        fixpt_t         l_estcpu;       /* l: cpu time for SCHED_4BSD */
1.77      rmind     111:        psetid_t        l_psid;         /* l: assigned processor-set ID */
                    112:        struct cpu_info *l_target_cpu;  /* l: target CPU to migrate */
1.69      ad        113:        struct lwpctl   *l_lwpctl;      /* p: lwpctl block kernel address */
                    114:        struct lcpage   *l_lcpage;      /* p: lwpctl containing page */
1.102     christos  115:        kcpuset_t       *l_affinity;    /* l: CPU set for affinity */
1.109     wrstuden  116:        struct sadata_vp *l_savp;       /* p: SA "virtual processor" */
1.47      ad        117:
1.141     rmind     118:        /* Synchronisation. */
1.47      ad        119:        struct turnstile *l_ts;         /* l: current turnstile */
                    120:        struct syncobj  *l_syncobj;     /* l: sync object operations set */
                    121:        TAILQ_ENTRY(lwp) l_sleepchain;  /* l: sleep queue */
                    122:        wchan_t         l_wchan;        /* l: sleep address */
                    123:        const char      *l_wmesg;       /* l: reason for sleep */
                    124:        struct sleepq   *l_sleepq;      /* l: current sleep queue */
                    125:        int             l_sleeperr;     /* !: error before unblock */
                    126:        u_int           l_slptime;      /* l: time since last blocked */
1.68      ad        127:        callout_t       l_timeout_ch;   /* !: callout for tsleep */
1.120     rmind     128:        u_int           l_emap_gen;     /* !: emap generation number */
1.47      ad        129:
1.148     matt      130: #if PCU_UNIT_COUNT > 0
1.143     rmind     131:        struct cpu_info * volatile l_pcu_cpu[PCU_UNIT_COUNT];
                    132:        uint32_t        l_pcu_used;
1.148     matt      133: #endif
1.143     rmind     134:
1.61      ad        135:        /* Process level and global state, misc. */
1.47      ad        136:        LIST_ENTRY(lwp) l_list;         /* a: entry on list of all LWPs */
                    137:        void            *l_ctxlink;     /* p: uc_link {get,set}context */
                    138:        struct proc     *l_proc;        /* p: parent process */
                    139:        LIST_ENTRY(lwp) l_sibling;      /* p: entry on proc's list of LWPs */
1.57      ad        140:        lwpid_t         l_waiter;       /* p: first LWP waiting on us */
                    141:        lwpid_t         l_waitingfor;   /* p: specific LWP we are waiting on */
1.47      ad        142:        int             l_prflag;       /* p: process level flags */
                    143:        u_int           l_refcnt;       /* p: reference count on this LWP */
                    144:        lwpid_t         l_lid;          /* (: LWP identifier; local to proc */
1.137     rmind     145:        char            *l_name;        /* (: name, optional */
                    146:
1.141     rmind     147:        /* State of select() or poll(). */
1.137     rmind     148:        int             l_selflag;      /* S: polling state flags */
1.61      ad        149:        SLIST_HEAD(,selinfo) l_selwait; /* S: descriptors waited on */
1.137     rmind     150:        int             l_selret;       /* S: return value of select/poll */
1.141     rmind     151:        uintptr_t       l_selrec;       /* !: argument for selrecord() */
                    152:        struct selcluster *l_selcluster;/* !: associated cluster data */
1.137     rmind     153:        void *          l_selbits;      /* (: select() bit-field */
                    154:        size_t          l_selni;        /* (: size of a single bit-field */
1.47      ad        155:
1.141     rmind     156:        /* Signals. */
1.47      ad        157:        int             l_sigrestore;   /* p: need to restore old sig mask */
                    158:        sigset_t        l_sigwaitset;   /* p: signals being waited for */
                    159:        kcondvar_t      l_sigcv;        /* p: for sigsuspend() */
                    160:        struct ksiginfo *l_sigwaited;   /* p: delivered signals from set */
                    161:        sigpend_t       *l_sigpendset;  /* p: XXX issignal()/postsig() baton */
                    162:        LIST_ENTRY(lwp) l_sigwaiter;    /* p: chain on list of waiting LWPs */
                    163:        stack_t         l_sigstk;       /* p: sp & on stack state variable */
                    164:        sigset_t        l_sigmask;      /* p: signal mask */
                    165:        sigpend_t       l_sigpend;      /* p: signals to this LWP */
                    166:        sigset_t        l_sigoldmask;   /* p: mask for sigpause */
1.2       thorpej   167:
1.141     rmind     168:        /* Private data. */
1.42      thorpej   169:        specificdata_reference
1.47      ad        170:                l_specdataref;          /* !: subsystem lwp-specific data */
1.116     christos  171:        struct timespec l_ktrcsw;       /* !: for ktrace CSW trace XXX */
1.47      ad        172:        void            *l_private;     /* !: svr4-style lwp-private data */
1.62      ad        173:        struct lwp      *l_switchto;    /* !: mi_switch: switch to this LWP */
1.47      ad        174:        struct kauth_cred *l_cred;      /* !: cached credentials */
1.81      ad        175:        struct filedesc *l_fd;          /* !: cached copy of proc::p_fd */
1.47      ad        176:        void            *l_emuldata;    /* !: kernel lwp-private data */
1.61      ad        177:        u_int           l_cv_signalled; /* c: restarted by cv_signal() */
1.47      ad        178:        u_short         l_shlocks;      /* !: lockdebug: shared locks held */
                    179:        u_short         l_exlocks;      /* !: lockdebug: excl. locks held */
1.90      ad        180:        u_short         l_unused;       /* !: unused */
1.61      ad        181:        u_short         l_blcnt;        /* !: count of kernel_lock held */
1.90      ad        182:        int             l_nopreempt;    /* !: don't preempt me! */
                    183:        u_int           l_dopreempt;    /* s: kernel preemption pending */
1.47      ad        184:        int             l_pflag;        /* !: LWP private flags */
                    185:        int             l_dupfd;        /* !: side return from cloning devs XXX */
1.114     ad        186:        const struct sysent * volatile l_sysent;/* !: currently active syscall */
1.85      ad        187:        struct rusage   l_ru;           /* !: accounting information */
1.90      ad        188:        uint64_t        l_pfailtime;    /* !: for kernel preemption */
                    189:        uintptr_t       l_pfailaddr;    /* !: for kernel preemption */
                    190:        uintptr_t       l_pfaillock;    /* !: for kernel preemption */
1.94      ad        191:        _TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */
1.118     ad        192:        int             l_tcgen;        /* !: for timecounter removal */
                    193:        int             l_unused2;      /* !: for future use */
1.50      dsl       194:
1.141     rmind     195:        /* These are only used by 'options SYSCALL_TIMES'. */
                    196:        uint32_t        l_syscall_time; /* !: time epoch for current syscall */
                    197:        uint64_t        *l_syscall_counter; /* !: counter for current process */
1.128     darran    198:
1.141     rmind     199:        struct kdtrace_thread *l_dtrace; /* (: DTrace-specific data. */
1.60      yamt      200: };
1.144     matt      201: #endif /* _KERNEL || _KMEMUSER */
1.2       thorpej   202:
1.119     yamt      203: /*
1.140     rmind     204:  * UAREA_PCB_OFFSET: offset of the PCB structure within the uarea.  MD code
                     205:  * may define it in <machine/proc.h> to indicate a different uarea layout.
1.119     yamt      206:  */
1.140     rmind     207: #ifndef UAREA_PCB_OFFSET
                    208: #define        UAREA_PCB_OFFSET        0
                    209: #endif
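/*
 * A minimal sketch (assumed usage, not quoted from the kernel): the
 * offset locates the PCB within a uarea, so code holding a uarea
 * address would find the PCB roughly as:
 *
 *	vaddr_t uaddr = ...;		(hypothetical uarea address)
 *	struct pcb *pcb = (struct pcb *)(uaddr + UAREA_PCB_OFFSET);
 */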
1.37      yamt      210:
1.141     rmind     211: LIST_HEAD(lwplist, lwp);               /* A list of LWPs. */
1.2       thorpej   212:
1.29      christos  213: #ifdef _KERNEL
1.141     rmind     214: extern struct lwplist  alllwp;         /* List of all LWPs. */
                    215: extern lwp_t           lwp0;           /* LWP for proc0. */
1.29      christos  216: #endif
1.2       thorpej   217:
1.47      ad        218: /* These flags are kept in l_flag. */
1.60      yamt      219: #define        LW_IDLE         0x00000001 /* Idle lwp. */
1.149     pooka     220: #define        LW_LWPCTL       0x00000002 /* Adjust lwpctl in userret */
1.49      pavel     221: #define        LW_SINTR        0x00000080 /* Sleep is interruptible. */
1.109     wrstuden  222: #define        LW_SA_SWITCHING 0x00000100 /* SA LWP in context switch */
1.49      pavel     223: #define        LW_SYSTEM       0x00000200 /* Kernel thread */
1.109     wrstuden  224: #define        LW_SA           0x00000400 /* Scheduler activations LWP */
1.49      pavel     225: #define        LW_WSUSPEND     0x00020000 /* Suspend before return to user */
1.96      rmind     226: #define        LW_BATCH        0x00040000 /* LWP tends to hog CPU */
1.49      pavel     227: #define        LW_WCORE        0x00080000 /* Stop for core dump on return to user */
                    228: #define        LW_WEXIT        0x00100000 /* Exit before return to user */
1.77      rmind     229: #define        LW_AFFINITY     0x00200000 /* Affinity is assigned to the thread */
1.109     wrstuden  230: #define        LW_SA_UPCALL    0x00400000 /* SA upcall is pending */
                    231: #define        LW_SA_BLOCKING  0x00800000 /* Blocking in tsleep() */
1.49      pavel     232: #define        LW_PENDSIG      0x01000000 /* Pending signal for us */
                    233: #define        LW_CANCELLED    0x02000000 /* tsleep should not sleep */
                    234: #define        LW_WREBOOT      0x08000000 /* System is rebooting, please suspend */
1.60      yamt      235: #define        LW_UNPARKED     0x10000000 /* Unpark op pending */
1.109     wrstuden  236: #define        LW_SA_YIELD     0x40000000 /* LWP on VP is yielding */
                    237: #define        LW_SA_IDLE      0x80000000 /* VP is idle */
1.138     pooka     238: #define        LW_RUMP_CLEAR   LW_SA_IDLE /* clear curlwp in rump scheduler */
1.150     pooka     239: #define        LW_RUMP_QEXIT   LW_SA_YIELD /* lwp should exit ASAP */
1.47      ad        240:
                    241: /* The second set of flags is kept in l_pflag. */
                    242: #define        LP_KTRACTIVE    0x00000001 /* Executing ktrace operation */
                    243: #define        LP_KTRCSW       0x00000002 /* ktrace context switch marker */
                    244: #define        LP_KTRCSWUSER   0x00000004 /* ktrace context switch marker */
1.136     chs       245: #define        LP_PIDLID       0x00000008 /* free LID from PID space on exit */
1.47      ad        246: #define        LP_OWEUPC       0x00000010 /* Owe user profiling tick */
1.61      ad        247: #define        LP_MPSAFE       0x00000020 /* Starts life without kernel_lock */
1.68      ad        248: #define        LP_INTR         0x00000040 /* Soft interrupt handler */
1.76      ad        249: #define        LP_SYSCTLWRITE  0x00000080 /* sysctl write lock held */
1.109     wrstuden  250: #define        LP_SA_PAGEFAULT 0x00000200 /* SA LWP in pagefault handler */
                    251: #define        LP_SA_NOBLOCK   0x00000400 /* SA don't upcall on block */
1.97      ad        252: #define        LP_TIMEINTR     0x00010000 /* Time this soft interrupt */
                    253: #define        LP_RUNNING      0x20000000 /* Active on a CPU */
1.87      ad        254: #define        LP_BOUND        0x80000000 /* Bound to a CPU */
1.47      ad        255:
                    256: /* The third set is kept in l_prflag. */
                    257: #define        LPR_DETACHED    0x00800000 /* Won't be waited for. */
1.82      ad        258: #define        LPR_CRMOD       0x00000100 /* Credentials modified */
1.47      ad        259:
                    260: /*
                    261:  * Mask indicating that there is "exceptional" work to be done on return to
                    262:  * user.
                    263:  */
1.49      pavel     264: #define        LW_USERRET (LW_WEXIT|LW_PENDSIG|LW_WREBOOT|LW_WSUSPEND|LW_WCORE|\
1.149     pooka     265:                    LW_SA_BLOCKING|LW_SA_UPCALL|LW_LWPCTL)
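/*
 * Hedged sketch of how the mask is consumed (illustrative, not quoted
 * from the MD userret code): on the way back to user mode, the flags
 * are tested and the slow path is taken only when one of them is set:
 *
 *	if (__predict_false((l->l_flag & LW_USERRET) != 0))
 *		lwp_userret(l);
 */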
1.2       thorpej   266:
                    267: /*
                    268:  * Status values.
                    269:  *
                    270:  * A note about SRUN and SONPROC: SRUN indicates that a process is
                    271:  * runnable but *not* yet running, i.e. is on a run queue.  SONPROC
                    272:  * indicates that the process is actually executing on a CPU, i.e.
                    273:  * it is no longer on a run queue.
                    274:  */
1.47      ad        275: #define        LSIDL           1       /* Process being created by fork. */
                    276: #define        LSRUN           2       /* Currently runnable. */
                    277: #define        LSSLEEP         3       /* Sleeping on an address. */
                    278: #define        LSSTOP          4       /* Process debugging or suspension. */
                    279: #define        LSZOMB          5       /* Awaiting collection by parent. */
1.49      pavel     280: /* unused, for source compatibility with NetBSD 4.0 and earlier. */
                    281: #define        LSDEAD          6       /* Process is almost a zombie. */
1.2       thorpej   282: #define        LSONPROC        7       /* Process is currently on a CPU. */
                    283: #define        LSSUSPENDED     8       /* Not running, not signalable. */
                    284:
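/*
 * Illustrative check, not taken from this file: because l_stat is an
 * 'l'-marked field, code distinguishing "on a run queue" (LSRUN) from
 * "executing on a CPU" (LSONPROC) would inspect it with the LWP locked:
 *
 *	lwp_lock(l);
 *	if (l->l_stat == LSONPROC)
 *		(running right now on l->l_cpu)
 *	else if (l->l_stat == LSRUN)
 *		(runnable, sitting on a run queue)
 *	lwp_unlock(l);
 */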
1.147     jakllsch  285: #if defined(_KERNEL) || defined(_KMEMUSER)
                    286: static inline void *
                    287: lwp_getpcb(struct lwp *l)
                    288: {
                    289:
                    290:        return l->l_addr;
                    291: }
                    292: #endif /* _KERNEL || _KMEMUSER */
                    293:
1.2       thorpej   294: #ifdef _KERNEL
1.38      ad        295: #define        LWP_CACHE_CREDS(l, p)                                           \
                    296: do {                                                                   \
1.82      ad        297:        (void)p;                                                        \
                    298:        if (__predict_false((l)->l_prflag & LPR_CRMOD))                 \
1.38      ad        299:                lwp_update_creds(l);                                    \
                    300: } while (/* CONSTCOND */ 0)
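/*
 * A hedged usage sketch (the call site is illustrative, not quoted):
 * syscall entry paths refresh the cached credentials before relying on
 * l_cred, roughly:
 *
 *	struct lwp *l = curlwp;
 *
 *	LWP_CACHE_CREDS(l, l->l_proc);
 *	cred = l->l_cred;		(cached credentials now current)
 */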
1.2       thorpej   301:
1.141     rmind     302: void   lwpinit(void);
                    303: void   lwp0_init(void);
                    304: void   lwp_sys_init(void);
                    305:
1.60      yamt      306: void   lwp_startup(lwp_t *, lwp_t *);
1.141     rmind     307: void   startlwp(void *);
                    308: void   cpu_setfunc(lwp_t *, void (*)(void *), void *);
                    309: void   upcallret(lwp_t *);
1.2       thorpej   310:
1.58      ad        311: int    lwp_locked(lwp_t *, kmutex_t *);
                    312: void   lwp_setlock(lwp_t *, kmutex_t *);
                    313: void   lwp_unlock_to(lwp_t *, kmutex_t *);
                    314: int    lwp_trylock(lwp_t *);
                    315: void   lwp_addref(lwp_t *);
                    316: void   lwp_delref(lwp_t *);
1.129     christos  317: void   lwp_delref2(lwp_t *);
1.58      ad        318: void   lwp_drainrefs(lwp_t *);
1.117     ad        319: bool   lwp_alive(lwp_t *);
                    320: lwp_t  *lwp_find_first(proc_t *);
1.2       thorpej   321:
                    322: /* Flags for _lwp_wait1 */
                    323: #define LWPWAIT_EXITCONTROL    0x00000001
1.141     rmind     324: int    lwp_wait1(lwp_t *, lwpid_t, lwpid_t *, int);
1.58      ad        325: void   lwp_continue(lwp_t *);
1.141     rmind     326: void   lwp_unsleep(lwp_t *, bool);
1.129     christos  327: void   lwp_unstop(lwp_t *);
1.135     yamt      328: void   lwp_exit(lwp_t *);
1.106     uwe       329: void   lwp_exit_switchaway(lwp_t *) __dead;
1.58      ad        330: int    lwp_suspend(lwp_t *, lwp_t *);
                    331: int    lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
                    332: void   lwp_update_creds(lwp_t *);
1.77      rmind     333: void   lwp_migrate(lwp_t *, struct cpu_info *);
1.141     rmind     334: lwp_t *        lwp_find2(pid_t, lwpid_t);
                    335: lwp_t *        lwp_find(proc_t *, int);
1.58      ad        336: void   lwp_userret(lwp_t *);
                    337: void   lwp_need_userret(lwp_t *);
                    338: void   lwp_free(lwp_t *, bool, bool);
1.118     ad        339: uint64_t lwp_pctr(void);
1.136     chs       340: int    lwp_setprivate(lwp_t *, void *);
1.42      thorpej   341:
1.130     pooka     342: void   lwpinit_specificdata(void);
1.42      thorpej   343: int    lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
                    344: void   lwp_specific_key_delete(specificdata_key_t);
1.141     rmind     345: void   lwp_initspecific(lwp_t *);
                    346: void   lwp_finispecific(lwp_t *);
1.58      ad        347: void   *lwp_getspecific(specificdata_key_t);
1.46      hannken   348: #if defined(_LWP_API_PRIVATE)
1.58      ad        349: void   *_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t);
1.46      hannken   350: #endif
1.44      thorpej   351: void   lwp_setspecific(specificdata_key_t, void *);
1.47      ad        352:
1.141     rmind     353: /* Syscalls. */
1.63      ad        354: int    lwp_park(struct timespec *, const void *);
                    355: int    lwp_unpark(lwpid_t, const void *);
                    356:
1.141     rmind     357: /* DDB. */
                    358: void   lwp_whatis(uintptr_t, void (*)(const char *, ...));
1.63      ad        359:
1.47      ad        360: /*
1.115     ad        361:  * Lock an LWP. XXX _MODULE
1.47      ad        362:  */
                    363: static inline void
1.58      ad        364: lwp_lock(lwp_t *l)
1.47      ad        365: {
1.139     rmind     366:        kmutex_t *old = l->l_mutex;
1.47      ad        367:
                    368:        /*
1.139     rmind     369:         * Note: mutex_spin_enter() will have posted a read barrier.
                    370:         * Re-test l->l_mutex.  If it has changed, we need to try again.
1.47      ad        371:         */
1.139     rmind     372:        mutex_spin_enter(old);
                    373:        while (__predict_false(l->l_mutex != old)) {
                    374:                mutex_spin_exit(old);
                    375:                old = l->l_mutex;
                    376:                mutex_spin_enter(old);
                    377:        }
1.47      ad        378: }
                    379:
                    380: /*
1.115     ad        381:  * Unlock an LWP. XXX _MODULE
1.47      ad        382:  */
                    383: static inline void
1.58      ad        384: lwp_unlock(lwp_t *l)
1.47      ad        385: {
                    386:        mutex_spin_exit(l->l_mutex);
                    387: }
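/*
 * Typical pairing, shown as an assumption rather than a quote from the
 * kernel: operations on 'l'-marked state of struct lwp are performed
 * with the LWP's scheduler lock held, e.g.
 *
 *	lwp_lock(l);
 *	lwp_changepri(l, pri);		(asserts l_mutex is owned)
 *	lwp_unlock(l);
 */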
                    388:
                    389: static inline void
1.58      ad        390: lwp_changepri(lwp_t *l, pri_t pri)
1.47      ad        391: {
1.60      yamt      392:        KASSERT(mutex_owned(l->l_mutex));
1.47      ad        393:
                    394:        (*l->l_syncobj->sobj_changepri)(l, pri);
                    395: }
                    396:
                    397: static inline void
1.58      ad        398: lwp_lendpri(lwp_t *l, pri_t pri)
1.52      yamt      399: {
1.60      yamt      400:        KASSERT(mutex_owned(l->l_mutex));
1.52      yamt      401:
                    402:        if (l->l_inheritedprio == pri)
                    403:                return;
                    404:
                    405:        (*l->l_syncobj->sobj_lendpri)(l, pri);
                    406: }
                    407:
1.68      ad        408: static inline pri_t
1.58      ad        409: lwp_eprio(lwp_t *l)
1.52      yamt      410: {
1.68      ad        411:        pri_t pri;
1.52      yamt      412:
1.68      ad        413:        pri = l->l_priority;
                    414:        if (l->l_kpriority && pri < PRI_KERNEL)
1.71      ad        415:                pri = (pri >> 1) + l->l_kpribase;
1.68      ad        416:        return MAX(l->l_inheritedprio, pri);
1.52      yamt      417: }
                    418:
1.121     rmind     419: int lwp_create(lwp_t *, struct proc *, vaddr_t, int,
1.68      ad        420:     void *, size_t, void (*)(void *), void *, lwp_t **, int);
1.47      ad        421:
                    422: /*
1.115     ad        423:  * XXX _MODULE
                    424:  * We should provide real stubs for the below that modules can use.
1.47      ad        425:  */
                    426:
                    427: static inline void
1.60      yamt      428: spc_lock(struct cpu_info *ci)
1.47      ad        429: {
1.60      yamt      430:        mutex_spin_enter(ci->ci_schedstate.spc_mutex);
1.47      ad        431: }
                    432:
                    433: static inline void
1.60      yamt      434: spc_unlock(struct cpu_info *ci)
1.47      ad        435: {
1.60      yamt      436:        mutex_spin_exit(ci->ci_schedstate.spc_mutex);
1.47      ad        437: }
                    438:
1.77      rmind     439: static inline void
                    440: spc_dlock(struct cpu_info *ci1, struct cpu_info *ci2)
                    441: {
                    442:        struct schedstate_percpu *spc1 = &ci1->ci_schedstate;
                    443:        struct schedstate_percpu *spc2 = &ci2->ci_schedstate;
                    444:
                    445:        KASSERT(ci1 != ci2);
1.86      ad        446:        if (ci1 < ci2) {
1.77      rmind     447:                mutex_spin_enter(spc1->spc_mutex);
                    448:                mutex_spin_enter(spc2->spc_mutex);
                    449:        } else {
                    450:                mutex_spin_enter(spc2->spc_mutex);
                    451:                mutex_spin_enter(spc1->spc_mutex);
                    452:        }
                    453: }
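/*
 * Design note and sketch (an assumption; curci and tci are hypothetical
 * cpu_info pointers): acquiring the two spc_mutexes in a fixed,
 * address-based order means two CPUs double-locking each other's run
 * queues cannot deadlock.  A caller moving work between CPUs might do:
 *
 *	spc_dlock(curci, tci);
 *	(... migrate an LWP between the two run queues ...)
 *	spc_unlock(tci);
 *	spc_unlock(curci);
 */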
                    454:
1.89      ad        455: /*
1.92      ad        456:  * Allow machine-dependent code to override curlwp in <machine/cpu.h> for
                    457:  * its own convenience.  Otherwise, we declare it as appropriate.
                    458:  */
                    459: #if !defined(curlwp)
                    460: #if defined(MULTIPROCESSOR)
                    461: #define        curlwp          curcpu()->ci_curlwp     /* Current running LWP */
                    462: #else
                    463: extern struct lwp      *curlwp;                /* Current running LWP */
                    464: #endif /* MULTIPROCESSOR */
                    465: #endif /* ! curlwp */
                    466: #define        curproc         (curlwp->l_proc)
                    467:
                    468: static inline bool
                    469: CURCPU_IDLE_P(void)
                    470: {
                    471:        struct cpu_info *ci = curcpu();
                    472:        return ci->ci_data.cpu_onproc == ci->ci_data.cpu_idlelwp;
                    473: }
                    474:
                    475: /*
1.89      ad        476:  * Disable and re-enable preemption.  Only for low-level kernel
1.101     ad        477:  * use.  Device drivers and anything that could potentially be
                    478:  * compiled as a module should use kpreempt_disable() and
1.89      ad        479:  * kpreempt_enable().
                    480:  */
                    481: static inline void
1.90      ad        482: KPREEMPT_DISABLE(lwp_t *l)
1.89      ad        483: {
                    484:
1.90      ad        485:        KASSERT(l == curlwp);
                    486:        l->l_nopreempt++;
1.89      ad        487:        __insn_barrier();
                    488: }
                    489:
                    490: static inline void
1.90      ad        491: KPREEMPT_ENABLE(lwp_t *l)
1.89      ad        492: {
                    493:
1.90      ad        494:        KASSERT(l == curlwp);
                    495:        KASSERT(l->l_nopreempt > 0);
                    496:        __insn_barrier();
                    497:        if (--l->l_nopreempt != 0)
                    498:                return;
                    499:        __insn_barrier();
                    500:        if (__predict_false(l->l_dopreempt))
                    501:                kpreempt(0);
1.89      ad        502:        __insn_barrier();
                    503: }
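/*
 * Minimal pairing sketch (assumes curlwp, as the KASSERTs require):
 * bracket a short section that must not be preempted:
 *
 *	lwp_t *l = curlwp;
 *
 *	KPREEMPT_DISABLE(l);
 *	(... per-CPU work that must complete on this CPU ...)
 *	KPREEMPT_ENABLE(l);
 */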
                    504:
1.90      ad        505: /* For lwp::l_dopreempt */
                    506: #define        DOPREEMPT_ACTIVE        0x01
                    507: #define        DOPREEMPT_COUNTED       0x02
                    508:
1.60      yamt      509: #endif /* _KERNEL */
1.2       thorpej   510:
                    511: /* Flags for _lwp_create(), as per Solaris. */
1.141     rmind     512: #define        LWP_DETACHED    0x00000040
                    513: #define        LWP_SUSPENDED   0x00000080
1.136     chs       514:
                    515: /* Kernel-internal flags for LWP creation. */
                    516: #define        LWP_PIDLID      0x40000000
1.98      ad        517: #define        LWP_VFORK       0x80000000
1.2       thorpej   518:
                    519: #endif /* !_SYS_LWP_H_ */
