[BACK]Return to lwp.h CVS log [TXT][DIR] Up to [cvs.NetBSD.org] / src / sys / sys

Annotation of src/sys/sys/lwp.h, Revision 1.48.2.3

1.48.2.3! rmind       1: /*     $NetBSD: lwp.h,v 1.48.2.2 2007/02/18 13:32:34 yamt Exp $        */
1.2       thorpej     2:
                      3: /*-
1.47      ad          4:  * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
1.2       thorpej     5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
1.47      ad          8:  * by Nathan J. Williams and Andrew Doran.
1.2       thorpej     9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  * 3. All advertising materials mentioning features or use of this software
                     19:  *    must display the following acknowledgement:
                     20:  *        This product includes software developed by the NetBSD
                     21:  *        Foundation, Inc. and its contributors.
                     22:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     23:  *    contributors may be used to endorse or promote products derived
                     24:  *    from this software without specific prior written permission.
                     25:  *
                     26:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     27:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     28:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     29:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     30:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     31:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     32:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     33:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     34:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     35:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     36:  * POSSIBILITY OF SUCH DAMAGE.
                     37:  */
                     38:
1.31      christos   39: #ifndef _SYS_LWP_H_
                     40: #define _SYS_LWP_H_
1.2       thorpej    41:
1.47      ad         42: #include <sys/time.h>
                     43: #include <sys/queue.h>
                     44: #include <sys/callout.h>
                     45: #include <sys/mutex.h>
                     46: #include <sys/condvar.h>
                     47: #include <sys/signalvar.h>
                     48: #include <sys/specificdata.h>
1.2       thorpej    49:
                     50: #if defined(_KERNEL)
                     51: #include <machine/cpu.h>               /* curcpu() and cpu_info */
                     52: #endif
1.47      ad         53:
1.2       thorpej    54: #include <machine/proc.h>              /* Machine-dependent proc substruct. */
                     55:
1.47      ad         56: typedef volatile const void *wchan_t;
                     57:
                     58: /*
                     59:  * Lightweight process.  Field markings and the corresponding locks:
                     60:  *
                     61:  * a:  proclist_mutex
                     62:  * l:  *l_mutex
                     63:  * p:  l_proc->p_smutex
                     64:  * s:  sched_mutex, which may or may not be referenced by l_mutex
                     65:  * (:  unlocked, stable
                     66:  * !:  unlocked, may only be safely accessed by the LWP itself
                     67:  * ?:  undecided
                     68:  *
                     69:  * Fields are clustered together by usage (to increase the likelihood
                     70:  * of cache hits) and by size (to reduce dead space in the structure).
                     71:  */
/*
 * struct lwp: one lightweight process (kernel thread of execution).
 * See the locking key in the comment above: each field's marker gives
 * the lock that protects it ('l' = *l_mutex, 'p' = l_proc->p_smutex,
 * 'a' = proclist_mutex, 's' = sched_mutex, '!' = LWP-private).
 */
struct lwp {
	/* Scheduling and overall state */
	struct lwp	*l_forw;	/* s: run queue */
	struct lwp	*l_back;	/* s: run queue */
	struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
	kmutex_t * volatile l_mutex;	/* l: ptr to mutex on sched state */
	struct user	*l_addr;	/* l: KVA of u-area (PROC ONLY) */
	struct mdlwp	l_md;		/* l: machine-dependent fields. */
	int		l_flag;		/* l: misc flag values */
	int		l_stat;		/* l: overall LWP status */
	struct timeval	l_rtime;	/* l: real time */
	u_int		l_swtime;	/* l: time swapped in or out */
	int		l_holdcnt;	/* l: if non-zero, don't swap */
	int		l_biglocks;	/* l: biglock count before sleep */
	u_char		l_priority;	/* l: process priority */
	u_char		l_usrpri;	/* l: user-priority */
	long		l_nvcsw;	/* l: voluntary context switches */
	long		l_nivcsw;	/* l: involuntary context switches */

	/* Synchronisation */
	struct turnstile *l_ts;		/* l: current turnstile */
	struct syncobj	*l_syncobj;	/* l: sync object operations set */
	TAILQ_ENTRY(lwp) l_sleepchain;	/* l: sleep queue */
	wchan_t		l_wchan;	/* l: sleep address */
	const char	*l_wmesg;	/* l: reason for sleep */
	struct sleepq	*l_sleepq;	/* l: current sleep queue */
	int		l_sleeperr;	/* !: error before unblock */
	u_int		l_slptime;	/* l: time since last blocked */
	struct callout	l_tsleep_ch;	/* !: callout for tsleep */

	/* Process level and global state */
	LIST_ENTRY(lwp) l_list;		/* a: entry on list of all LWPs */
	void		*l_ctxlink;	/* p: uc_link {get,set}context */
	struct proc	*l_proc;	/* p: parent process */
	LIST_ENTRY(lwp) l_sibling;	/* p: entry on proc's list of LWPs */
	int		l_prflag;	/* p: process level flags */
	u_int		l_refcnt;	/* p: reference count on this LWP */
	lwpid_t		l_lid;		/* (: LWP identifier; local to proc */

	/* Signals */
	int		l_sigrestore;	/* p: need to restore old sig mask */
	sigset_t	l_sigwaitset;	/* p: signals being waited for */
	kcondvar_t	l_sigcv;	/* p: for sigsuspend() */
	struct ksiginfo	*l_sigwaited;	/* p: delivered signals from set */
	sigpend_t	*l_sigpendset;	/* p: XXX issignal()/postsig() baton */
	LIST_ENTRY(lwp)	l_sigwaiter;	/* p: chain on list of waiting LWPs */
	stack_t		l_sigstk;	/* p: sp & on stack state variable */
	sigset_t	l_sigmask;	/* p: signal mask */
	sigpend_t	l_sigpend;	/* p: signals to this LWP */
	sigset_t	l_sigoldmask;	/* p: mask for sigpause */

	/* Private data */
	specificdata_reference
		l_specdataref;		/* !: subsystem lwp-specific data */
	union {
		struct timeval tv;
		struct timespec ts;
	} l_ktrcsw;			/* !: for ktrace CSW trace XXX */
	void		*l_private;	/* !: svr4-style lwp-private data */
	struct kauth_cred *l_cred;	/* !: cached credentials */
	void		*l_emuldata;	/* !: kernel lwp-private data */
	u_short		l_acflag;	/* !: accounting flags */
	u_short		l_shlocks;	/* !: lockdebug: shared locks held */
	u_short		l_exlocks;	/* !: lockdebug: excl. locks held */
	u_short		l_locks;	/* !: lockmgr count of held locks */
	int		l_pflag;	/* !: LWP private flags */
	int		l_dupfd;	/* !: side return from cloning devs XXX */
};
                    140:
/*
 * Default conversions between the struct user pointer and the base
 * address of the u-area, for ports that do not define their own.
 * UAREA_USER_OFFSET is the byte offset of struct user within the u-area.
 */
#if !defined(USER_TO_UAREA)
#if !defined(UAREA_USER_OFFSET)
#define	UAREA_USER_OFFSET	0
#endif /* !defined(UAREA_USER_OFFSET) */
#define	USER_TO_UAREA(user)	((vaddr_t)(user) - UAREA_USER_OFFSET)
#define	UAREA_TO_USER(uarea)	((struct user *)((uarea) + UAREA_USER_OFFSET))
#endif /* !defined(USER_TO_UAREA) */
                    148:
1.2       thorpej   149: LIST_HEAD(lwplist, lwp);               /* a list of LWPs */
                    150:
1.29      christos  151: #ifdef _KERNEL
1.47      ad        152: extern kmutex_t        sched_mutex;            /* Mutex on global run queue */
                    153: extern kmutex_t alllwp_mutex;          /* Mutex on alllwp */
1.2       thorpej   154: extern struct lwplist alllwp;          /* List of all LWPs. */
                    155:
                    156: extern struct pool lwp_uc_pool;                /* memory pool for LWP startup args */
                    157:
                    158: extern struct lwp lwp0;                        /* LWP for proc0 */
1.29      christos  159: #endif
1.2       thorpej   160:
/*
 * These flags are kept in l_flag (protected by *l_mutex, per the
 * locking key on struct lwp).  NOTE(review): the bit values are
 * sparse; presumably the gaps belonged to removed flags -- confirm
 * against CVS history before reusing a value.
 */
#define	L_IDLE		0x00000001 /* Idle lwp. */
#define	L_INMEM		0x00000004 /* Loaded into memory. */
#define	L_SELECT	0x00000040 /* Selecting; wakeup/waiting danger. */
#define	L_SINTR		0x00000080 /* Sleep is interruptible. */
#define	L_SYSTEM	0x00000200 /* Kernel thread */
#define	L_WSUSPEND	0x00020000 /* Suspend before return to user */
#define	L_WCORE		0x00080000 /* Stop for core dump on return to user */
#define	L_WEXIT		0x00100000 /* Exit before return to user */
#define	L_PENDSIG	0x01000000 /* Pending signal for us */
#define	L_CANCELLED	0x02000000 /* tsleep should not sleep */
#define	L_WUSERRET	0x04000000 /* Call proc::p_userret on return to user */
#define	L_WREBOOT	0x08000000 /* System is rebooting, please suspend */

/* The second set of flags is kept in l_pflag (LWP-private, unlocked). */
#define	LP_KTRACTIVE	0x00000001 /* Executing ktrace operation */
#define	LP_KTRCSW	0x00000002 /* ktrace context switch marker */
#define	LP_KTRCSWUSER	0x00000004 /* ktrace context switch marker */
#define	LP_UFSCOW	0x00000008 /* UFS: doing copy on write */
#define	LP_OWEUPC	0x00000010 /* Owe user profiling tick */

/* The third set is kept in l_prflag (protected by l_proc->p_smutex). */
#define	LPR_DETACHED	0x00800000 /* Won't be waited for. */

/*
 * Mask indicating that there is "exceptional" work to be done on return to
 * user.
 */
#define	L_USERRET (L_WEXIT|L_PENDSIG|L_WREBOOT|L_WSUSPEND|L_WCORE|L_WUSERRET)
1.2       thorpej   190:
/*
 * Status values (l_stat).
 *
 * A note about LSRUN and LSONPROC: LSRUN indicates that a process is
 * runnable but *not* yet running, i.e. is on a run queue.  LSONPROC
 * indicates that the process is actually executing on a CPU, i.e.
 * it is no longer on a run queue.
 */
#define	LSIDL		1	/* Process being created by fork. */
#define	LSRUN		2	/* Currently runnable. */
#define	LSSLEEP		3	/* Sleeping on an address. */
#define	LSSTOP		4	/* Process debugging or suspension. */
#define	LSZOMB		5	/* Awaiting collection by parent. */
				/* 6 is unused; NOTE(review): presumably the
				 * historical LSDEAD value -- confirm before
				 * reusing. */
#define	LSONPROC	7	/* Process is currently on a CPU. */
#define	LSSUSPENDED	8	/* Not running, not signalable. */
                    206:
                    207: #ifdef _KERNEL
/*
 * PHOLD: take a hold on the LWP so its u-area cannot be swapped out;
 * on the 0 -> 1 transition, swap the LWP back in if it is not in
 * memory (L_INMEM clear).  NOTE(review): evaluates 'l' more than
 * once -- do not pass an expression with side effects.
 */
#define	PHOLD(l)							\
do {									\
	if ((l)->l_holdcnt++ == 0 && ((l)->l_flag & L_INMEM) == 0)	\
		uvm_swapin(l);						\
} while (/* CONSTCOND */ 0)
/* PRELE: release a hold taken with PHOLD.  Does not swap anything out. */
#define	PRELE(l)	(--(l)->l_holdcnt)

/*
 * Refresh the LWP's cached credentials (l_cred) if they have fallen
 * out of sync with the owning process's p_cred.  NOTE(review):
 * evaluates 'l' twice -- pass a plain lvalue.
 */
#define	LWP_CACHE_CREDS(l, p)						\
do {									\
	if ((l)->l_cred != (p)->p_cred)					\
		lwp_update_creds(l);					\
} while (/* CONSTCOND */ 0)
1.2       thorpej   220:
1.48.2.1  yamt      221: void   lwp_startup(struct lwp *, struct lwp *);
                    222:
1.47      ad        223: int    lwp_locked(struct lwp *, kmutex_t *);
                    224: void   lwp_setlock(struct lwp *, kmutex_t *);
                    225: void   lwp_unlock_to(struct lwp *, kmutex_t *);
                    226: void   lwp_lock_retry(struct lwp *, kmutex_t *);
                    227: void   lwp_relock(struct lwp *, kmutex_t *);
                    228: void   lwp_addref(struct lwp *);
                    229: void   lwp_delref(struct lwp *);
                    230: void   lwp_drainrefs(struct lwp *);
1.2       thorpej   231:
                    232: /* Flags for _lwp_wait1 */
                    233: #define LWPWAIT_EXITCONTROL    0x00000001
1.42      thorpej   234: void   lwpinit(void);
1.2       thorpej   235: int    lwp_wait1(struct lwp *, lwpid_t, lwpid_t *, int);
                    236: void   lwp_continue(struct lwp *);
                    237: void   cpu_setfunc(struct lwp *, void (*)(void *), void *);
                    238: void   startlwp(void *);
                    239: void   upcallret(struct lwp *);
1.47      ad        240: void   lwp_exit(struct lwp *);
1.48.2.1  yamt      241: void   lwp_exit_switchaway(struct lwp *);
1.47      ad        242: struct lwp *proc_representative_lwp(struct proc *, int *, int);
                    243: int    lwp_suspend(struct lwp *, struct lwp *);
1.34      cube      244: int    lwp_create1(struct lwp *, const void *, size_t, u_long, lwpid_t *);
1.38      ad        245: void   lwp_update_creds(struct lwp *);
1.47      ad        246: struct lwp *lwp_find(struct proc *, int);
                    247: void   lwp_userret(struct lwp *);
                    248: void   lwp_need_userret(struct lwp *);
                    249: void   lwp_free(struct lwp *, int, int);
                    250: void   lwp_sys_init(void);
1.42      thorpej   251:
                    252: int    lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
                    253: void   lwp_specific_key_delete(specificdata_key_t);
1.43      christos  254: void   lwp_initspecific(struct lwp *);
1.45      thorpej   255: void   lwp_finispecific(struct lwp *);
1.44      thorpej   256: void * lwp_getspecific(specificdata_key_t);
1.46      hannken   257: #if defined(_LWP_API_PRIVATE)
                    258: void * _lwp_getspecific_by_lwp(struct lwp *, specificdata_key_t);
                    259: #endif
1.44      thorpej   260: void   lwp_setspecific(specificdata_key_t, void *);
1.47      ad        261:
                    262: /*
                    263:  * Lock an LWP. XXXLKM
                    264:  */
/*
 * Lock an LWP.  XXXLKM
 *
 * An LWP's lock pointer (l_mutex) can change while we sleep on the
 * old mutex, so on MULTIPROCESSOR/LOCKDEBUG kernels we must re-check
 * it after acquisition and retry if it moved.
 */
static inline void
lwp_lock(struct lwp *l)
{
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	kmutex_t *old;

	mutex_spin_enter(old = l->l_mutex);

	/*
	 * mutex_enter() will have posted a read barrier.  Re-test
	 * l->l_mutex.  If it has changed, we need to try again.
	 */
	if (__predict_false(l->l_mutex != old))
		lwp_lock_retry(l, old);
#else
	/*
	 * NOTE(review): the uniprocessor case assumes l_mutex cannot
	 * change out from under us while acquiring -- confirm.
	 */
	mutex_spin_enter(l->l_mutex);
#endif
}
                    283:
                    284: /*
                    285:  * Unlock an LWP. XXXLKM
                    286:  */
/*
 * Unlock an LWP.  XXXLKM
 *
 * The caller must hold the LWP's current lock (*l_mutex).
 */
static inline void
lwp_unlock(struct lwp *l)
{
	LOCK_ASSERT(mutex_owned(l->l_mutex));

	mutex_spin_exit(l->l_mutex);
}
                    294:
                    295: static inline void
                    296: lwp_changepri(struct lwp *l, int pri)
                    297: {
                    298:        LOCK_ASSERT(mutex_owned(l->l_mutex));
                    299:
                    300:        (*l->l_syncobj->sobj_changepri)(l, pri);
                    301: }
                    302:
                    303: static inline void
                    304: lwp_unsleep(struct lwp *l)
                    305: {
                    306:        LOCK_ASSERT(mutex_owned(l->l_mutex));
                    307:
                    308:        (*l->l_syncobj->sobj_unsleep)(l);
                    309: }
                    310:
                    311: int newlwp(struct lwp *, struct proc *, vaddr_t, int, int,
                    312:     void *, size_t, void (*)(void *), void *, struct lwp **);
                    313:
                    314: /*
                    315:  * Once we have per-CPU run queues and a modular scheduler interface,
                    316:  * we should provide real stubs for the below that LKMs can use.
                    317:  */
                    318: extern kmutex_t        sched_mutex;
                    319:
                    320: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
                    321:
                    322: static inline void
                    323: sched_lock(const int heldmutex)
                    324: {
                    325:        (void)heldmutex;
                    326:        mutex_enter(&sched_mutex);
                    327: }
                    328:
                    329: static inline void
                    330: sched_unlock(const int heldmutex)
                    331: {
                    332:        (void)heldmutex;
                    333:        mutex_exit(&sched_mutex);
                    334: }
                    335:
                    336: #else  /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
                    337:
                    338: static inline void
                    339: sched_lock(const int heldmutex)
                    340: {
                    341:        if (!heldmutex)
                    342:                mutex_enter(&sched_mutex);
                    343: }
                    344:
                    345: static inline void
                    346: sched_unlock(int heldmutex)
                    347: {
                    348:        if (!heldmutex)
                    349:                mutex_exit(&sched_mutex);
                    350: }
                    351:
                    352: #endif /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
                    353:
1.48.2.1  yamt      354: void sched_switch_unlock(struct lwp *, struct lwp *);
1.47      ad        355:
1.2       thorpej   356: #endif /* _KERNEL */
                    357:
                    358: /* Flags for _lwp_create(), as per Solaris. */
                    359:
                    360: #define LWP_DETACHED    0x00000040
                    361: #define LWP_SUSPENDED   0x00000080
                    362:
                    363: #endif /* !_SYS_LWP_H_ */

CVSweb <webmaster@jp.NetBSD.org>