Annotation of src/sys/sys/lwp.h, Revision 1.56.2.10
1.56.2.10! ad 1: /* $NetBSD: lwp.h,v 1.56.2.9 2007/06/17 21:32:01 ad Exp $ */
1.2 thorpej 2:
3: /*-
1.47 ad 4: * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
1.2 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.47 ad 8: * by Nathan J. Williams and Andrew Doran.
1.2 thorpej 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
1.31 christos 39: #ifndef _SYS_LWP_H_
40: #define _SYS_LWP_H_
1.2 thorpej 41:
1.47 ad 42: #include <sys/time.h>
43: #include <sys/queue.h>
44: #include <sys/callout.h>
45: #include <sys/mutex.h>
46: #include <sys/condvar.h>
47: #include <sys/signalvar.h>
48: #include <sys/specificdata.h>
1.53 yamt 49: #include <sys/syncobj.h>
1.2 thorpej 50:
51: #if defined(_KERNEL)
52: #include <machine/cpu.h> /* curcpu() and cpu_info */
53: #endif
1.47 ad 54:
1.2 thorpej 55: #include <machine/proc.h> /* Machine-dependent proc substruct. */
56:
1.47 ad 57: /*
58: * Lightweight process. Field markings and the corresponding locks:
59: *
60: * a: proclist_mutex
1.56.2.5 ad 61: * c: condition variable interlock, passed to cv_wait()
1.47 ad 62: * l: *l_mutex
63: * p: l_proc->p_smutex
1.56.2.8 ad 64: * s: spc_mutex, which may or may not be referenced by l_mutex
65: * t: l_proc->p_stmutex
1.56.2.4 ad 66: * S: select_lock
1.47 ad 67: * (: unlocked, stable
68: * !: unlocked, may only be safely accessed by the LWP itself
69: * ?: undecided
70: *
71: * Fields are clustered together by usage (to increase the likelyhood
72: * of cache hits) and by size (to reduce dead space in the structure).
73: */
/*
 * Per-LWP state.  See the field-marking legend above for which lock
 * protects each member.  Field order is deliberate (cache clustering
 * and packing) -- do not reorder.
 */
struct lwp {
	/* Scheduling and overall state */
	TAILQ_ENTRY(lwp) l_runq;	/* s: run queue */
	void		*l_sched_info;	/* s: Scheduler-specific structure */
	struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
	kmutex_t * volatile l_mutex;	/* l: ptr to mutex on sched state */
	struct user	*l_addr;	/* l: KVA of u-area (PROC ONLY) */
	struct mdlwp	l_md;		/* l: machine-dependent fields. */
	int		l_flag;		/* l: misc flag values */
	int		l_stat;		/* l: overall LWP status */
	struct timeval	l_rtime;	/* l: real time */
	u_int		l_swtime;	/* l: time swapped in or out */
	int		l_holdcnt;	/* l: if non-zero, don't swap */
	int		l_biglocks;	/* l: biglock count before sleep */
	pri_t		l_priority;	/* l: process priority */
	pri_t		l_usrpri;	/* l: user-priority */
	pri_t		l_inheritedprio;/* l: inherited priority */
	SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
	uint64_t	l_ncsw;		/* l: total context switches */
	uint64_t	l_nivcsw;	/* l: involuntary context switches */
	int		l_cpticks;	/* t: Ticks of CPU time */
	fixpt_t		l_pctcpu;	/* t: %cpu during l_swtime */
	kmutex_t	l_swaplock;	/* l: lock to prevent swapping */

	/* Synchronisation */
	struct turnstile *l_ts;		/* l: current turnstile */
	struct syncobj	*l_syncobj;	/* l: sync object operations set */
	TAILQ_ENTRY(lwp) l_sleepchain;	/* l: sleep queue */
	wchan_t		l_wchan;	/* l: sleep address */
	const char	*l_wmesg;	/* l: reason for sleep */
	struct sleepq	*l_sleepq;	/* l: current sleep queue */
	int		l_sleeperr;	/* !: error before unblock */
	u_int		l_slptime;	/* l: time since last blocked */
	callout_t	l_tsleep_ch;	/* !: callout for tsleep */

	/* Process level and global state, misc. */
	LIST_ENTRY(lwp)	l_list;		/* a: entry on list of all LWPs */
	void		*l_ctxlink;	/* p: uc_link {get,set}context */
	struct proc	*l_proc;	/* p: parent process */
	LIST_ENTRY(lwp)	l_sibling;	/* p: entry on proc's list of LWPs */
	lwpid_t		l_waiter;	/* p: first LWP waiting on us */
	lwpid_t		l_waitingfor;	/* p: specific LWP we are waiting on */
	int		l_prflag;	/* p: process level flags */
	u_int		l_refcnt;	/* p: reference count on this LWP */
	lwpid_t		l_lid;		/* (: LWP identifier; local to proc */
	int		l_selflag;	/* S: select() flags */
	SLIST_HEAD(,selinfo) l_selwait;	/* S: descriptors waited on */
	char		*l_name;	/* (: name, optional */

	/* Signals */
	int		l_sigrestore;	/* p: need to restore old sig mask */
	sigset_t	l_sigwaitset;	/* p: signals being waited for */
	kcondvar_t	l_sigcv;	/* p: for sigsuspend() */
	struct ksiginfo	*l_sigwaited;	/* p: delivered signals from set */
	sigpend_t	*l_sigpendset;	/* p: XXX issignal()/postsig() baton */
	LIST_ENTRY(lwp)	l_sigwaiter;	/* p: chain on list of waiting LWPs */
	stack_t		l_sigstk;	/* p: sp & on stack state variable */
	sigset_t	l_sigmask;	/* p: signal mask */
	sigpend_t	l_sigpend;	/* p: signals to this LWP */
	sigset_t	l_sigoldmask;	/* p: mask for sigpause */

	/* Private data */
	specificdata_reference
		l_specdataref;		/* !: subsystem lwp-specific data */
	union {
		struct timeval tv;
		struct timespec ts;
	} l_ktrcsw;			/* !: for ktrace CSW trace XXX */
	void		*l_private;	/* !: svr4-style lwp-private data */
	lwp_t		*l_pinned;	/* !: lwp under this one */
	struct kauth_cred *l_cred;	/* !: cached credentials */
	void		*l_emuldata;	/* !: kernel lwp-private data */
	u_int		l_cv_signalled;	/* c: restarted by cv_signal() */
	u_short		l_shlocks;	/* !: lockdebug: shared locks held */
	u_short		l_exlocks;	/* !: lockdebug: excl. locks held */
	u_short		l_locks;	/* !: lockmgr count of held locks */
	u_short		l_blcnt;	/* !: count of kernel_lock held */
	int		l_pflag;	/* !: LWP private flags */
	int		l_dupfd;	/* !: side return from cloning devs XXX */

	/* These are only used by 'options SYSCALL_TIMES' */
	uint32_t	l_syscall_time;	/* !: time epoch for current syscall */
	uint64_t	*l_syscall_counter; /* !: counter for current process */
};
1.2 thorpej 158:
1.37 yamt 159: #if !defined(USER_TO_UAREA)
160: #if !defined(UAREA_USER_OFFSET)
161: #define UAREA_USER_OFFSET 0
162: #endif /* !defined(UAREA_USER_OFFSET) */
163: #define USER_TO_UAREA(user) ((vaddr_t)(user) - UAREA_USER_OFFSET)
164: #define UAREA_TO_USER(uarea) ((struct user *)((uarea) + UAREA_USER_OFFSET))
165: #endif /* !defined(USER_TO_UAREA) */
166:
1.2 thorpej 167: LIST_HEAD(lwplist, lwp); /* a list of LWPs */
168:
1.29 christos 169: #ifdef _KERNEL
1.47 ad 170: extern kmutex_t alllwp_mutex; /* Mutex on alllwp */
1.2 thorpej 171: extern struct lwplist alllwp; /* List of all LWPs. */
172:
173: extern struct pool lwp_uc_pool; /* memory pool for LWP startup args */
174:
1.56.2.5 ad 175: extern lwp_t lwp0; /* LWP for proc0 */
1.29 christos 176: #endif
1.2 thorpej 177:
1.47 ad 178: /* These flags are kept in l_flag. */
1.56.2.8 ad 179: #define LW_IDLE 0x00000001 /* Idle lwp. */
1.49 pavel 180: #define LW_INMEM 0x00000004 /* Loaded into memory. */
181: #define LW_SINTR 0x00000080 /* Sleep is interruptible. */
182: #define LW_SYSTEM 0x00000200 /* Kernel thread */
183: #define LW_WSUSPEND 0x00020000 /* Suspend before return to user */
184: #define LW_WCORE 0x00080000 /* Stop for core dump on return to user */
185: #define LW_WEXIT 0x00100000 /* Exit before return to user */
186: #define LW_PENDSIG 0x01000000 /* Pending signal for us */
187: #define LW_CANCELLED 0x02000000 /* tsleep should not sleep */
188: #define LW_WUSERRET 0x04000000 /* Call proc::p_userret on return to user */
189: #define LW_WREBOOT 0x08000000 /* System is rebooting, please suspend */
1.56.2.8 ad 190: #define LW_UNPARKED 0x10000000 /* Unpark op pending */
191: #define LW_RUNNING 0x20000000 /* Active on a CPU (except if LSZOMB) */
1.56.2.9 ad 192: #define LW_INTR 0x40000000 /* Soft interrupt handler */
1.56.2.8 ad 193: #define LW_BOUND 0x80000000 /* Bound to a CPU */
1.47 ad 194:
195: /* The second set of flags is kept in l_pflag. */
196: #define LP_KTRACTIVE 0x00000001 /* Executing ktrace operation */
197: #define LP_KTRCSW 0x00000002 /* ktrace context switch marker */
198: #define LP_KTRCSWUSER 0x00000004 /* ktrace context switch marker */
199: #define LP_UFSCOW 0x00000008 /* UFS: doing copy on write */
200: #define LP_OWEUPC 0x00000010 /* Owe user profiling tick */
1.56.2.3 ad 201: #define LP_MPSAFE 0x00000020 /* Starts life without kernel_lock */
1.47 ad 202:
203: /* The third set is kept in l_prflag. */
204: #define LPR_DETACHED 0x00800000 /* Won't be waited for. */
205:
206: /*
207: * Mask indicating that there is "exceptional" work to be done on return to
208: * user.
209: */
1.49 pavel 210: #define LW_USERRET (LW_WEXIT|LW_PENDSIG|LW_WREBOOT|LW_WSUSPEND|LW_WCORE|\
211: LW_WUSERRET)
1.2 thorpej 212:
213: /*
214: * Status values.
215: *
216: * A note about SRUN and SONPROC: SRUN indicates that a process is
217: * runnable but *not* yet running, i.e. is on a run queue. SONPROC
218: * indicates that the process is actually executing on a CPU, i.e.
219: * it is no longer on a run queue.
220: */
1.47 ad 221: #define LSIDL 1 /* Process being created by fork. */
222: #define LSRUN 2 /* Currently runnable. */
223: #define LSSLEEP 3 /* Sleeping on an address. */
224: #define LSSTOP 4 /* Process debugging or suspension. */
225: #define LSZOMB 5 /* Awaiting collection by parent. */
1.49 pavel 226: /* unused, for source compatibility with NetBSD 4.0 and earlier. */
227: #define LSDEAD 6 /* Process is almost a zombie. */
1.2 thorpej 228: #define LSONPROC 7 /* Process is currently on a CPU. */
229: #define LSSUSPENDED 8 /* Not running, not signalable. */
230:
231: #ifdef _KERNEL
1.38 ad 232: #define LWP_CACHE_CREDS(l, p) \
233: do { \
234: if ((l)->l_cred != (p)->p_cred) \
235: lwp_update_creds(l); \
236: } while (/* CONSTCOND */ 0)
1.2 thorpej 237:
1.56.2.8 ad 238: void lwp_startup(lwp_t *, lwp_t *);
1.2 thorpej 239:
1.56.2.5 ad 240: int lwp_locked(lwp_t *, kmutex_t *);
241: void lwp_setlock(lwp_t *, kmutex_t *);
242: void lwp_unlock_to(lwp_t *, kmutex_t *);
243: void lwp_lock_retry(lwp_t *, kmutex_t *);
244: void lwp_relock(lwp_t *, kmutex_t *);
245: int lwp_trylock(lwp_t *);
246: void lwp_addref(lwp_t *);
247: void lwp_delref(lwp_t *);
248: void lwp_drainrefs(lwp_t *);
1.2 thorpej 249:
250: /* Flags for _lwp_wait1 */
251: #define LWPWAIT_EXITCONTROL 0x00000001
1.42 thorpej 252: void lwpinit(void);
1.56.2.5 ad 253: int lwp_wait1(lwp_t *, lwpid_t, lwpid_t *, int);
254: void lwp_continue(lwp_t *);
255: void cpu_setfunc(lwp_t *, void (*)(void *), void *);
1.2 thorpej 256: void startlwp(void *);
1.56.2.5 ad 257: void upcallret(lwp_t *);
1.56.2.8 ad 258: void lwp_exit(lwp_t *) __attribute__((__noreturn__));
259: void lwp_exit_switchaway(lwp_t *);
260: lwp_t *proc_representative_lwp(struct proc *, int *, int);
1.56.2.5 ad 261: int lwp_suspend(lwp_t *, lwp_t *);
262: int lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
263: void lwp_update_creds(lwp_t *);
1.56.2.8 ad 264: lwp_t *lwp_find(struct proc *, int);
1.56.2.5 ad 265: void lwp_userret(lwp_t *);
266: void lwp_need_userret(lwp_t *);
267: void lwp_free(lwp_t *, bool, bool);
1.47 ad 268: void lwp_sys_init(void);
1.42 thorpej 269:
270: int lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
271: void lwp_specific_key_delete(specificdata_key_t);
1.56.2.5 ad 272: void lwp_initspecific(lwp_t *);
273: void lwp_finispecific(lwp_t *);
274: void *lwp_getspecific(specificdata_key_t);
1.46 hannken 275: #if defined(_LWP_API_PRIVATE)
1.56.2.5 ad 276: void *_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t);
1.46 hannken 277: #endif
1.44 thorpej 278: void lwp_setspecific(specificdata_key_t, void *);
1.47 ad 279:
/*
 * Lock an LWP. XXXLKM
 *
 * An LWP's l_mutex pointer can be changed while the LWP moves between
 * sleep queues / scheduler states, so after acquiring the mutex we must
 * re-check that it is still the LWP's current lock, and retry if not.
 */
static inline void
lwp_lock(lwp_t *l)
{
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	kmutex_t *old;

	mutex_spin_enter(old = l->l_mutex);

	/*
	 * mutex_enter() will have posted a read barrier.  Re-test
	 * l->l_mutex.  If it has changed, we need to try again.
	 */
	if (__predict_false(l->l_mutex != old))
		lwp_lock_retry(l, old);
#else
	/* UP, no lock debugging: l_mutex cannot change under our feet. */
	mutex_spin_enter(l->l_mutex);
#endif
}
301:
/*
 * Unlock an LWP. XXXLKM
 *
 * Releases the scheduler-state mutex currently covering the LWP.
 * The caller must hold l->l_mutex (asserted below).
 */
static inline void
lwp_unlock(lwp_t *l)
{
	KASSERT(mutex_owned(l->l_mutex));

	mutex_spin_exit(l->l_mutex);
}
312:
313: static inline void
1.56.2.5 ad 314: lwp_changepri(lwp_t *l, pri_t pri)
1.47 ad 315: {
1.56.2.1 ad 316: KASSERT(mutex_owned(l->l_mutex));
1.47 ad 317:
1.52 yamt 318: if (l->l_priority == pri)
319: return;
320:
1.47 ad 321: (*l->l_syncobj->sobj_changepri)(l, pri);
322: }
323:
324: static inline void
1.56.2.5 ad 325: lwp_lendpri(lwp_t *l, pri_t pri)
1.52 yamt 326: {
1.56.2.1 ad 327: KASSERT(mutex_owned(l->l_mutex));
1.52 yamt 328:
329: if (l->l_inheritedprio == pri)
330: return;
331:
332: (*l->l_syncobj->sobj_lendpri)(l, pri);
333: }
334:
/*
 * Remove an LWP from the sleep queue it is blocked on, via the sync
 * object's unsleep operation.  The LWP must be locked.
 */
static inline void
lwp_unsleep(lwp_t *l)
{
	KASSERT(mutex_owned(l->l_mutex));

	(*l->l_syncobj->sobj_unsleep)(l);
}
342:
1.52 yamt 343: static inline int
1.56.2.5 ad 344: lwp_eprio(lwp_t *l)
1.52 yamt 345: {
346:
1.56.2.9 ad 347: return MAX(l->l_inheritedprio, l->l_priority);
1.52 yamt 348: }
349:
1.56.2.5 ad 350: int newlwp(lwp_t *, struct proc *, vaddr_t, bool, int,
351: void *, size_t, void (*)(void *), void *, lwp_t **);
1.47 ad 352:
353: /*
1.56.2.8 ad 354: * We should provide real stubs for the below that LKMs can use.
1.47 ad 355: */
356:
357: static inline void
1.56.2.8 ad 358: spc_lock(struct cpu_info *ci)
1.47 ad 359: {
1.56.2.8 ad 360: mutex_spin_enter(ci->ci_schedstate.spc_mutex);
1.47 ad 361: }
362:
363: static inline void
1.56.2.8 ad 364: spc_unlock(struct cpu_info *ci)
1.47 ad 365: {
1.56.2.8 ad 366: mutex_spin_exit(ci->ci_schedstate.spc_mutex);
1.47 ad 367: }
368:
1.56.2.8 ad 369: #endif /* _KERNEL */
1.2 thorpej 370:
371: /* Flags for _lwp_create(), as per Solaris. */
372:
373: #define LWP_DETACHED 0x00000040
374: #define LWP_SUSPENDED 0x00000080
375:
376: #endif /* !_SYS_LWP_H_ */
CVSweb <webmaster@jp.NetBSD.org>