Annotation of src/sys/sys/lwp.h, Revision 1.56.2.1
1.56.2.1! ad 1: /* $NetBSD: lwp.h,v 1.56 2007/03/02 15:57:06 skd Exp $ */
1.2 thorpej 2:
3: /*-
1.47 ad 4: * Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
1.2 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.47 ad 8: * by Nathan J. Williams and Andrew Doran.
1.2 thorpej 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
1.31 christos 39: #ifndef _SYS_LWP_H_
40: #define _SYS_LWP_H_
1.2 thorpej 41:
1.47 ad 42: #include <sys/time.h>
43: #include <sys/queue.h>
44: #include <sys/callout.h>
45: #include <sys/mutex.h>
46: #include <sys/condvar.h>
47: #include <sys/signalvar.h>
48: #include <sys/specificdata.h>
1.53 yamt 49: #include <sys/syncobj.h>
1.2 thorpej 50:
51: #if defined(_KERNEL)
52: #include <machine/cpu.h> /* curcpu() and cpu_info */
53: #endif
1.47 ad 54:
1.2 thorpej 55: #include <machine/proc.h> /* Machine-dependent proc substruct. */
56:
1.47 ad 57: /*
58: * Lightweight process. Field markings and the corresponding locks:
59: *
60: * a: proclist_mutex
61: * l: *l_mutex
62: * p: l_proc->p_smutex
63: * s: sched_mutex, which may or may not be referenced by l_mutex
64: * (: unlocked, stable
65: * !: unlocked, may only be safely accessed by the LWP itself
66: * ?: undecided
67: *
68: * Fields are clustered together by usage (to increase the likelyhood
69: * of cache hits) and by size (to reduce dead space in the structure).
70: */
/*
 * The LWP structure proper.  Each field's comment begins with the lock
 * letter from the legend above that protects it.
 */
struct lwp {
	/* Scheduling and overall state */
	struct lwp *l_forw;		/* s: run queue */
	struct lwp *l_back;		/* s: run queue */
	struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
	kmutex_t * volatile l_mutex;	/* l: ptr to mutex on sched state */
	struct user *l_addr;		/* l: KVA of u-area (PROC ONLY) */
	struct mdlwp l_md;		/* l: machine-dependent fields. */
	int l_flag;			/* l: misc flag values (LW_*) */
	int l_stat;			/* l: overall LWP status (LS*) */
	struct timeval l_rtime;		/* l: real time */
	u_int l_swtime;			/* l: time swapped in or out */
	int l_holdcnt;			/* l: if non-zero, don't swap */
	int l_biglocks;			/* l: biglock count before sleep */
	pri_t l_priority;		/* l: process priority */
	pri_t l_usrpri;			/* l: user-priority */
	pri_t l_inheritedprio;		/* l: inherited priority */
	SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
	long l_nvcsw;			/* l: voluntary context switches */
	long l_nivcsw;			/* l: involuntary context switches */

	/* Synchronisation */
	struct turnstile *l_ts;		/* l: current turnstile */
	struct syncobj *l_syncobj;	/* l: sync object operations set */
	TAILQ_ENTRY(lwp) l_sleepchain;	/* l: sleep queue */
	wchan_t l_wchan;		/* l: sleep address */
	const char *l_wmesg;		/* l: reason for sleep */
	struct sleepq *l_sleepq;	/* l: current sleep queue */
	int l_sleeperr;			/* !: error before unblock */
	u_int l_slptime;		/* l: time since last blocked */
	struct callout l_tsleep_ch;	/* !: callout for tsleep */

	/* Process level and global state */
	LIST_ENTRY(lwp) l_list;		/* a: entry on list of all LWPs */
	void *l_ctxlink;		/* p: uc_link {get,set}context */
	struct proc *l_proc;		/* p: parent process */
	LIST_ENTRY(lwp) l_sibling;	/* p: entry on proc's list of LWPs */
	int l_prflag;			/* p: process level flags (LPR_*) */
	u_int l_refcnt;			/* p: reference count on this LWP */
	lwpid_t l_lid;			/* (: LWP identifier; local to proc */

	/* Signals */
	int l_sigrestore;		/* p: need to restore old sig mask */
	sigset_t l_sigwaitset;		/* p: signals being waited for */
	kcondvar_t l_sigcv;		/* p: for sigsuspend() */
	struct ksiginfo *l_sigwaited;	/* p: delivered signals from set */
	sigpend_t *l_sigpendset;	/* p: XXX issignal()/postsig() baton */
	LIST_ENTRY(lwp) l_sigwaiter;	/* p: chain on list of waiting LWPs */
	stack_t l_sigstk;		/* p: sp & on stack state variable */
	sigset_t l_sigmask;		/* p: signal mask */
	sigpend_t l_sigpend;		/* p: signals to this LWP */
	sigset_t l_sigoldmask;		/* p: mask for sigpause */

	/* Private data */
	specificdata_reference
		l_specdataref;		/* !: subsystem lwp-specific data */
	union {
		struct timeval tv;
		struct timespec ts;
	} l_ktrcsw;			/* !: for ktrace CSW trace XXX */
	void *l_private;		/* !: svr4-style lwp-private data */
	struct kauth_cred *l_cred;	/* !: cached credentials */
	void *l_emuldata;		/* !: kernel lwp-private data */
	u_short l_acflag;		/* !: accounting flags */
	u_short l_shlocks;		/* !: lockdebug: shared locks held */
	u_short l_exlocks;		/* !: lockdebug: excl. locks held */
	u_short l_locks;		/* !: lockmgr count of held locks */
	int l_pflag;			/* !: LWP private flags (LP_*) */
	int l_dupfd;			/* !: side return from cloning devs XXX */

	/* These are only used by 'options SYSCALL_TIMES' */
	uint32_t l_syscall_time;	/* !: time epoch for current syscall */
	uint64_t *l_syscall_counter;	/* !: counter for current process */
};
145:
/*
 * Default conversion between a u-area VA and its struct user pointer,
 * for ports that do not supply their own USER_TO_UAREA/UAREA_TO_USER.
 */
#if !defined(USER_TO_UAREA)
#if !defined(UAREA_USER_OFFSET)
#define	UAREA_USER_OFFSET	0
#endif /* !defined(UAREA_USER_OFFSET) */
#define	USER_TO_UAREA(user)	((vaddr_t)(user) - UAREA_USER_OFFSET)
#define	UAREA_TO_USER(uarea)	((struct user *)((uarea) + UAREA_USER_OFFSET))
#endif /* !defined(USER_TO_UAREA) */
153:
1.2 thorpej 154: LIST_HEAD(lwplist, lwp); /* a list of LWPs */
155:
1.29 christos 156: #ifdef _KERNEL
1.47 ad 157: extern kmutex_t sched_mutex; /* Mutex on global run queue */
158: extern kmutex_t alllwp_mutex; /* Mutex on alllwp */
1.2 thorpej 159: extern struct lwplist alllwp; /* List of all LWPs. */
160:
161: extern struct pool lwp_uc_pool; /* memory pool for LWP startup args */
162:
163: extern struct lwp lwp0; /* LWP for proc0 */
1.29 christos 164: #endif
1.2 thorpej 165:
1.47 ad 166: /* These flags are kept in l_flag. */
1.49 pavel 167: #define LW_INMEM 0x00000004 /* Loaded into memory. */
168: #define LW_SELECT 0x00000040 /* Selecting; wakeup/waiting danger. */
169: #define LW_SINTR 0x00000080 /* Sleep is interruptible. */
170: #define LW_SYSTEM 0x00000200 /* Kernel thread */
171: #define LW_WSUSPEND 0x00020000 /* Suspend before return to user */
172: #define LW_WCORE 0x00080000 /* Stop for core dump on return to user */
173: #define LW_WEXIT 0x00100000 /* Exit before return to user */
174: #define LW_PENDSIG 0x01000000 /* Pending signal for us */
175: #define LW_CANCELLED 0x02000000 /* tsleep should not sleep */
176: #define LW_WUSERRET 0x04000000 /* Call proc::p_userret on return to user */
177: #define LW_WREBOOT 0x08000000 /* System is rebooting, please suspend */
1.56 skd 178: #define LW_UNPARKED 0x10000000 /* unpark op pending */
1.47 ad 179:
180: /* The second set of flags is kept in l_pflag. */
181: #define LP_KTRACTIVE 0x00000001 /* Executing ktrace operation */
182: #define LP_KTRCSW 0x00000002 /* ktrace context switch marker */
183: #define LP_KTRCSWUSER 0x00000004 /* ktrace context switch marker */
184: #define LP_UFSCOW 0x00000008 /* UFS: doing copy on write */
185: #define LP_OWEUPC 0x00000010 /* Owe user profiling tick */
186:
187: /* The third set is kept in l_prflag. */
188: #define LPR_DETACHED 0x00800000 /* Won't be waited for. */
189:
190: /*
191: * Mask indicating that there is "exceptional" work to be done on return to
192: * user.
193: */
1.49 pavel 194: #define LW_USERRET (LW_WEXIT|LW_PENDSIG|LW_WREBOOT|LW_WSUSPEND|LW_WCORE|\
195: LW_WUSERRET)
1.2 thorpej 196:
197: /*
198: * Status values.
199: *
200: * A note about SRUN and SONPROC: SRUN indicates that a process is
201: * runnable but *not* yet running, i.e. is on a run queue. SONPROC
202: * indicates that the process is actually executing on a CPU, i.e.
203: * it is no longer on a run queue.
204: */
1.47 ad 205: #define LSIDL 1 /* Process being created by fork. */
206: #define LSRUN 2 /* Currently runnable. */
207: #define LSSLEEP 3 /* Sleeping on an address. */
208: #define LSSTOP 4 /* Process debugging or suspension. */
209: #define LSZOMB 5 /* Awaiting collection by parent. */
1.49 pavel 210: /* unused, for source compatibility with NetBSD 4.0 and earlier. */
211: #define LSDEAD 6 /* Process is almost a zombie. */
1.2 thorpej 212: #define LSONPROC 7 /* Process is currently on a CPU. */
213: #define LSSUSPENDED 8 /* Not running, not signalable. */
214:
215: #ifdef _KERNEL
/*
 * PHOLD/PRELE: pin an LWP's u-area in memory.  The first hold on an
 * LWP that is not resident (LW_INMEM clear) triggers uvm_swapin();
 * PRELE drops one hold.  NOTE(review): callers presumably hold the
 * appropriate LWP lock -- confirm against usage in kern_lwp.c.
 */
#define PHOLD(l)							\
do {									\
	if ((l)->l_holdcnt++ == 0 && ((l)->l_flag & LW_INMEM) == 0)	\
		uvm_swapin(l);						\
} while (/* CONSTCOND */ 0)
#define PRELE(l)	(--(l)->l_holdcnt)

/*
 * Refresh the LWP's cached credential pointer when it no longer
 * matches the process credentials (i.e. they changed since last cached).
 */
#define LWP_CACHE_CREDS(l, p)						\
do {									\
	if ((l)->l_cred != (p)->p_cred)					\
		lwp_update_creds(l);					\
} while (/* CONSTCOND */ 0)
1.2 thorpej 228:
1.47 ad 229: void preempt (void);
1.2 thorpej 230: int mi_switch (struct lwp *, struct lwp *);
231: #ifndef remrunqueue
232: void remrunqueue (struct lwp *);
233: #endif
234: void resetpriority (struct lwp *);
235: void setrunnable (struct lwp *);
236: #ifndef setrunqueue
237: void setrunqueue (struct lwp *);
238: #endif
239: #ifndef nextrunqueue
240: struct lwp *nextrunqueue(void);
241: #endif
242: void unsleep (struct lwp *);
243: #ifndef cpu_switch
244: int cpu_switch (struct lwp *, struct lwp *);
245: #endif
246: #ifndef cpu_switchto
247: void cpu_switchto (struct lwp *, struct lwp *);
248: #endif
249:
1.47 ad 250: int lwp_locked(struct lwp *, kmutex_t *);
251: void lwp_setlock(struct lwp *, kmutex_t *);
252: void lwp_unlock_to(struct lwp *, kmutex_t *);
253: void lwp_lock_retry(struct lwp *, kmutex_t *);
254: void lwp_relock(struct lwp *, kmutex_t *);
1.52 yamt 255: int lwp_trylock(struct lwp *);
1.47 ad 256: void lwp_addref(struct lwp *);
257: void lwp_delref(struct lwp *);
258: void lwp_drainrefs(struct lwp *);
1.2 thorpej 259:
260: /* Flags for _lwp_wait1 */
261: #define LWPWAIT_EXITCONTROL 0x00000001
1.42 thorpej 262: void lwpinit(void);
1.2 thorpej 263: int lwp_wait1(struct lwp *, lwpid_t, lwpid_t *, int);
264: void lwp_continue(struct lwp *);
265: void cpu_setfunc(struct lwp *, void (*)(void *), void *);
266: void startlwp(void *);
267: void upcallret(struct lwp *);
1.47 ad 268: void lwp_exit(struct lwp *);
269: void lwp_exit2(struct lwp *);
270: struct lwp *proc_representative_lwp(struct proc *, int *, int);
271: int lwp_suspend(struct lwp *, struct lwp *);
1.34 cube 272: int lwp_create1(struct lwp *, const void *, size_t, u_long, lwpid_t *);
1.38 ad 273: void lwp_update_creds(struct lwp *);
1.47 ad 274: struct lwp *lwp_find(struct proc *, int);
275: void lwp_userret(struct lwp *);
276: void lwp_need_userret(struct lwp *);
277: void lwp_free(struct lwp *, int, int);
278: void lwp_sys_init(void);
1.42 thorpej 279:
280: int lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
281: void lwp_specific_key_delete(specificdata_key_t);
1.43 christos 282: void lwp_initspecific(struct lwp *);
1.45 thorpej 283: void lwp_finispecific(struct lwp *);
1.44 thorpej 284: void * lwp_getspecific(specificdata_key_t);
1.46 hannken 285: #if defined(_LWP_API_PRIVATE)
286: void * _lwp_getspecific_by_lwp(struct lwp *, specificdata_key_t);
287: #endif
1.44 thorpej 288: void lwp_setspecific(specificdata_key_t, void *);
1.47 ad 289:
/*
 * Lock an LWP.  XXXLKM
 *
 * An LWP's lock is a *pointer* (l_mutex) that the current lock holder
 * may re-point at a different mutex (e.g. when the LWP moves between
 * queues).  After acquiring the mutex we therefore re-check that
 * l_mutex still names the mutex we hold, and retry if it changed.
 */
static inline void
lwp_lock(struct lwp *l)
{
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	kmutex_t *old;

	mutex_spin_enter(old = l->l_mutex);

	/*
	 * mutex_enter() will have posted a read barrier.  Re-test
	 * l->l_mutex.  If it has changed, we need to try again.
	 */
	if (__predict_false(l->l_mutex != old))
		lwp_lock_retry(l, old);
#else
	/* UP, no LOCKDEBUG: no pointer re-check is performed. */
	mutex_spin_enter(l->l_mutex);
#endif
}
311:
/*
 * Unlock an LWP.  XXXLKM
 *
 * The caller must hold the LWP's current lock (asserted below).
 */
static inline void
lwp_unlock(struct lwp *l)
{
	KASSERT(mutex_owned(l->l_mutex));

	mutex_spin_exit(l->l_mutex);
}
322:
/*
 * Change the priority of an LWP.  The LWP must be locked.  The actual
 * work is delegated to the sync object's changepri method, which can
 * e.g. re-sort a sleep queue; a no-change request is dropped early.
 */
static inline void
lwp_changepri(struct lwp *l, pri_t pri)
{
	KASSERT(mutex_owned(l->l_mutex));

	/* Avoid the indirect call when the priority is unchanged. */
	if (l->l_priority == pri)
		return;

	(*l->l_syncobj->sobj_changepri)(l, pri);
}
333:
/*
 * Lend a priority to an LWP (priority inheritance).  The LWP must be
 * locked.  Delegated to the sync object's lendpri method; a no-change
 * request is dropped early.
 */
static inline void
lwp_lendpri(struct lwp *l, pri_t pri)
{
	KASSERT(mutex_owned(l->l_mutex));

	/* Avoid the indirect call when the lent priority is unchanged. */
	if (l->l_inheritedprio == pri)
		return;

	(*l->l_syncobj->sobj_lendpri)(l, pri);
}
344:
/*
 * Remove an LWP from its sleep queue.  The LWP must be locked; the
 * operation is delegated to the LWP's current sync object.
 */
static inline void
lwp_unsleep(struct lwp *l)
{
	KASSERT(mutex_owned(l->l_mutex));

	(*l->l_syncobj->sobj_unsleep)(l);
}
352:
1.52 yamt 353: static inline int
354: lwp_eprio(struct lwp *l)
355: {
356:
357: return MIN(l->l_inheritedprio, l->l_priority);
358: }
359:
1.51 matt 360: int newlwp(struct lwp *, struct proc *, vaddr_t, bool, int,
1.47 ad 361: void *, size_t, void (*)(void *), void *, struct lwp **);
362:
/*
 * Once we have per-CPU run queues and a modular scheduler interface,
 * we should provide real stubs for the below that LKMs can use.
 * NOTE(review): sched_mutex is already declared extern earlier in this
 * header; this second declaration is harmless but redundant.
 */
367: extern kmutex_t sched_mutex;
368:
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)

/*
 * MP/LOCKDEBUG kernels: the global run queue mutex is always taken and
 * released for real; the heldmutex hint is ignored.
 */
static inline void
sched_lock(const int heldmutex)
{
	(void)heldmutex;	/* unused in this configuration */
	mutex_enter(&sched_mutex);
}

static inline void
sched_unlock(const int heldmutex)
{
	(void)heldmutex;	/* unused in this configuration */
	mutex_exit(&sched_mutex);
}

#else /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */

/*
 * UP kernels: the caller can pass heldmutex != 0 to indicate it
 * already holds the scheduler lock, skipping the acquire/release.
 */
static inline void
sched_lock(const int heldmutex)
{
	if (!heldmutex)
		mutex_enter(&sched_mutex);
}

static inline void
sched_unlock(int heldmutex)
{
	if (!heldmutex)
		mutex_exit(&sched_mutex);
}

#endif /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
402:
403: void sched_lock_idle(void);
404: void sched_unlock_idle(void);
405:
1.2 thorpej 406: #endif /* _KERNEL */
407:
408: /* Flags for _lwp_create(), as per Solaris. */
409:
410: #define LWP_DETACHED 0x00000040
411: #define LWP_SUSPENDED 0x00000080
412:
413: #endif /* !_SYS_LWP_H_ */
CVSweb <webmaster@jp.NetBSD.org>