Annotation of src/sys/sys/lwp.h, Revision 1.105
1.105 ! ad 1: /* $NetBSD: lwp.h,v 1.104 2008/07/02 19:44:10 rmind Exp $ */
1.2 thorpej 2:
3: /*-
1.80 ad 4: * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
1.2 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.47 ad 8: * by Nathan J. Williams and Andrew Doran.
1.2 thorpej 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
1.31 christos 32: #ifndef _SYS_LWP_H_
33: #define _SYS_LWP_H_
1.2 thorpej 34:
1.47 ad 35: #include <sys/time.h>
36: #include <sys/queue.h>
37: #include <sys/callout.h>
38: #include <sys/mutex.h>
39: #include <sys/condvar.h>
40: #include <sys/signalvar.h>
1.78 rmind 41: #include <sys/sched.h>
1.47 ad 42: #include <sys/specificdata.h>
1.53 yamt 43: #include <sys/syncobj.h>
1.85 ad 44: #include <sys/resource.h>
1.2 thorpej 45:
46: #if defined(_KERNEL)
47: #include <machine/cpu.h> /* curcpu() and cpu_info */
48: #endif
1.47 ad 49:
1.2 thorpej 50: #include <machine/proc.h> /* Machine-dependent proc substruct. */
51:
1.47 ad 52: /*
53: * Lightweight process. Field markings and the corresponding locks:
54: *
55: * a: proclist_mutex
1.58 ad 56: * c: condition variable interlock, passed to cv_wait()
1.47 ad 57: * l: *l_mutex
1.88 ad 58: * p: l_proc->p_lock
1.60 yamt 59: * s: spc_mutex, which may or may not be referenced by l_mutex
1.83 ad 60: * S: l_selcpu->sc_lock
1.47 ad 61: * (: unlocked, stable
1.68 ad 62: * !: unlocked, may only be reliably accessed by the LWP itself
1.47 ad 63: * ?: undecided
64: *
65: * Fields are clustered together by usage (to increase the likelihood
66: * of cache hits) and by size (to reduce dead space in the structure).
67: */
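/*
 * Example of the markings above (a sketch, not code from this file): a
 * field marked 'p', such as l_refcnt, may only be accessed with the
 * owning process locked:
 *
 *	mutex_enter(l->l_proc->p_lock);
 *	refcnt = l->l_refcnt;
 *	mutex_exit(l->l_proc->p_lock);
 *
 * Fields marked 'l' instead require lwp_lock()/lwp_unlock() (defined
 * below), since l_mutex can change identity while the LWP is unlocked.
 */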
1.94 ad 68: struct lockdebug;
69:
1.60 yamt 70: struct lwp {
1.47 ad 71: /* Scheduling and overall state */
1.60 yamt 72: TAILQ_ENTRY(lwp) l_runq; /* s: run queue */
73: void *l_sched_info; /* s: Scheduler-specific structure */
1.47 ad 74: struct cpu_info *volatile l_cpu;/* s: CPU we're on if LSONPROC */
75: kmutex_t * volatile l_mutex; /* l: ptr to mutex on sched state */
1.70 ad 76: int l_ctxswtch; /* l: performing a context switch */
1.47 ad 77: struct user *l_addr; /* l: KVA of u-area (PROC ONLY) */
78: struct mdlwp l_md; /* l: machine-dependent fields. */
79: int l_flag; /* l: misc flag values */
80: int l_stat; /* l: overall LWP status */
1.73 yamt 81: struct bintime l_rtime; /* l: real time */
82: struct bintime l_stime; /* l: start time (while ONPROC) */
1.47 ad 83: u_int l_swtime; /* l: time swapped in or out */
1.75 ad 84: u_int l_holdcnt; /* l: if non-zero, don't swap */
1.86 ad 85: u_int l_rticks; /* l: Saved start time of run */
86: u_int l_rticksum; /* l: Sum of ticks spent running */
1.96 rmind 87: u_int l_slpticks; /* l: Saved start time of sleep */
88: u_int l_slpticksum; /* l: Sum of ticks spent sleeping */
1.47 ad 89: int l_biglocks; /* l: biglock count before sleep */
1.68 ad 90: int l_class; /* l: scheduling class */
91: int l_kpriority; /* !: has kernel priority boost */
1.71 ad 92: pri_t l_kpribase; /* !: kernel priority base level */
1.68 ad 93: pri_t l_priority; /* l: scheduler priority */
1.54 yamt 94: pri_t l_inheritedprio;/* l: inherited priority */
1.52 yamt 95: SLIST_HEAD(, turnstile) l_pi_lenders; /* l: ts lending us priority */
1.60 yamt 96: uint64_t l_ncsw; /* l: total context switches */
97: uint64_t l_nivcsw; /* l: involuntary context switches */
1.103 rmind 98: u_int l_cpticks; /* (: Ticks of CPU time */
99: fixpt_t l_pctcpu; /* p: %cpu during l_swtime */
1.68 ad 100: fixpt_t l_estcpu; /* l: cpu time for SCHED_4BSD */
1.77 rmind 101: psetid_t l_psid; /* l: assigned processor-set ID */
102: struct cpu_info *l_target_cpu; /* l: target CPU to migrate */
1.61 ad 103: kmutex_t l_swaplock; /* l: lock to prevent swapping */
1.69 ad 104: struct lwpctl *l_lwpctl; /* p: lwpctl block kernel address */
105: struct lcpage *l_lcpage; /* p: lwpctl containing page */
1.102 christos 106: kcpuset_t *l_affinity; /* l: CPU set for affinity */
1.47 ad 107:
108: /* Synchronisation */
109: struct turnstile *l_ts; /* l: current turnstile */
110: struct syncobj *l_syncobj; /* l: sync object operations set */
111: TAILQ_ENTRY(lwp) l_sleepchain; /* l: sleep queue */
112: wchan_t l_wchan; /* l: sleep address */
113: const char *l_wmesg; /* l: reason for sleep */
114: struct sleepq *l_sleepq; /* l: current sleep queue */
115: int l_sleeperr; /* !: error before unblock */
116: u_int l_slptime; /* l: time since last blocked */
1.68 ad 117: callout_t l_timeout_ch; /* !: callout for tsleep */
1.47 ad 118:
1.61 ad 119: /* Process level and global state, misc. */
1.47 ad 120: LIST_ENTRY(lwp) l_list; /* a: entry on list of all LWPs */
121: void *l_ctxlink; /* p: uc_link {get,set}context */
122: struct proc *l_proc; /* p: parent process */
123: LIST_ENTRY(lwp) l_sibling; /* p: entry on proc's list of LWPs */
1.57 ad 124: lwpid_t l_waiter; /* p: first LWP waiting on us */
125: lwpid_t l_waitingfor; /* p: specific LWP we are waiting on */
1.47 ad 126: int l_prflag; /* p: process level flags */
127: u_int l_refcnt; /* p: reference count on this LWP */
128: lwpid_t l_lid; /* (: LWP identifier; local to proc */
1.61 ad 129: int l_selflag; /* S: select() flags */
130: SLIST_HEAD(,selinfo) l_selwait; /* S: descriptors waited on */
1.83 ad 131: struct selcpu *l_selcpu; /* !: associated per-CPU select data */
1.61 ad 132: char *l_name; /* (: name, optional */
1.47 ad 133:
134: /* Signals */
135: int l_sigrestore; /* p: need to restore old sig mask */
136: sigset_t l_sigwaitset; /* p: signals being waited for */
137: kcondvar_t l_sigcv; /* p: for sigsuspend() */
138: struct ksiginfo *l_sigwaited; /* p: delivered signals from set */
139: sigpend_t *l_sigpendset; /* p: XXX issignal()/postsig() baton */
140: LIST_ENTRY(lwp) l_sigwaiter; /* p: chain on list of waiting LWPs */
141: stack_t l_sigstk; /* p: sp & on stack state variable */
142: sigset_t l_sigmask; /* p: signal mask */
143: sigpend_t l_sigpend; /* p: signals to this LWP */
144: sigset_t l_sigoldmask; /* p: mask for sigpause */
1.2 thorpej 145:
1.47 ad 146: /* Private data */
1.42 thorpej 147: specificdata_reference
1.47 ad 148: l_specdataref; /* !: subsystem lwp-specific data */
149: union {
150: struct timeval tv;
151: struct timespec ts;
152: } l_ktrcsw; /* !: for ktrace CSW trace XXX */
153: void *l_private; /* !: svr4-style lwp-private data */
1.62 ad 154: struct lwp *l_switchto; /* !: mi_switch: switch to this LWP */
1.47 ad 155: struct kauth_cred *l_cred; /* !: cached credentials */
1.81 ad 156: struct filedesc *l_fd; /* !: cached copy of proc::p_fd */
1.47 ad 157: void *l_emuldata; /* !: kernel lwp-private data */
1.61 ad 158: u_int l_cv_signalled; /* c: restarted by cv_signal() */
1.47 ad 159: u_short l_shlocks; /* !: lockdebug: shared locks held */
160: u_short l_exlocks; /* !: lockdebug: excl. locks held */
1.90 ad 161: u_short l_unused; /* !: unused */
1.61 ad 162: u_short l_blcnt; /* !: count of kernel_lock held */
1.90 ad 163: int l_nopreempt; /* !: don't preempt me! */
164: u_int l_dopreempt; /* s: kernel preemption pending */
1.47 ad 165: int l_pflag; /* !: LWP private flags */
166: int l_dupfd; /* !: side return from cloning devs XXX */
1.85 ad 167: struct rusage l_ru; /* !: accounting information */
1.90 ad 168: uint64_t l_pfailtime; /* !: for kernel preemption */
169: uintptr_t l_pfailaddr; /* !: for kernel preemption */
170: uintptr_t l_pfaillock; /* !: for kernel preemption */
1.94 ad 171: _TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */
1.50 dsl 172:
173: /* These are only used by 'options SYSCALL_TIMES' */
174: uint32_t l_syscall_time; /* !: time epoch for current syscall */
175: uint64_t *l_syscall_counter; /* !: counter for current process */
1.60 yamt 176: };
1.2 thorpej 177:
1.37 yamt 178: #if !defined(USER_TO_UAREA)
179: #if !defined(UAREA_USER_OFFSET)
180: #define UAREA_USER_OFFSET 0
181: #endif /* !defined(UAREA_USER_OFFSET) */
182: #define USER_TO_UAREA(user) ((vaddr_t)(user) - UAREA_USER_OFFSET)
183: #define UAREA_TO_USER(uarea) ((struct user *)((uarea) + UAREA_USER_OFFSET))
 184: #endif /* !defined(USER_TO_UAREA) */
185:
1.2 thorpej 186: LIST_HEAD(lwplist, lwp); /* a list of LWPs */
187:
1.29 christos 188: #ifdef _KERNEL
1.47 ad 189: extern kmutex_t alllwp_mutex; /* Mutex on alllwp */
1.2 thorpej 190: extern struct lwplist alllwp; /* List of all LWPs. */
191:
192: extern struct pool lwp_uc_pool; /* memory pool for LWP startup args */
193:
1.58 ad 194: extern lwp_t lwp0; /* LWP for proc0 */
1.29 christos 195: #endif
1.2 thorpej 196:
1.47 ad 197: /* These flags are kept in l_flag. */
1.60 yamt 198: #define LW_IDLE 0x00000001 /* Idle lwp. */
1.49 pavel 199: #define LW_INMEM 0x00000004 /* Loaded into memory. */
200: #define LW_SINTR 0x00000080 /* Sleep is interruptible. */
201: #define LW_SYSTEM 0x00000200 /* Kernel thread */
202: #define LW_WSUSPEND 0x00020000 /* Suspend before return to user */
1.96 rmind 203: #define LW_BATCH 0x00040000 /* LWP tends to hog CPU */
1.49 pavel 204: #define LW_WCORE 0x00080000 /* Stop for core dump on return to user */
205: #define LW_WEXIT 0x00100000 /* Exit before return to user */
1.77 rmind 206: #define LW_AFFINITY 0x00200000 /* Affinity is assigned to the thread */
1.49 pavel 207: #define LW_PENDSIG 0x01000000 /* Pending signal for us */
208: #define LW_CANCELLED 0x02000000 /* tsleep should not sleep */
209: #define LW_WUSERRET 0x04000000 /* Call proc::p_userret on return to user */
210: #define LW_WREBOOT 0x08000000 /* System is rebooting, please suspend */
1.60 yamt 211: #define LW_UNPARKED 0x10000000 /* Unpark op pending */
1.47 ad 212:
213: /* The second set of flags is kept in l_pflag. */
214: #define LP_KTRACTIVE 0x00000001 /* Executing ktrace operation */
215: #define LP_KTRCSW 0x00000002 /* ktrace context switch marker */
 216: #define LP_KTRCSWUSER 0x00000004 /* ktrace context switch marker (user) */
217: #define LP_OWEUPC 0x00000010 /* Owe user profiling tick */
1.61 ad 218: #define LP_MPSAFE 0x00000020 /* Starts life without kernel_lock */
1.68 ad 219: #define LP_INTR 0x00000040 /* Soft interrupt handler */
1.76 ad 220: #define LP_SYSCTLWRITE 0x00000080 /* sysctl write lock held */
1.97 ad 221: #define LP_TIMEINTR 0x00010000 /* Time this soft interrupt */
222: #define LP_RUNNING 0x20000000 /* Active on a CPU */
1.87 ad 223: #define LP_BOUND 0x80000000 /* Bound to a CPU */
1.47 ad 224:
225: /* The third set is kept in l_prflag. */
226: #define LPR_DETACHED 0x00800000 /* Won't be waited for. */
1.82 ad 227: #define LPR_CRMOD 0x00000100 /* Credentials modified */
1.47 ad 228:
229: /*
230: * Mask indicating that there is "exceptional" work to be done on return to
231: * user.
232: */
1.49 pavel 233: #define LW_USERRET (LW_WEXIT|LW_PENDSIG|LW_WREBOOT|LW_WSUSPEND|LW_WCORE|\
234: LW_WUSERRET)
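/*
 * A sketch of how this mask is typically consumed on the machine-dependent
 * return-to-user path (illustrative only; the real check is MD code):
 *
 *	if (__predict_false(l->l_flag & LW_USERRET))
 *		lwp_userret(l);
 */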
1.2 thorpej 235:
236: /*
237: * Status values.
238: *
 239: * A note about LSRUN and LSONPROC: LSRUN indicates that an LWP is
 240: * runnable but *not* yet running, i.e. it is on a run queue. LSONPROC
 241: * indicates that the LWP is actually executing on a CPU, i.e.
 242: * it is no longer on a run queue.
243: */
1.47 ad 244: #define LSIDL 1 /* LWP being created by fork. */
245: #define LSRUN 2 /* Currently runnable. */
246: #define LSSLEEP 3 /* Sleeping on an address. */
247: #define LSSTOP 4 /* Process debugging or suspension. */
248: #define LSZOMB 5 /* Awaiting collection by parent. */
1.49 pavel 249: /* unused, for source compatibility with NetBSD 4.0 and earlier. */
 250: #define LSDEAD 6 /* LWP is almost a zombie. */
1.2 thorpej 251: #define LSONPROC 7 /* LWP is currently on a CPU. */
252: #define LSSUSPENDED 8 /* Not running, not signalable. */
253:
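/*
 * A simplified sketch of common state transitions (not exhaustive;
 * LSSTOP and LSSUSPENDED can be entered from several of these states):
 *
 *	LSIDL -> LSRUN -> LSONPROC -> LSSLEEP -> LSRUN -> ...
 *	                      |
 *	                      +--> LSZOMB (via lwp_exit())
 */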
254: #ifdef _KERNEL
1.38 ad 255: #define LWP_CACHE_CREDS(l, p) \
256: do { \
1.82 ad 257: (void)p; \
258: if (__predict_false((l)->l_prflag & LPR_CRMOD)) \
1.38 ad 259: lwp_update_creds(l); \
260: } while (/* CONSTCOND */ 0)
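/*
 * Usage sketch (illustrative): LWP_CACHE_CREDS() is cheap enough to call
 * on every syscall entry; it refreshes the cached l_cred only when
 * LPR_CRMOD records that the process credentials were modified:
 *
 *	struct lwp *l = curlwp;
 *
 *	LWP_CACHE_CREDS(l, l->l_proc);
 *	... use l->l_cred without further locking ...
 */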
1.2 thorpej 261:
1.60 yamt 262: void lwp_startup(lwp_t *, lwp_t *);
1.2 thorpej 263:
1.58 ad 264: int lwp_locked(lwp_t *, kmutex_t *);
265: void lwp_setlock(lwp_t *, kmutex_t *);
266: void lwp_unlock_to(lwp_t *, kmutex_t *);
1.100 ad 267: kmutex_t *lwp_lock_retry(lwp_t *, kmutex_t *);
1.58 ad 268: void lwp_relock(lwp_t *, kmutex_t *);
269: int lwp_trylock(lwp_t *);
270: void lwp_addref(lwp_t *);
271: void lwp_delref(lwp_t *);
272: void lwp_drainrefs(lwp_t *);
1.2 thorpej 273:
274: /* Flags for _lwp_wait1 */
275: #define LWPWAIT_EXITCONTROL 0x00000001
1.42 thorpej 276: void lwpinit(void);
1.58 ad 277: int lwp_wait1(lwp_t *, lwpid_t, lwpid_t *, int);
278: void lwp_continue(lwp_t *);
279: void cpu_setfunc(lwp_t *, void (*)(void *), void *);
1.2 thorpej 280: void startlwp(void *);
1.58 ad 281: void upcallret(lwp_t *);
1.74 perry 282: void lwp_exit(lwp_t *) __dead;
1.60 yamt 283: void lwp_exit_switchaway(lwp_t *);
1.58 ad 284: int lwp_suspend(lwp_t *, lwp_t *);
285: int lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
286: void lwp_update_creds(lwp_t *);
1.77 rmind 287: void lwp_migrate(lwp_t *, struct cpu_info *);
288: lwp_t *lwp_find2(pid_t, lwpid_t);
289: lwp_t *lwp_find(proc_t *, int);
1.58 ad 290: void lwp_userret(lwp_t *);
291: void lwp_need_userret(lwp_t *);
292: void lwp_free(lwp_t *, bool, bool);
1.47 ad 293: void lwp_sys_init(void);
1.80 ad 294: u_int lwp_unsleep(lwp_t *, bool);
1.42 thorpej 295:
296: int lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
297: void lwp_specific_key_delete(specificdata_key_t);
1.58 ad 298: void lwp_initspecific(lwp_t *);
299: void lwp_finispecific(lwp_t *);
300: void *lwp_getspecific(specificdata_key_t);
1.46 hannken 301: #if defined(_LWP_API_PRIVATE)
1.58 ad 302: void *_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t);
1.46 hannken 303: #endif
1.44 thorpej 304: void lwp_setspecific(specificdata_key_t, void *);
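/*
 * Usage sketch for LWP-specific data (the key, destructor and data names
 * here are hypothetical, for illustration only):
 *
 *	static specificdata_key_t example_key;
 *
 *	error = lwp_specific_key_create(&example_key, example_dtor);
 *	lwp_setspecific(example_key, data);
 *	data = lwp_getspecific(example_key);
 */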
1.47 ad 305:
1.63 ad 306: /* Syscalls */
307: int lwp_park(struct timespec *, const void *);
308: int lwp_unpark(lwpid_t, const void *);
309:
1.72 yamt 310: /* ddb */
311: void lwp_whatis(uintptr_t, void (*)(const char *, ...));
312:
1.63 ad 313:
1.47 ad 314: /*
315: * Lock an LWP. XXXLKM
316: */
317: static inline void
1.58 ad 318: lwp_lock(lwp_t *l)
1.47 ad 319: {
320: kmutex_t *old;
321:
322: mutex_spin_enter(old = l->l_mutex);
323:
324: /*
 325: * mutex_spin_enter() will have posted a read barrier. Re-test
326: * l->l_mutex. If it has changed, we need to try again.
327: */
328: if (__predict_false(l->l_mutex != old))
329: lwp_lock_retry(l, old);
330: }
331:
332: /*
333: * Unlock an LWP. XXXLKM
334: */
335: static inline void
1.58 ad 336: lwp_unlock(lwp_t *l)
1.47 ad 337: {
338: mutex_spin_exit(l->l_mutex);
339: }
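/*
 * Example (a sketch): fields marked 'l' above are accessed under the LWP
 * lock, which follows l_mutex even as the LWP migrates between run
 * queues and sleep queues:
 *
 *	lwp_lock(l);
 *	stat = l->l_stat;
 *	lwp_unlock(l);
 */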
340:
341: static inline void
1.58 ad 342: lwp_changepri(lwp_t *l, pri_t pri)
1.47 ad 343: {
1.60 yamt 344: KASSERT(mutex_owned(l->l_mutex));
1.47 ad 345:
346: (*l->l_syncobj->sobj_changepri)(l, pri);
347: }
348:
349: static inline void
1.58 ad 350: lwp_lendpri(lwp_t *l, pri_t pri)
1.52 yamt 351: {
1.60 yamt 352: KASSERT(mutex_owned(l->l_mutex));
1.52 yamt 353:
354: if (l->l_inheritedprio == pri)
355: return;
356:
357: (*l->l_syncobj->sobj_lendpri)(l, pri);
358: }
359:
1.68 ad 360: static inline pri_t
1.58 ad 361: lwp_eprio(lwp_t *l)
1.52 yamt 362: {
1.68 ad 363: pri_t pri;
1.52 yamt 364:
1.68 ad 365: pri = l->l_priority;
366: if (l->l_kpriority && pri < PRI_KERNEL)
1.71 ad 367: pri = (pri >> 1) + l->l_kpribase;
1.68 ad 368: return MAX(l->l_inheritedprio, pri);
1.52 yamt 369: }
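/*
 * Worked example (hypothetical values): for an LWP with l_kpriority set,
 * l_priority below PRI_KERNEL and no turnstile lending, the effective
 * priority is (l_priority >> 1) + l_kpribase, i.e. the user priority is
 * halved and rebased into the kernel range.
 */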
370:
1.68 ad 371: int lwp_create(lwp_t *, struct proc *, vaddr_t, bool, int,
372: void *, size_t, void (*)(void *), void *, lwp_t **, int);
1.47 ad 373:
374: /*
1.60 yamt 375: * We should provide real stubs for the functions below that LKMs can use.
1.47 ad 376: */
377:
378: static inline void
1.60 yamt 379: spc_lock(struct cpu_info *ci)
1.47 ad 380: {
1.60 yamt 381: mutex_spin_enter(ci->ci_schedstate.spc_mutex);
1.47 ad 382: }
383:
384: static inline void
1.60 yamt 385: spc_unlock(struct cpu_info *ci)
1.47 ad 386: {
1.60 yamt 387: mutex_spin_exit(ci->ci_schedstate.spc_mutex);
1.47 ad 388: }
389:
1.77 rmind 390: static inline void
391: spc_dlock(struct cpu_info *ci1, struct cpu_info *ci2)
392: {
393: struct schedstate_percpu *spc1 = &ci1->ci_schedstate;
394: struct schedstate_percpu *spc2 = &ci2->ci_schedstate;
395:
396: KASSERT(ci1 != ci2);
1.86 ad 397: if (ci1 < ci2) {
1.77 rmind 398: mutex_spin_enter(spc1->spc_mutex);
399: mutex_spin_enter(spc2->spc_mutex);
400: } else {
401: mutex_spin_enter(spc2->spc_mutex);
402: mutex_spin_enter(spc1->spc_mutex);
403: }
404: }
405:
406: static inline void
407: spc_dunlock(struct cpu_info *ci1, struct cpu_info *ci2)
408: {
409: struct schedstate_percpu *spc1 = &ci1->ci_schedstate;
410: struct schedstate_percpu *spc2 = &ci2->ci_schedstate;
411:
412: KASSERT(ci1 != ci2);
1.86 ad 413: mutex_spin_exit(spc1->spc_mutex);
414: mutex_spin_exit(spc2->spc_mutex);
1.77 rmind 415: }
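/*
 * spc_dlock() orders the two acquisitions by cpu_info address, so two
 * CPUs cross-locking each other's run queues cannot deadlock.  Usage
 * sketch (illustrative; ci_from/ci_to are hypothetical), e.g. when
 * migrating an LWP between CPUs:
 *
 *	spc_dlock(ci_from, ci_to);
 *	... move the LWP between the two run queues ...
 *	spc_dunlock(ci_from, ci_to);
 */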
416:
1.89 ad 417: /*
1.92 ad 418: * Allow machine-dependent code to override curlwp in <machine/cpu.h> for
419: * its own convenience. Otherwise, we declare it as appropriate.
420: */
421: #if !defined(curlwp)
422: #if defined(MULTIPROCESSOR)
423: #define curlwp curcpu()->ci_curlwp /* Current running LWP */
424: #else
425: extern struct lwp *curlwp; /* Current running LWP */
426: #endif /* MULTIPROCESSOR */
427: #endif /* ! curlwp */
428: #define curproc (curlwp->l_proc)
429:
430: static inline bool
431: CURCPU_IDLE_P(void)
432: {
433: struct cpu_info *ci = curcpu();
434: return ci->ci_data.cpu_onproc == ci->ci_data.cpu_idlelwp;
435: }
436:
437: /*
1.89 ad 438: * Disable and re-enable preemption. Only for low-level kernel
1.101 ad 439: * use. Device drivers and anything that could potentially be
440: * compiled as a module should use kpreempt_disable() and
1.89 ad 441: * kpreempt_enable().
442: */
443: static inline void
1.90 ad 444: KPREEMPT_DISABLE(lwp_t *l)
1.89 ad 445: {
446:
1.90 ad 447: KASSERT(l == curlwp);
448: l->l_nopreempt++;
1.89 ad 449: __insn_barrier();
450: }
451:
452: static inline void
1.90 ad 453: KPREEMPT_ENABLE(lwp_t *l)
1.89 ad 454: {
455:
1.90 ad 456: KASSERT(l == curlwp);
457: KASSERT(l->l_nopreempt > 0);
458: __insn_barrier();
459: if (--l->l_nopreempt != 0)
460: return;
461: __insn_barrier();
462: if (__predict_false(l->l_dopreempt))
463: kpreempt(0);
1.89 ad 464: __insn_barrier();
465: }
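/*
 * Usage sketch (illustrative): bracket a short section that must stay on
 * the current CPU; a preemption requested meanwhile is noted in
 * l_dopreempt and serviced by KPREEMPT_ENABLE():
 *
 *	lwp_t *l = curlwp;
 *
 *	KPREEMPT_DISABLE(l);
 *	... touch curcpu()-private state ...
 *	KPREEMPT_ENABLE(l);
 */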
466:
1.90 ad 467: /* For lwp::l_dopreempt */
468: #define DOPREEMPT_ACTIVE 0x01
469: #define DOPREEMPT_COUNTED 0x02
470:
1.60 yamt 471: #endif /* _KERNEL */
1.2 thorpej 472:
473: /* Flags for _lwp_create(), as per Solaris. */
474: #define LWP_DETACHED 0x00000040
475: #define LWP_SUSPENDED 0x00000080
1.98 ad 476: #define LWP_VFORK 0x80000000
1.2 thorpej 477:
478: #endif /* !_SYS_LWP_H_ */