Annotation of src/sys/kern/sys_lwp.c, Revision 1.48
1.48 ! rmind 1: /* $NetBSD: sys_lwp.c,v 1.47 2009/10/22 13:12:47 rmind Exp $ */
1.2 ad 2:
3: /*-
1.36 ad 4: * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
1.2 ad 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Nathan J. Williams, and Andrew Doran.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
33: * Lightweight process (LWP) system calls. See kern_lwp.c for a description
34: * of LWPs.
35: */
36:
37: #include <sys/cdefs.h>
1.48 ! rmind 38: __KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.47 2009/10/22 13:12:47 rmind Exp $");
1.2 ad 39:
40: #include <sys/param.h>
41: #include <sys/systm.h>
42: #include <sys/pool.h>
43: #include <sys/proc.h>
44: #include <sys/types.h>
45: #include <sys/syscallargs.h>
46: #include <sys/kauth.h>
47: #include <sys/kmem.h>
48: #include <sys/sleepq.h>
1.30 ad 49: #include <sys/lwpctl.h>
1.45 ad 50: #include <sys/cpu.h>
1.2 ad 51:
52: #include <uvm/uvm_extern.h>
53:
1.42 wrstuden 54: #include "opt_sa.h"
55:
/* Maximum number of LWPs that one _lwp_unpark_all() call will process. */
#define	LWP_UNPARK_MAX		1024

/*
 * Sync object for LWPs sleeping in _lwp_park().  Uses LIFO queue order
 * (SOBJ_SLEEPQ_LIFO); no owner, so priority lending is a no-op.
 */
static syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

/* Sleep queues for parked LWPs, indexed by wait channel. */
static sleeptab_t lwp_park_tab;
1.2 ad 67:
/*
 * One-time initialization for the LWP system calls: set up the sleep
 * table used by the _lwp_park()/_lwp_unpark() family.
 */
void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}
73:
/*
 * Create a new LWP in the current process from the given ucontext and
 * copy its LWP ID out to the caller.  The new LWP is set running unless
 * LWP_SUSPENDED was requested or the process is stopping/exiting.
 */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	struct lwp *l2;
	vaddr_t uaddr;
	ucontext_t *newuc;
	int error, lid;

#ifdef KERN_SA
	/* Creating LWPs is not permitted in a scheduler-activations process. */
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

	/* Copy in the initial context; the size is emulation-specific. */
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error) {
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		pool_put(&lwp_uc_pool, newuc);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, SCARG(uap, flags) & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/* Report the new LWP's ID before it is set running. */
	lid = l2->l_lid;
	error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
	if (error) {
		/* Could not report the ID: destroy the half-built LWP. */
		lwp_exit(l2);
		pool_put(&lwp_uc_pool, newuc);
		return error;
	}

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0)
			l2->l_stat = LSSTOP;
		else {
			KASSERT(lwp_locked(l2, l2->l_cpu->ci_schedstate.spc_mutex));
			p->p_nrlwps++;
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
		}
		lwp_unlock(l2);
	} else {
		/* Created suspended: hand the LWP over to the CPU lwplock. */
		l2->l_stat = LSSUSPENDED;
		lwp_unlock_to(l2, l2->l_cpu->ci_schedstate.spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}
156:
/* Terminate the calling LWP.  lwp_exit() does not return on success. */
int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}
164:
/* Return the LWP ID of the calling LWP. */
int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}
172:
/* Return the calling LWP's private (thread-local storage base) pointer. */
int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}
180:
/*
 * Set the calling LWP's private pointer.  On platforms that define
 * __HAVE_CPU_LWP_SETPRIVATE, also push the value into the
 * machine-dependent per-CPU/register copy.
 */
int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	l->l_private = SCARG(uap, ptr);
#ifdef __HAVE_CPU_LWP_SETPRIVATE
	/* Mirror the value into MD state (e.g. a dedicated register). */
	cpu_lwp_setprivate(l, SCARG(uap, ptr));
#endif

	return 0;
}
196:
/*
 * Suspend execution of the target LWP in the current process, then wait
 * until the suspension has actually taken effect (or the target exits,
 * the process starts to exit, or we are interrupted by a signal).
 */
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);

#ifdef KERN_SA
	/* Not supported for scheduler-activations processes. */
	if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
#endif

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 * o process exiting
	 * o target LWP suspended
	 * o target LWP not suspended and L_WSUSPEND clear
	 * o target LWP exited
	 */
	for (;;) {
		/* p_lwpcv is broadcast on LWP state changes; sleep on it. */
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}
278:
/* Resume execution of a suspended LWP in the current process. */
int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * NOTE(review): no matching lwp_unlock() here — lwp_continue()
	 * is expected to release the LWP lock; confirm in kern_lwp.c.
	 */
	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}
304:
/*
 * Wake the target LWP from an interruptible sleep, flagging it as
 * cancelled/unparked so that a pending or in-progress park operation
 * returns early.
 */
int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/* Mark the wakeup pending even if we cannot rouse the LWP now. */
	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		/* Target is not sleeping. */
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		/* Target is sleeping, but not interruptibly. */
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}
343:
344: int
1.47 rmind 345: sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
346: register_t *retval)
1.2 ad 347: {
1.32 dsl 348: /* {
1.2 ad 349: syscallarg(lwpid_t) wait_for;
350: syscallarg(lwpid_t *) departed;
1.32 dsl 351: } */
1.2 ad 352: struct proc *p = l->l_proc;
353: int error;
354: lwpid_t dep;
355:
1.39 ad 356: mutex_enter(p->p_lock);
1.2 ad 357: error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
1.39 ad 358: mutex_exit(p->p_lock);
1.2 ad 359:
360: if (error)
361: return error;
362:
363: if (SCARG(uap, departed)) {
364: error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
365: if (error)
366: return error;
367: }
368:
369: return 0;
370: }
371:
/*
 * Direct a signal at a specific LWP within the current process.  A
 * signal number of zero performs only the existence check on the
 * target LWP.
 */
int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(int) signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	/* Build the siginfo identifying the sender and the target LWP. */
	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	/* Signal delivery requires both proc_lock and p_lock, in order. */
	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}
407:
/*
 * Detach the target LWP so its resources are reclaimed automatically
 * when it exits, rather than requiring _lwp_wait().  If the target is
 * already a zombie, reap it here.
 */
int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}
471:
472: static inline wchan_t
473: lwp_park_wchan(struct proc *p, const void *hint)
474: {
1.22 ad 475:
1.2 ad 476: return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
477: }
478:
/*
 * Unpark the LWP with the given ID in the current process.  Fast path:
 * it is already asleep on the park sleep queue and is simply woken.
 * Slow path: it has not parked yet (or is parked on another sync
 * object), so mark the unpark pending via LW_UNPARKED.
 */
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it
	 * is parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}
538:
/*
 * Park the calling LWP on the sleep queue keyed by (proc, hint) until
 * it is unparked, interrupted, or the absolute timeout 'ts' expires.
 * Returns EALREADY if an unpark or cancel is already pending.
 */
int
lwp_park(struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		error = abstimeout2timo(ts, &timo);
		if (error) {
			return error;
		}
		KASSERT(timo != 0);
	} else {
		/* No timeout: sleep until explicitly woken. */
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	/* Hand the LWP lock over to the sleep queue mutex and block. */
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		/* Timeout expired: report as ETIMEDOUT to userland. */
		error = ETIMEDOUT;
		break;
	case ERESTART:
		/* Interrupted: userland sees EINTR, not a syscall restart. */
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
592:
1.24 ad 593: /*
594: * 'park' an LWP waiting on a user-level synchronisation object. The LWP
595: * will remain parked until another LWP in the same process calls in and
596: * requests that it be unparked.
597: */
1.2 ad 598: int
1.44 christos 599: sys____lwp_park50(struct lwp *l, const struct sys____lwp_park50_args *uap,
600: register_t *retval)
1.2 ad 601: {
1.32 dsl 602: /* {
1.24 ad 603: syscallarg(const struct timespec *) ts;
604: syscallarg(lwpid_t) unpark;
605: syscallarg(const void *) hint;
606: syscallarg(const void *) unparkhint;
1.32 dsl 607: } */
1.24 ad 608: struct timespec ts, *tsp;
609: int error;
1.2 ad 610:
1.24 ad 611: if (SCARG(uap, ts) == NULL)
612: tsp = NULL;
613: else {
614: error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
615: if (error != 0)
616: return error;
617: tsp = &ts;
618: }
1.2 ad 619:
1.24 ad 620: if (SCARG(uap, unpark) != 0) {
621: error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
622: if (error != 0)
623: return error;
1.15 ad 624: }
625:
1.24 ad 626: return lwp_park(tsp, SCARG(uap, hint));
627: }
1.2 ad 628:
1.24 ad 629: int
1.47 rmind 630: sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
631: register_t *retval)
1.24 ad 632: {
1.32 dsl 633: /* {
1.24 ad 634: syscallarg(lwpid_t) target;
635: syscallarg(const void *) hint;
1.32 dsl 636: } */
1.2 ad 637:
1.24 ad 638: return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
1.2 ad 639: }
640:
/*
 * Unpark a batch of LWPs in the current process.  When the target
 * array is NULL, just report the per-call maximum (LWP_UNPARK_MAX)
 * so the caller can split its work into blocks.
 */
int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *) targets;
		syscallarg(size_t) ntargets;
		syscallarg(const void *) hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.  The sleep queue
		 * spin mutex is dropped while we take p_lock, and
		 * reacquired before the next iteration.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it is parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}
1.28 ad 752:
/*
 * Set the name of an LWP in the current process (a target of 0 names
 * the caller).  Over-long names are silently truncated to fit in
 * MAXCOMLEN bytes including the terminating NUL.
 */
int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(const char *) name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		/* Accept truncated names; just ensure NUL termination. */
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	/* Swap in the new name under the LWP lock; free the old outside. */
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}
802:
/*
 * Fetch the name of an LWP in the current process (a target of 0 means
 * the caller).  An unnamed LWP yields the empty string.
 */
int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
		syscallarg(char *) name;
		syscallarg(size_t) len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	/*
	 * Snapshot the name into a local buffer so the copyout can run
	 * without holding any locks.  l_name is NUL-terminated within
	 * MAXCOMLEN bytes (see sys__lwp_setname()).
	 */
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}
1.30 ad 836:
837: int
1.47 rmind 838: sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
839: register_t *retval)
1.30 ad 840: {
1.32 dsl 841: /* {
1.30 ad 842: syscallarg(int) features;
843: syscallarg(struct lwpctl **) address;
1.32 dsl 844: } */
1.30 ad 845: int error, features;
846: vaddr_t vaddr;
847:
848: features = SCARG(uap, features);
1.35 ad 849: features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
850: if (features != 0)
1.30 ad 851: return ENODEV;
852: if ((error = lwp_ctl_alloc(&vaddr)) != 0)
853: return error;
854: return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
855: }
CVSweb <webmaster@jp.NetBSD.org>