Annotation of src/sys/kern/sys_lwp.c, Revision 1.74
1.74 ! ad 1: /* $NetBSD: sys_lwp.c,v 1.73 2020/01/26 19:08:09 ad Exp $ */
1.2 ad 2:
3: /*-
1.72 ad 4: * Copyright (c) 2001, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
1.2 ad 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Nathan J. Williams, and Andrew Doran.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
33: * Lightweight process (LWP) system calls. See kern_lwp.c for a description
34: * of LWPs.
35: */
36:
37: #include <sys/cdefs.h>
1.74 ! ad 38: __KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.73 2020/01/26 19:08:09 ad Exp $");
1.2 ad 39:
40: #include <sys/param.h>
41: #include <sys/systm.h>
42: #include <sys/pool.h>
43: #include <sys/proc.h>
44: #include <sys/types.h>
45: #include <sys/syscallargs.h>
46: #include <sys/kauth.h>
47: #include <sys/kmem.h>
1.70 kamil 48: #include <sys/ptrace.h>
1.2 ad 49: #include <sys/sleepq.h>
1.30 ad 50: #include <sys/lwpctl.h>
1.45 ad 51: #include <sys/cpu.h>
1.2 ad 52:
53: #include <uvm/uvm_extern.h>
54:
55: #define LWP_UNPARK_MAX 1024
56:
1.69 maxv 57: static const stack_t lwp_ss_init = SS_INIT;
58:
/*
 * Sync object for LWPs sleeping in _lwp_park().  SOBJ_SLEEPQ_NULL means
 * parked LWPs are not hashed onto a global sleep queue; lwp_unpark()
 * finds them directly via the per-process LWP tree and identifies them
 * by l_syncobj pointing at this object.
 */
1.74 ! ad 59: syncobj_t lwp_park_syncobj = {
 ! 60: .sobj_flag = SOBJ_SLEEPQ_NULL,
1.63 ozaki-r 61: .sobj_unsleep = sleepq_unsleep,
 62: .sobj_changepri = sleepq_changepri,
 63: .sobj_lendpri = sleepq_lendpri,
 64: .sobj_owner = syncobj_noowner,
1.2 ad 65: };
66:
1.64 kamil 67: static void
68: mi_startlwp(void *arg)
69: {
70: struct lwp *l = curlwp;
71: struct proc *p = l->l_proc;
72:
1.65 kamil 73: (p->p_emul->e_startlwp)(arg);
74:
1.64 kamil 75: /* If the process is traced, report lwp creation to a debugger */
1.66 kamil 76: if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) ==
1.64 kamil 77: (PSL_TRACED|PSL_TRACELWP_CREATE)) {
78: /* Paranoid check */
79: mutex_enter(proc_lock);
1.66 kamil 80: if ((p->p_slflag & (PSL_TRACED|PSL_TRACELWP_CREATE)) !=
81: (PSL_TRACED|PSL_TRACELWP_CREATE)) {
1.64 kamil 82: mutex_exit(proc_lock);
1.65 kamil 83: return;
1.64 kamil 84: }
85:
86: mutex_enter(p->p_lock);
1.70 kamil 87: eventswitch(TRAP_LWP, PTRACE_LWP_CREATE, l->l_lid);
1.64 kamil 88: }
89: }
90:
1.2 ad 91: int
1.72 ad 92: do_lwp_create(lwp_t *l, void *arg, u_long flags, lwp_t **l2,
1.59 christos 93: const sigset_t *sigmask, const stack_t *sigstk)
1.2 ad 94: {
95: struct proc *p = l->l_proc;
96: vaddr_t uaddr;
1.54 martin 97: int error;
1.2 ad 98:
99: /* XXX check against resource limits */
100:
1.46 rmind 101: uaddr = uvm_uarea_alloc();
1.54 martin 102: if (__predict_false(uaddr == 0))
1.2 ad 103: return ENOMEM;
104:
1.59 christos 105: error = lwp_create(l, p, uaddr, flags & LWP_DETACHED, NULL, 0,
1.72 ad 106: mi_startlwp, arg, l2, l->l_class, sigmask, &lwp_ss_init);
1.46 rmind 107: if (__predict_false(error)) {
108: uvm_uarea_free(uaddr);
1.18 rmind 109: return error;
110: }
1.2 ad 111:
112: return 0;
113: }
114:
/*
 * _lwp_create() system call: copy in and validate the user's ucontext
 * (it must at least provide CPU state, _UC_CPU), create the new LWP,
 * copy its LWP ID out to the caller, and start it.  The signal mask is
 * taken from the ucontext when _UC_SIGMASK is set, otherwise inherited
 * from the creating LWP.  On success the heap-allocated ucontext is
 * handed to the new LWP (mi_startlwp passes it to e_startlwp, which is
 * presumably responsible for freeing it — confirm per emulation); on any
 * failure it is freed here.  If the copyout of the new LWP ID fails,
 * the not-yet-started LWP is destroyed with lwp_exit().
 */
 115: int
1.54 martin 116: sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
 117: register_t *retval)
 118: {
 119: /* {
 120: syscallarg(const ucontext_t *) ucp;
 121: syscallarg(u_long) flags;
 122: syscallarg(lwpid_t *) new_lwp;
 123: } */
 124: struct proc *p = l->l_proc;
1.57 maxv 125: ucontext_t *newuc;
1.72 ad 126: lwp_t *l2;
1.54 martin 127: int error;
 128:
 129: newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
 130: error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
 131: if (error)
 132: goto fail;
 133:
 134: /* validate the ucontext */
 135: if ((newuc->uc_flags & _UC_CPU) == 0) {
 136: error = EINVAL;
 137: goto fail;
 138: }
 139: error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
 140: if (error)
 141: goto fail;
 142:
1.59 christos 143: const sigset_t *sigmask = newuc->uc_flags & _UC_SIGMASK ?
 144: &newuc->uc_sigmask : &l->l_sigmask;
1.72 ad 145: error = do_lwp_create(l, newuc, SCARG(uap, flags), &l2, sigmask,
1.59 christos 146: &SS_INIT);
1.54 martin 147: if (error)
 148: goto fail;
 149:
1.72 ad 150: error = copyout(&l2->l_lid, SCARG(uap, new_lwp), sizeof(l2->l_lid));
1.73 ad 151: if (error == 0) {
1.72 ad 152: lwp_start(l2, SCARG(uap, flags));
1.73 ad 153: return 0;
 154: }
 155: lwp_exit(l2);
1.72 ad 156: fail:
1.54 martin 157: kmem_free(newuc, sizeof(ucontext_t));
 158: return error;
 159: }
160:
/*
 * _lwp_exit() system call: terminate the calling LWP.
 */
 161: int
1.32 dsl 162: sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
1.2 ad 163: {
 164:
 165: lwp_exit(l);
 166: return 0;
 167: }
168:
/*
 * _lwp_self() system call: return the calling LWP's ID.
 */
 169: int
1.32 dsl 170: sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
1.2 ad 171: {
 172:
 173: *retval = l->l_lid;
 174: return 0;
 175: }
176:
/*
 * _lwp_getprivate() system call: return the calling LWP's private
 * (thread-local) data pointer as an integer.
 */
 177: int
1.32 dsl 178: sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
1.2 ad 179: {
 180:
 181: *retval = (uintptr_t)l->l_private;
 182: return 0;
 183: }
184:
/*
 * _lwp_setprivate() system call: set the calling LWP's private
 * (thread-local) data pointer.
 */
 185: int
1.47 rmind 186: sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
 187: register_t *retval)
1.2 ad 188: {
1.32 dsl 189: /* {
1.2 ad 190: syscallarg(void *) ptr;
1.32 dsl 191: } */
1.2 ad 192:
1.52 chs 193: return lwp_setprivate(l, SCARG(uap, ptr));
1.2 ad 194: }
195:
/*
 * _lwp_suspend() system call: suspend a target LWP in the same process
 * and wait until the suspension has taken effect.  Returns ESRCH if the
 * target does not exist, EDEADLK if suspending would deadlock (last
 * runnable LWP suspending itself, or the process is already exiting /
 * dumping core), ERESTART if interrupted, or the error from
 * lwp_suspend().
 */
 196: int
1.47 rmind 197: sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
 198: register_t *retval)
1.2 ad 199: {
1.32 dsl 200: /* {
1.2 ad 201: syscallarg(lwpid_t) target;
1.32 dsl 202: } */
1.2 ad 203: struct proc *p = l->l_proc;
 204: struct lwp *t;
 205: int error;
 206:
1.39 ad 207: mutex_enter(p->p_lock);
1.2 ad 208: if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
1.39 ad 209: mutex_exit(p->p_lock);
1.2 ad 210: return ESRCH;
 211: }
 212:
 213: /*
 214: * Check for deadlock, which is only possible when we're suspending
 215: * ourself. XXX There is a short race here, as p_nrlwps is only
 216: * incremented when an LWP suspends itself on the kernel/user
 217: * boundary. It's still possible to kill -9 the process so we
 218: * don't bother checking further.
 219: */
 220: lwp_lock(t);
 221: if ((t == l && p->p_nrlwps == 1) ||
1.4 pavel 222: (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
1.2 ad 223: lwp_unlock(t);
1.39 ad 224: mutex_exit(p->p_lock);
1.2 ad 225: return EDEADLK;
 226: }
 227:
 228: /*
 229: * Suspend the LWP. XXX If it's on a different CPU, we should wait
 230: * for it to be preempted, where it will put itself to sleep.
 231: *
 232: * Suspension of the current LWP will happen on return to userspace.
 233: */
 234: error = lwp_suspend(l, t);
1.23 rmind 235: if (error) {
1.39 ad 236: mutex_exit(p->p_lock);
1.23 rmind 237: return error;
 238: }
 239:
 240: /*
 241: * Wait for:
 242: * o process exiting
 243: * o target LWP suspended
 244: * o target LWP not suspended and L_WSUSPEND clear
 245: * o target LWP exited
 246: */
 247: for (;;) {
/* p_lock is dropped and re-taken across the sleep; the re-lookup below guards the subsequent uses of 't'. */
1.39 ad 248: error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
1.23 rmind 249: if (error) {
 250: error = ERESTART;
 251: break;
 252: }
/* NOTE(review): the re-lookup result is discarded and the original 't' is dereferenced afterwards — relies on the target not being freed while it is still findable; confirm. */
1.25 rmind 253: if (lwp_find(p, SCARG(uap, target)) == NULL) {
 254: error = ESRCH;
 255: break;
 256: }
1.23 rmind 257: if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
 258: error = ERESTART;
 259: break;
 260: }
 261: if (t->l_stat == LSSUSPENDED ||
 262: (t->l_flag & LW_WSUSPEND) == 0)
 263: break;
 264: }
1.39 ad 265: mutex_exit(p->p_lock);
1.2 ad 266:
 267: return error;
 268: }
269:
/*
 * _lwp_continue() system call: resume a suspended LWP in the same
 * process.  Returns ESRCH if the target does not exist.
 */
 270: int
1.47 rmind 271: sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
 272: register_t *retval)
1.2 ad 273: {
1.32 dsl 274: /* {
1.2 ad 275: syscallarg(lwpid_t) target;
1.32 dsl 276: } */
1.2 ad 277: int error;
 278: struct proc *p = l->l_proc;
 279: struct lwp *t;
 280:
 281: error = 0;
 282:
1.39 ad 283: mutex_enter(p->p_lock);
1.2 ad 284: if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
1.39 ad 285: mutex_exit(p->p_lock);
1.2 ad 286: return ESRCH;
 287: }
 288:
 289: lwp_lock(t);
/* NOTE(review): only p_lock is released here — lwp_continue() presumably drops t's lock itself; confirm against its definition. */
 290: lwp_continue(t);
1.39 ad 291: mutex_exit(p->p_lock);
1.2 ad 292:
 293: return error;
 294: }
295:
/*
 * _lwp_wakeup() system call: make a target LWP's blocking system call
 * return early.  Marks the target cancelled/unparked, then: ENODEV if
 * the target is not sleeping, EBUSY if it is in a non-interruptible
 * sleep, otherwise wakes it.
 */
 296: int
1.47 rmind 297: sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
 298: register_t *retval)
1.2 ad 299: {
1.32 dsl 300: /* {
1.2 ad 301: syscallarg(lwpid_t) target;
1.32 dsl 302: } */
1.2 ad 303: struct lwp *t;
 304: struct proc *p;
 305: int error;
 306:
 307: p = l->l_proc;
1.39 ad 308: mutex_enter(p->p_lock);
1.2 ad 309:
 310: if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
1.39 ad 311: mutex_exit(p->p_lock);
1.2 ad 312: return ESRCH;
 313: }
 314:
 315: lwp_lock(t);
/* Set both flags unconditionally so a racing _lwp_park() returns early even if the target is not yet asleep. */
1.15 ad 316: t->l_flag |= (LW_CANCELLED | LW_UNPARKED);
1.2 ad 317:
 318: if (t->l_stat != LSSLEEP) {
1.16 ad 319: lwp_unlock(t);
1.2 ad 320: error = ENODEV;
1.16 ad 321: } else if ((t->l_flag & LW_SINTR) == 0) {
 322: lwp_unlock(t);
1.2 ad 323: error = EBUSY;
1.16 ad 324: } else {
 325: /* Wake it up. lwp_unsleep() will release the LWP lock. */
1.46 rmind 326: lwp_unsleep(t, true);
1.16 ad 327: error = 0;
1.2 ad 328: }
 329:
1.39 ad 330: mutex_exit(p->p_lock);
1.2 ad 331:
 332: return error;
 333: }
334:
/*
 * _lwp_wait() system call: wait for an LWP in the same process to exit
 * (wait_for == 0 waits for any), and optionally copy out the ID of the
 * departed LWP.
 */
 335: int
1.47 rmind 336: sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
 337: register_t *retval)
1.2 ad 338: {
1.32 dsl 339: /* {
1.2 ad 340: syscallarg(lwpid_t) wait_for;
 341: syscallarg(lwpid_t *) departed;
1.32 dsl 342: } */
1.2 ad 343: struct proc *p = l->l_proc;
 344: int error;
 345: lwpid_t dep;
 346:
1.39 ad 347: mutex_enter(p->p_lock);
1.55 rmind 348: error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
1.39 ad 349: mutex_exit(p->p_lock);
1.2 ad 350:
1.55 rmind 351: if (!error && SCARG(uap, departed)) {
1.2 ad 352: error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
 353: }
 354:
1.55 rmind 355: return error;
1.2 ad 356: }
357:
/*
 * _lwp_kill() system call: send a signal to a specific LWP in the same
 * process.  A signo of 0 performs only the existence check (ESRCH if
 * the target LWP is not found); otherwise the signal is delivered with
 * SI_LWP siginfo directed at the target LWP ID.
 */
 358: int
1.47 rmind 359: sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
 360: register_t *retval)
1.2 ad 361: {
1.32 dsl 362: /* {
1.2 ad 363: syscallarg(lwpid_t) target;
 364: syscallarg(int) signo;
1.32 dsl 365: } */
1.2 ad 366: struct proc *p = l->l_proc;
 367: struct lwp *t;
 368: ksiginfo_t ksi;
 369: int signo = SCARG(uap, signo);
 370: int error = 0;
 371:
 372: if ((u_int)signo >= NSIG)
 373: return EINVAL;
 374:
 375: KSI_INIT(&ksi);
 376: ksi.ksi_signo = signo;
1.43 ad 377: ksi.ksi_code = SI_LWP;
1.2 ad 378: ksi.ksi_pid = p->p_pid;
 379: ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
 380: ksi.ksi_lid = SCARG(uap, target);
 381:
/* Signal delivery requires both proc_lock and p_lock, taken in that order. */
1.38 ad 382: mutex_enter(proc_lock);
1.39 ad 383: mutex_enter(p->p_lock);
1.2 ad 384: if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
 385: error = ESRCH;
 386: else if (signo != 0)
 387: kpsignal2(p, &ksi);
1.39 ad 388: mutex_exit(p->p_lock);
1.38 ad 389: mutex_exit(proc_lock);
1.2 ad 390:
 391: return error;
 392: }
393:
/*
 * _lwp_detach() system call: mark a target LWP in the same process as
 * detached so that no _lwp_wait() is needed to reap it.  Returns ESRCH
 * if the target does not exist (or is still LSIDL), EINVAL if already
 * detached.  If the target is already a zombie it is freed immediately.
 */
 394: int
1.47 rmind 395: sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
 396: register_t *retval)
1.2 ad 397: {
1.32 dsl 398: /* {
1.2 ad 399: syscallarg(lwpid_t) target;
1.32 dsl 400: } */
1.2 ad 401: struct proc *p;
 402: struct lwp *t;
 403: lwpid_t target;
 404: int error;
 405:
 406: target = SCARG(uap, target);
 407: p = l->l_proc;
 408:
1.39 ad 409: mutex_enter(p->p_lock);
1.2 ad 410:
 411: if (l->l_lid == target)
 412: t = l;
 413: else {
 414: /*
 415: * We can't use lwp_find() here because the target might
 416: * be a zombie.
 417: */
/* The LWP tree is keyed by (lid - 1), hence the offset in the lookup. */
1.74 ! ad 418: t = radix_tree_lookup_node(&p->p_lwptree,
 ! 419: (uint64_t)(target - 1));
 ! 420: KASSERT(t == NULL || t->l_lid == target);
1.2 ad 421: }
 422:
 423: /*
 424: * If the LWP is already detached, there's nothing to do.
 425: * If it's a zombie, we need to clean up after it. LSZOMB
 426: * is visible with the proc mutex held.
 427: *
 428: * After we have detached or released the LWP, kick any
 429: * other LWPs that may be sitting in _lwp_wait(), waiting
 430: * for the target LWP to exit.
 431: */
 432: if (t != NULL && t->l_stat != LSIDL) {
 433: if ((t->l_prflag & LPR_DETACHED) == 0) {
 434: p->p_ndlwps++;
 435: t->l_prflag |= LPR_DETACHED;
 436: if (t->l_stat == LSZOMB) {
1.17 ad 437: /* Releases proc mutex. */
 438: lwp_free(t, false, false);
1.2 ad 439: return 0;
 440: }
 441: error = 0;
1.17 ad 442:
 443: /*
 444: * Have any LWPs sleeping in lwp_wait() recheck
 445: * for deadlock.
 446: */
 447: cv_broadcast(&p->p_lwpcv);
1.2 ad 448: } else
 449: error = EINVAL;
 450: } else
 451: error = ESRCH;
 452:
1.39 ad 453: mutex_exit(p->p_lock);
1.2 ad 454:
 455: return error;
 456: }
457:
/*
 * Unpark one or more LWPs in the current process, given an array of
 * 'ntargets' LWP IDs.  Each target is looked up in the per-process LWP
 * tree (keyed by lid - 1) under a read hold of p_treelock.  Targets
 * currently parked (l_syncobj == &lwp_park_syncobj) are woken; targets
 * not yet parked get LW_UNPARKED set so their next _lwp_park() returns
 * early.  Missing IDs yield ESRCH but do not stop processing of the
 * remaining targets; the last such error is returned.
 */
 458: int
1.74 ! ad 459: lwp_unpark(const lwpid_t *tp, const u_int ntargets)
1.2 ad 460: {
1.74 ! ad 461: uint64_t id;
 ! 462: u_int target;
 ! 463: int error;
1.24 ad 464: proc_t *p;
 465: lwp_t *t;
 466:
 467: p = curproc;
1.74 ! ad 468: error = 0;
1.24 ad 469:
1.74 ! ad 470: rw_enter(&p->p_treelock, RW_READER);
 ! 471: for (target = 0; target < ntargets; target++) {
1.24 ad 472: /*
1.74 ! ad 473: * We don't bother excluding zombies or idle LWPs here, as
 ! 474: * setting LW_UNPARKED on them won't do any harm.
 ! 475: */
1.74 ! ad 476: id = (uint64_t)(tp[target] - 1);
 ! 477: t = radix_tree_lookup_node(&p->p_lwptree, id);
 ! 478: if (t == NULL) {
 ! 479: error = ESRCH;
 ! 480: continue;
 ! 481: }
 ! 482:
 ! 483: /* It may not have parked yet or we may have raced. */
 ! 484: lwp_lock(t);
 ! 485: if (t->l_syncobj == &lwp_park_syncobj) {
 ! 486: /* Releases the LWP lock. */
 ! 487: lwp_unsleep(t, true);
 ! 488: } else {
 ! 489: /*
 ! 490: * Set the operation pending. The next call to
 ! 491: * _lwp_park() will return early.
 ! 492: */
 ! 493: t->l_flag |= LW_UNPARKED;
 ! 494: lwp_unlock(t);
 ! 495: }
1.24 ad 496: }
1.74 ! ad 497: rw_exit(&p->p_treelock);
1.20 dsl 498:
1.74 ! ad 499: return error;
1.20 dsl 500: }
501:
/*
 * Park the current LWP until unparked or until the optional timeout
 * ('ts', interpreted against 'clock_id'/'flags', absolute when
 * TIMER_ABSTIME) expires.  If an unpark or cancel is already pending
 * (LW_CANCELLED/LW_UNPARKED), the flags are consumed and EALREADY is
 * returned without sleeping.  EWOULDBLOCK from the sleep is mapped to
 * ETIMEDOUT, ERESTART to EINTR.  For relative timeouts the remaining
 * time is written back into *ts (zeroed on timeout).
 */
 502: int
1.74 ! ad 503: lwp_park(clockid_t clock_id, int flags, struct timespec *ts)
1.20 dsl 504: {
1.2 ad 505: int timo, error;
1.62 christos 506: struct timespec start;
1.24 ad 507: lwp_t *l;
/* Only relative waits report time remaining back to the caller. */
1.62 christos 508: bool timeremain = !(flags & TIMER_ABSTIME) && ts;
1.2 ad 509:
1.20 dsl 510: if (ts != NULL) {
1.62 christos 511: if ((error = ts2timo(clock_id, flags, ts, &timo,
 512: timeremain ? &start : NULL)) != 0)
1.2 ad 513: return error;
1.24 ad 514: KASSERT(timo != 0);
1.48 rmind 515: } else {
1.2 ad 516: timo = 0;
1.48 rmind 517: }
1.2 ad 518:
 519: /*
 520: * Before going the full route and blocking, check to see if an
 521: * unpark op is pending.
 522: */
1.74 ! ad 523: l = curlwp;
1.19 yamt 524: lwp_lock(l);
1.8 ad 525: if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
 526: l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
1.19 yamt 527: lwp_unlock(l);
1.2 ad 528: return EALREADY;
 529: }
1.24 ad 530: l->l_biglocks = 0;
/* NULL wchan: lwp_park_syncobj is SOBJ_SLEEPQ_NULL, so no sleep queue head is used. */
1.74 ! ad 531: sleepq_enqueue(NULL, l, "parked", &lwp_park_syncobj);
1.19 yamt 532: error = sleepq_block(timo, true);
1.13 yamt 533: switch (error) {
1.14 yamt 534: case EWOULDBLOCK:
 535: error = ETIMEDOUT;
1.62 christos 536: if (timeremain)
 537: memset(ts, 0, sizeof(*ts));
1.14 yamt 538: break;
 539: case ERESTART:
 540: error = EINTR;
1.62 christos 541: /*FALLTHROUGH*/
1.14 yamt 542: default:
1.62 christos 543: if (timeremain)
 544: clock_timeleft(clock_id, ts, &start);
1.14 yamt 545: break;
1.13 yamt 546: }
 547: return error;
1.2 ad 548: }
549:
1.24 ad 550: /*
551: * 'park' an LWP waiting on a user-level synchronisation object. The LWP
552: * will remain parked until another LWP in the same process calls in and
553: * requests that it be unparked.
554: */
1.2 ad 555: int
1.56 christos 556: sys____lwp_park60(struct lwp *l, const struct sys____lwp_park60_args *uap,
1.44 christos 557: register_t *retval)
1.2 ad 558: {
1.32 dsl 559: /* {
1.56 christos 560: syscallarg(clockid_t) clock_id;
561: syscallarg(int) flags;
1.62 christos 562: syscallarg(struct timespec *) ts;
1.24 ad 563: syscallarg(lwpid_t) unpark;
564: syscallarg(const void *) hint;
565: syscallarg(const void *) unparkhint;
1.32 dsl 566: } */
1.24 ad 567: struct timespec ts, *tsp;
568: int error;
1.2 ad 569:
1.24 ad 570: if (SCARG(uap, ts) == NULL)
571: tsp = NULL;
572: else {
573: error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
574: if (error != 0)
575: return error;
576: tsp = &ts;
577: }
1.2 ad 578:
1.24 ad 579: if (SCARG(uap, unpark) != 0) {
1.74 ! ad 580: error = lwp_unpark(&SCARG(uap, unpark), 1);
1.24 ad 581: if (error != 0)
582: return error;
1.15 ad 583: }
584:
1.74 ! ad 585: error = lwp_park(SCARG(uap, clock_id), SCARG(uap, flags), tsp);
1.62 christos 586: if (SCARG(uap, ts) != NULL && (SCARG(uap, flags) & TIMER_ABSTIME) == 0)
587: (void)copyout(tsp, SCARG(uap, ts), sizeof(*tsp));
588: return error;
1.24 ad 589: }
1.2 ad 590:
/*
 * _lwp_unpark() system call: unpark a single LWP.  The 'hint' argument
 * is unused here; the work is done by lwp_unpark() with a one-element
 * target list.
 */
1.24 ad 591: int
1.47 rmind 592: sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
 593: register_t *retval)
1.24 ad 594: {
1.32 dsl 595: /* {
1.24 ad 596: syscallarg(lwpid_t) target;
 597: syscallarg(const void *) hint;
1.32 dsl 598: } */
1.2 ad 599:
1.74 ! ad 600: return lwp_unpark(&SCARG(uap, target), 1);
1.2 ad 601: }
602:
/*
 * _lwp_unpark_all() system call: unpark a batch of LWPs.  A NULL target
 * array is a capability query that returns LWP_UNPARK_MAX (the largest
 * accepted batch).  Otherwise the ID array is copied in — onto the
 * stack for up to 32 entries, heap-allocated beyond that — and handed
 * to lwp_unpark().
 */
 603: int
1.47 rmind 604: sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
 605: register_t *retval)
1.2 ad 606: {
1.32 dsl 607: /* {
1.2 ad 608: syscallarg(const lwpid_t *) targets;
 609: syscallarg(size_t) ntargets;
 610: syscallarg(const void *) hint;
1.32 dsl 611: } */
1.74 ! ad 612: lwpid_t targets[32], *tp;
1.46 rmind 613: int error;
1.15 ad 614: u_int ntargets;
1.2 ad 615: size_t sz;
 616:
 617: ntargets = SCARG(uap, ntargets);
 618: if (SCARG(uap, targets) == NULL) {
 619: /*
 620: * Let the caller know how much we are willing to do, and
 621: * let it unpark the LWPs in blocks.
 622: */
 623: *retval = LWP_UNPARK_MAX;
 624: return 0;
 625: }
 626: if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
 627: return EINVAL;
 628:
 629: /*
 630: * Copy in the target array. If it's a small number of LWPs, then
 631: * place the numbers on the stack.
 632: */
1.74 ! ad 633: sz = sizeof(lwpid_t) * ntargets;
1.2 ad 634: if (sz <= sizeof(targets))
 635: tp = targets;
1.61 chs 636: else
1.2 ad 637: tp = kmem_alloc(sz, KM_SLEEP);
 638: error = copyin(SCARG(uap, targets), tp, sz);
 639: if (error != 0) {
 640: if (tp != targets) {
 641: kmem_free(tp, sz);
 642: }
 643: return error;
 644: }
1.74 ! ad 645: error = lwp_unpark(tp, ntargets);
1.33 ad 646: if (tp != targets)
1.2 ad 647: kmem_free(tp, sz);
1.74 ! ad 648: return error;
1.2 ad 649: }
1.28 ad 650:
/*
 * _lwp_setname() system call: set the name of an LWP in the current
 * process (target 0 means the calling LWP).  Names longer than
 * MAXCOMLEN are silently truncated (ENAMETOOLONG from copyinstr is
 * accepted).  The new buffer is swapped in under the LWP lock and the
 * old name, if any, is freed after the locks are dropped.
 */
 651: int
1.47 rmind 652: sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
 653: register_t *retval)
1.28 ad 654: {
1.32 dsl 655: /* {
1.28 ad 656: syscallarg(lwpid_t) target;
 657: syscallarg(const char *) name;
1.32 dsl 658: } */
1.28 ad 659: char *name, *oname;
1.30 ad 660: lwpid_t target;
1.28 ad 661: proc_t *p;
 662: lwp_t *t;
 663: int error;
 664:
1.30 ad 665: if ((target = SCARG(uap, target)) == 0)
 666: target = l->l_lid;
 667:
1.28 ad 668: name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
 669: error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
 670: switch (error) {
 671: case ENAMETOOLONG:
 672: case 0:
 673: name[MAXCOMLEN - 1] = '\0';
 674: break;
 675: default:
 676: kmem_free(name, MAXCOMLEN);
 677: return error;
 678: }
 679:
 680: p = curproc;
1.39 ad 681: mutex_enter(p->p_lock);
1.30 ad 682: if ((t = lwp_find(p, target)) == NULL) {
1.39 ad 683: mutex_exit(p->p_lock);
1.28 ad 684: kmem_free(name, MAXCOMLEN);
 685: return ESRCH;
 686: }
 687: lwp_lock(t);
 688: oname = t->l_name;
 689: t->l_name = name;
 690: lwp_unlock(t);
1.39 ad 691: mutex_exit(p->p_lock);
1.28 ad 692:
 693: if (oname != NULL)
 694: kmem_free(oname, MAXCOMLEN);
 695:
 696: return 0;
 697: }
698:
/*
 * _lwp_getname() system call: copy out the name of an LWP in the
 * current process (target 0 means the calling LWP).  The name is
 * snapshotted into a stack buffer under the LWP lock so the copyout can
 * run unlocked; an unnamed LWP yields the empty string.
 */
 699: int
1.47 rmind 700: sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
 701: register_t *retval)
1.28 ad 702: {
1.32 dsl 703: /* {
1.28 ad 704: syscallarg(lwpid_t) target;
 705: syscallarg(char *) name;
 706: syscallarg(size_t) len;
1.32 dsl 707: } */
1.28 ad 708: char name[MAXCOMLEN];
1.30 ad 709: lwpid_t target;
1.68 maxv 710: size_t len;
1.28 ad 711: proc_t *p;
 712: lwp_t *t;
 713:
1.30 ad 714: if ((target = SCARG(uap, target)) == 0)
 715: target = l->l_lid;
 716:
1.28 ad 717: p = curproc;
1.39 ad 718: mutex_enter(p->p_lock);
1.30 ad 719: if ((t = lwp_find(p, target)) == NULL) {
1.39 ad 720: mutex_exit(p->p_lock);
1.28 ad 721: return ESRCH;
 722: }
 723: lwp_lock(t);
 724: if (t->l_name == NULL)
 725: name[0] = '\0';
 726: else
1.58 maya 727: strlcpy(name, t->l_name, sizeof(name));
1.28 ad 728: lwp_unlock(t);
1.39 ad 729: mutex_exit(p->p_lock);
1.28 ad 730:
/* Never copy out more than the kernel buffer holds, regardless of the user-supplied length. */
1.68 maxv 731: len = uimin(SCARG(uap, len), sizeof(name));
 732:
 733: return copyoutstr(name, SCARG(uap, name), len, NULL);
1.28 ad 734: }
1.30 ad 735:
/*
 * _lwp_ctl() system call: map a shared kernel/user lwpctl block into
 * the process and return its address.  Only the LWPCTL_FEATURE_CURCPU
 * and LWPCTL_FEATURE_PCTR feature bits are recognized; any other
 * requested feature yields ENODEV.
 */
 736: int
1.47 rmind 737: sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
 738: register_t *retval)
1.30 ad 739: {
1.32 dsl 740: /* {
1.30 ad 741: syscallarg(int) features;
 742: syscallarg(struct lwpctl **) address;
1.32 dsl 743: } */
1.30 ad 744: int error, features;
 745: vaddr_t vaddr;
 746:
 747: features = SCARG(uap, features);
/* Strip the supported bits; anything left over is an unsupported feature request. */
1.35 ad 748: features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
 749: if (features != 0)
1.30 ad 750: return ENODEV;
 751: if ((error = lwp_ctl_alloc(&vaddr)) != 0)
 752: return error;
 753: return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
 754: }
CVSweb <webmaster@jp.NetBSD.org>