Annotation of src/lib/libpthread_dbg/pthread_dbg.c, Revision 1.26
1.26 ! nathanw 1: /* $NetBSD: pthread_dbg.c,v 1.25 2004/06/11 07:28:05 scw Exp $ */
1.2 thorpej 2:
3: /*-
4: * Copyright (c) 2002 Wasabi Systems, Inc.
5: * All rights reserved.
6: *
7: * Written by Nathan J. Williams for Wasabi Systems, Inc.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: * 3. All advertising materials mentioning features or use of this software
18: * must display the following acknowledgement:
19: * This product includes software developed for the NetBSD Project by
20: * Wasabi Systems, Inc.
21: * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22: * or promote products derived from this software without specific
23: * prior written permission.
24: *
25: * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35: * POSSIBILITY OF SUCH DAMAGE.
36: */
1.5 lukem 37:
38: #include <sys/cdefs.h>
1.26 ! nathanw 39: __RCSID("$NetBSD: pthread_dbg.c,v 1.25 2004/06/11 07:28:05 scw Exp $");
1.2 thorpej 40:
41: #include <stddef.h>
42: #include <stdlib.h>
43: #include <string.h>
44: #include <errno.h>
45: #include <sys/types.h>
46: #include <unistd.h>
47:
1.24 martin 48: #include <machine/reg.h>
49:
1.2 thorpej 50: #include <pthread.h>
51: #include <pthread_int.h>
52: #include <pthread_dbg.h>
53: #include <pthread_dbg_int.h>
54:
1.4 nathanw 55: #define MIN(a,b) ((a)<(b) ? (a) : (b))
56:
1.12 nathanw 57: #ifndef PT_FIXEDSTACKSIZE_LG
58: #undef PT_STACKMASK
59: #define PT_STACKMASK (proc->stackmask)
60: #endif
61:
1.20 nathanw 62: /* Compensate for debuggers that want a zero ID to be a sentinel */
63: #define TN_OFFSET 1
64:
1.2 thorpej 65: static int td__getthread(td_proc_t *proc, caddr_t addr, td_thread_t **threadp);
66: static int td__getsync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp);
1.10 cl 67: static int td__getstacksize(td_proc_t *proc);
68:
1.2 thorpej 69: int
70: td_open(struct td_proc_callbacks_t *cb, void *arg, td_proc_t **procp)
71: {
72: td_proc_t *proc;
1.12 nathanw 73: caddr_t addr;
1.2 thorpej 74: int dbg;
75: int val;
76:
77: proc = malloc(sizeof(*proc));
78: if (proc == NULL)
79: return TD_ERR_NOMEM;
80: proc->cb = cb;
81: proc->arg = arg;
82:
1.12 nathanw 83: val = LOOKUP(proc, "pthread__dbg", &addr);
84: if (val != 0) {
85: if (val == TD_ERR_NOSYM)
86: val = TD_ERR_NOLIB;
87: goto error;
88: }
89: proc->dbgaddr = addr;
90:
91: val = LOOKUP(proc, "pthread__allqueue", &addr);
92: if (val != 0) {
93: if (val == TD_ERR_NOSYM)
94: val = TD_ERR_NOLIB;
95: goto error;
96: }
97: proc->allqaddr = addr;
98:
1.17 nathanw 99: val = LOOKUP(proc, "pthread__runqueue", &addr);
100: if (val != 0) {
101: if (val == TD_ERR_NOSYM)
102: val = TD_ERR_NOLIB;
103: goto error;
104: }
105: proc->runqaddr = addr;
106:
107: val = LOOKUP(proc, "pthread__idlequeue", &addr);
108: if (val != 0) {
109: if (val == TD_ERR_NOSYM)
110: val = TD_ERR_NOLIB;
111: goto error;
112: }
113: proc->idleqaddr = addr;
114:
115: val = LOOKUP(proc, "pthread__suspqueue", &addr);
116: if (val != 0) {
117: if (val == TD_ERR_NOSYM)
118: val = TD_ERR_NOLIB;
119: goto error;
120: }
121: proc->suspqaddr = addr;
122:
1.12 nathanw 123: val = LOOKUP(proc, "pthread__maxlwps", &addr);
124: if (val != 0) {
125: if (val == TD_ERR_NOSYM)
126: val = TD_ERR_NOLIB;
127: goto error;
128: }
129: proc->maxlwpsaddr = addr;
130:
131: val = LOOKUP(proc, "pthread__tsd_alloc", &addr);
1.2 thorpej 132: if (val != 0) {
133: if (val == TD_ERR_NOSYM)
134: val = TD_ERR_NOLIB;
135: goto error;
136: }
1.12 nathanw 137: proc->tsdallocaddr = addr;
1.2 thorpej 138:
1.12 nathanw 139: val = LOOKUP(proc, "pthread__tsd_destructors", &addr);
140: if (val != 0) {
141: if (val == TD_ERR_NOSYM)
142: val = TD_ERR_NOLIB;
143: goto error;
144: }
145: proc->tsddestaddr = addr;
146:
1.16 cl 147: val = READ(proc, proc->dbgaddr, &dbg, sizeof(int));
1.2 thorpej 148: if (val != 0)
149: goto error;
150:
151: if (dbg != 0) {
152: /* Another instance of libpthread_dbg is already attached. */
153: val = TD_ERR_INUSE;
154: goto error;
155: }
156:
1.13 nathanw 157: val = LOOKUP(proc, "pthread_stacksize_lg", &addr);
1.12 nathanw 158: if (val == 0)
159: proc->stacksizeaddr = addr;
160: else
161: proc->stacksizeaddr = NULL;
162: proc->stacksizelg = -1;
163: proc->stacksize = 0;
164: proc->stackmask = 0;
165:
1.23 nathanw 166: proc->regbuf = NULL;
167: proc->fpregbuf = NULL;
168:
1.2 thorpej 169: dbg = getpid();
170: /*
171: * If this fails it probably means we're debugging a core file and
172: * can't write to it.
173: * If it's something else we'll lose the next time we hit WRITE,
174: * but not before, and that's OK.
175: */
1.12 nathanw 176: WRITE(proc, proc->dbgaddr, &dbg, sizeof(int));
1.2 thorpej 177:
178: PTQ_INIT(&proc->threads);
179: PTQ_INIT(&proc->syncs);
180:
181: *procp = proc;
182:
183: return 0;
184:
185: error:
186: free(proc);
187: return val;
188: }
189:
/*
 * Detach from the target process: clear the pthread__dbg cell so a
 * future debugger may attach, and free all cached thread and
 * sync-object handles plus the td_proc_t itself.  Always returns 0.
 */
int
td_close(td_proc_t *proc)
{
	int dbg;
	td_thread_t *t, *next;
	td_sync_t *s, *nexts;

	dbg = 0;
	/*
	 * Error returns from this write are not really a problem;
	 * the process doesn't exist any more.
	 */
	WRITE(proc, proc->dbgaddr, &dbg, sizeof(int));

	/* Deallocate the list of thread structures */
	for (t = PTQ_FIRST(&proc->threads); t; t = next) {
		next = PTQ_NEXT(t, list);
		PTQ_REMOVE(&proc->threads, t, list);
		free(t);
	}
	/* Deallocate the list of sync objects */
	for (s = PTQ_FIRST(&proc->syncs); s; s = nexts) {
		nexts = PTQ_NEXT(s, list);
		PTQ_REMOVE(&proc->syncs, s, list);
		free(s);
	}

	/* regbuf and fpregbuf are allocated (and freed) as a pair. */
	if (proc->regbuf != NULL) {
		free(proc->regbuf);
		free(proc->fpregbuf);
	}

	free(proc);
	return 0;
}
225:
226:
227: int
228: td_thr_iter(td_proc_t *proc, int (*call)(td_thread_t *, void *), void *callarg)
229: {
230: int val;
1.12 nathanw 231: caddr_t next;
1.2 thorpej 232: struct pthread_queue_t allq;
233: td_thread_t *thread;
234:
1.12 nathanw 235: val = READ(proc, proc->allqaddr, &allq, sizeof(allq));
1.2 thorpej 236: if (val != 0)
237: return val;
238:
1.3 christos 239: next = (void *)allq.ptqh_first;
240: while (next != NULL) {
1.2 thorpej 241: val = td__getthread(proc, next, &thread);
242: if (val != 0)
243: return val;
244: val = (*call)(thread, callarg);
245: if (val != 0)
246: return 0;
247:
248: val = READ(proc,
1.8 nathanw 249: next + offsetof(struct __pthread_st, pt_allq.ptqe_next),
1.2 thorpej 250: &next, sizeof(next));
251: if (val != 0)
252: return val;
253: }
254: return 0;
255: }
256:
/*
 * Fill in `info' with a snapshot of the given thread's state —
 * address, run state, type, stack, join-queue status, errno, id and
 * signal masks — all read from the target process.
 * Returns 0 on success, TD_ERR_BADTHREAD for a structure without the
 * PT_MAGIC marker, or a remote-read error code.
 */
int
td_thr_info(td_thread_t *thread, td_thread_info_t *info)
{
	int tmp, tmp1, val;
	struct pthread_queue_t queue;

	/* Validate the thread structure via its magic number. */
	val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
	if (val != 0)
		return val;

	if (tmp != PT_MAGIC)
		return TD_ERR_BADTHREAD;

	info->thread_addr = thread->addr;
	/*
	 * pt_blockgen != pt_unblockgen is treated as "blocked in a
	 * system call" (_PT_STATE_BLOCKED_SYS), overriding pt_state,
	 * which is only read when the two generation counters agree.
	 */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_blockgen),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_unblockgen),
	    &tmp1, sizeof(tmp1))) != 0)
		return val;
	if (tmp != tmp1)
		tmp = _PT_STATE_BLOCKED_SYS;
	else if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_state),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	/* Map libpthread-internal states to debugger-visible states. */
	switch (tmp) {
	case PT_STATE_RUNNING:
		info->thread_state = TD_STATE_RUNNING;
		break;
	case PT_STATE_RUNNABLE:
		info->thread_state = TD_STATE_RUNNABLE;
		break;
	case _PT_STATE_BLOCKED_SYS:
		info->thread_state = TD_STATE_BLOCKED;
		break;
	case PT_STATE_BLOCKED_QUEUE:
		info->thread_state = TD_STATE_SLEEPING;
		break;
	case PT_STATE_SUSPENDED:
		info->thread_state = TD_STATE_SUSPENDED;
		break;
	case PT_STATE_ZOMBIE:
		info->thread_state = TD_STATE_ZOMBIE;
		break;
	default:
		info->thread_state = TD_STATE_UNKNOWN;
	}

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_type),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	/* Upcall and idle threads are library-internal ("system"). */
	switch (tmp) {
	case PT_THREAD_NORMAL:
		info->thread_type = TD_TYPE_USER;
		break;
	case PT_THREAD_UPCALL:
	case PT_THREAD_IDLE:
		info->thread_type = TD_TYPE_SYSTEM;
		break;
	default:
		info->thread_type = TD_TYPE_UNKNOWN;
	}

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_stack),
	    &info->thread_stack, sizeof(stack_t))) != 0)
		return val;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_joiners),
	    &queue, sizeof(queue))) != 0)
		return val;

	/* Only the emptiness of the copied join queue head is reported. */
	if (PTQ_EMPTY(&queue))
		info->thread_hasjoiners = 0;
	else
		info->thread_hasjoiners = 1;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_errno),
	    &info->thread_errno, sizeof(info->thread_errno))) != 0)
		return val;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_num),
	    &info->thread_id, sizeof(info->thread_id))) != 0)
		return val;

	/* Export ids offset by TN_OFFSET so 0 can act as a sentinel. */
	info->thread_id += TN_OFFSET;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_sigmask),
	    &info->thread_sigmask, sizeof(info->thread_sigmask))) != 0)
		return val;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_siglist),
	    &info->thread_sigpending, sizeof(info->thread_sigpending))) != 0)
		return val;

	return 0;
}
363:
364: int
1.4 nathanw 365: td_thr_getname(td_thread_t *thread, char *name, int len)
366: {
367: int val, tmp;
368: caddr_t nameaddr;
369:
370:
371: val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
372: if (val != 0)
373: return val;
374:
375: if (tmp != PT_MAGIC)
376: return TD_ERR_BADTHREAD;
377:
378: if ((val = READ(thread->proc,
1.8 nathanw 379: thread->addr + offsetof(struct __pthread_st, pt_name),
1.4 nathanw 380: &nameaddr, sizeof(nameaddr))) != 0)
381: return val;
382:
383: if (nameaddr == 0)
384: name[0] = '\0';
385: else if ((val = READ(thread->proc, nameaddr,
1.9 christos 386: name, (size_t)MIN(PTHREAD_MAX_NAMELEN_NP, len))) != 0)
1.4 nathanw 387: return val;
388:
389: return 0;
390: }
391:
/*
 * Fetch one register set of the given thread into `buf'.
 * regset 0 selects the general-purpose registers (struct reg),
 * regset 1 the FP registers (struct fpreg), and regset 2 (only where
 * PTHREAD_UCONTEXT_XREG_FLAG is defined) the extended registers.
 */
int
td_thr_getregs(td_thread_t *thread, int regset, void *buf)
{
	int tmp, tmp1, val;
	caddr_t addr;
	ucontext_t uc;

	/*
	 * Determine the thread's state; differing pt_blockgen and
	 * pt_unblockgen counters force _PT_STATE_BLOCKED_SYS regardless
	 * of pt_state (which is then not read at all).
	 */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_blockgen),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_unblockgen),
	    &tmp1, sizeof(tmp1))) != 0)
		return val;
	if (tmp != tmp1)
		tmp = _PT_STATE_BLOCKED_SYS;
	else if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_state),
	    &tmp, sizeof(tmp))) != 0)
		return val;

	switch (tmp) {
	case PT_STATE_RUNNING:
		/*
		 * The register state of the thread is live in the
		 * inferior process's register state.
		 */
		val = GETREGS(thread->proc, regset, thread->lwp, buf);
		if (val != 0)
			return val;
		break;
	case PT_STATE_RUNNABLE:
	case PT_STATE_SUSPENDED:
	case _PT_STATE_BLOCKED_SYS:
	case PT_STATE_BLOCKED_QUEUE:
		/*
		 * The register state of the thread is in the ucontext_t
		 * of the thread structure.  Candidate context pointers
		 * are tried in order: pt_blockuc (only for
		 * _PT_STATE_BLOCKED_SYS), then pt_trapuc, then pt_uc;
		 * the first non-NULL one is used.
		 */
		if (tmp == _PT_STATE_BLOCKED_SYS) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_blockuc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		} else
			addr = 0;
		if (addr == 0) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_trapuc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		}
		if (addr == 0) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_uc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		}
		val = READ(thread->proc, addr, &uc, sizeof(uc));
		if (val != 0)
			return val;

		/* Only convert register sets the ucontext actually holds. */
		switch (regset) {
		case 0:
			if ((uc.uc_flags & _UC_CPU) == 0)
				return TD_ERR_ERR;
			PTHREAD_UCONTEXT_TO_REG((struct reg *)buf, &uc);
			break;
		case 1:
			if ((uc.uc_flags & _UC_FPU) == 0)
				return TD_ERR_ERR;
			PTHREAD_UCONTEXT_TO_FPREG((struct fpreg *)buf, &uc);
			break;
#ifdef PTHREAD_UCONTEXT_XREG_FLAG
		case 2:
			if ((uc.uc_flags & PTHREAD_UCONTEXT_XREG_FLAG) == 0)
				return TD_ERR_ERR;
			PTHREAD_UCONTEXT_TO_XREG(buf, &uc);
			break;
#endif
		default:
			return TD_ERR_INVAL;
		}
		break;
	case PT_STATE_ZOMBIE:
	default:
		return TD_ERR_BADTHREAD;
	}

	return 0;
}
487:
/*
 * Store one register set of the given thread from `buf'.
 * regset selection matches td_thr_getregs(): 0 = struct reg,
 * 1 = struct fpreg, 2 = extended registers (where supported).
 */
int
td_thr_setregs(td_thread_t *thread, int regset, void *buf)
{
	int val, tmp, tmp1;
	caddr_t addr;
	ucontext_t uc;

	/* Determine the thread's state, as in td_thr_getregs(). */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_blockgen),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_unblockgen),
	    &tmp1, sizeof(tmp1))) != 0)
		return val;
	if (tmp != tmp1)
		tmp = _PT_STATE_BLOCKED_SYS;
	else if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_state),
	    &tmp, sizeof(tmp))) != 0)
		return val;

	switch (tmp) {
	case PT_STATE_RUNNING:
		/*
		 * The register state of the thread is live in the
		 * inferior process's register state.
		 */
		val = SETREGS(thread->proc, regset, thread->lwp, buf);
		if (val != 0)
			return val;
		break;
	case PT_STATE_RUNNABLE:
	case PT_STATE_SUSPENDED:
	case _PT_STATE_BLOCKED_SYS:
	case PT_STATE_BLOCKED_QUEUE:
		/*
		 * The register state of the thread is in the ucontext_t
		 * of the thread structure.
		 *
		 * Fetch the uc first, since there is state in it
		 * besides the registers that should be preserved.
		 * (Context pointer selection mirrors td_thr_getregs().)
		 */
		if (tmp == _PT_STATE_BLOCKED_SYS) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_blockuc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		} else
			addr = 0;
		if (addr == 0) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_trapuc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		}
		if (addr == 0) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_uc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		}
		val = READ(thread->proc,
		    addr, &uc, sizeof(uc));
		if (val != 0)
			return val;

		/* Merge the caller's registers into the fetched context. */
		switch (regset) {
		case 0:
			PTHREAD_REG_TO_UCONTEXT(&uc,
			    (struct reg *)(void *)buf);
			break;
		case 1:
			PTHREAD_FPREG_TO_UCONTEXT(&uc,
			    (struct fpreg *)(void *)buf);
			break;
#ifdef PTHREAD_UCONTEXT_XREG_FLAG
		case 2:
			PTHREAD_XREG_TO_UCONTEXT(&uc, buf);
			break;
#endif
		default:
			return TD_ERR_INVAL;
		}

		/* Write the updated context back to the target. */
		val = WRITE(thread->proc,
		    addr, &uc, sizeof(uc));
		if (val != 0)
			return val;

		break;
	case PT_STATE_ZOMBIE:
	default:
		return TD_ERR_BADTHREAD;
	}

	return 0;
}
589:
590: int
591: td_thr_join_iter(td_thread_t *thread, int (*call)(td_thread_t *, void *),
592: void *arg)
593: {
594: int val;
595: caddr_t next;
596: td_thread_t *thread2;
597: struct pthread_queue_t queue;
598:
599: if ((val = READ(thread->proc,
1.8 nathanw 600: thread->addr + offsetof(struct __pthread_st, pt_joiners),
1.6 nathanw 601: &queue, sizeof(queue))) != 0)
1.2 thorpej 602: return val;
603:
1.3 christos 604: next = (void *)queue.ptqh_first;
605: while (next != NULL) {
1.2 thorpej 606: val = td__getthread(thread->proc, next, &thread2);
607: if (val != 0)
608: return val;
1.4 nathanw 609: val = (*call)(thread2, arg);
1.2 thorpej 610: if (val != 0)
611: return 0;
612:
613: val = READ(thread->proc,
1.8 nathanw 614: next + offsetof(struct __pthread_st, pt_sleep.ptqe_next),
1.2 thorpej 615: &next, sizeof(next));
616: if (val != 0)
617: return val;
618: }
619:
620: return 0;
621: }
622:
/*
 * Classify the synchronization object at s->addr by its magic number
 * and fill in `info' with its type, size, waiter status and
 * type-specific data.  An unrecognized magic yields TD_SYNC_UNKNOWN
 * with a 0 return.
 */
int
td_sync_info(td_sync_t *s, td_sync_info_t *info)
{
	int val, magic, n;
	struct pthread_queue_t queue;
	pthread_spin_t slock;
	pthread_t taddr;
	td_proc_t *proc = s->proc;

	val = READ(proc, s->addr, &magic, sizeof(magic));
	if (val != 0)
		return val;

	info->sync_type = TD_SYNC_UNKNOWN;
	info->sync_size = 0;
	info->sync_haswaiters = 0;
	switch (magic) {
	case _PT_MUTEX_MAGIC:
		info->sync_type = TD_SYNC_MUTEX;
		info->sync_size = sizeof(struct __pthread_mutex_st);
		if ((val = READ(s->proc,
		    s->addr + offsetof(struct __pthread_mutex_st, ptm_blocked),
		    &queue, sizeof(queue))) != 0)
			return val;

		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;
		/*
		 * The cast to (void *) is to explicitly throw away the
		 * volatile qualifier on pthread_spin_t,
		 * from __cpu_simple_lock_t.
		 */
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_mutex_st, ptm_lock),
		    (void *)&slock, sizeof(slock))) != 0)
			return val;
		if (slock == __SIMPLELOCK_LOCKED) {
			info->sync_data.mutex.locked = 1;
			if ((val = READ(proc,
			    s->addr + offsetof(struct __pthread_mutex_st,
				ptm_owner),
			    &taddr, sizeof(taddr))) != 0)
				return val;
			/*
			 * pthread__id() needs the target's stack size
			 * and mask, fetched lazily here.
			 */
			if ((val = td__getstacksize(proc)) != 0)
				return val;
			taddr = pthread__id(taddr);
			td__getthread(proc, (void *)taddr,
			    &info->sync_data.mutex.owner);
		} else
			info->sync_data.mutex.locked = 0;
		break;
	case _PT_COND_MAGIC:
		info->sync_type = TD_SYNC_COND;
		info->sync_size = sizeof(struct __pthread_cond_st);
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_cond_st, ptc_waiters),
		    &queue, sizeof(queue))) != 0)
			return val;
		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;
		break;
	case _PT_SPINLOCK_MAGIC:
		info->sync_type = TD_SYNC_SPIN;
		info->sync_size = sizeof(struct __pthread_spinlock_st);
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_spinlock_st, pts_spin),
		    (void *)&slock, sizeof(slock))) != 0)
			return val;
		if (slock == __SIMPLELOCK_LOCKED)
			info->sync_data.spin.locked = 1;
		break;
	case PT_MAGIC:
		/* A thread structure: report it as a join object. */
		info->sync_type = TD_SYNC_JOIN;
		info->sync_size = sizeof(struct __pthread_st);
		td__getthread(proc, s->addr,
		    &info->sync_data.join.thread);
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_st, pt_joiners),
		    &queue, sizeof(queue))) != 0)
			return val;

		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;
		break;
	case (int)_PT_RWLOCK_MAGIC:
		info->sync_type = TD_SYNC_RWLOCK;
		info->sync_size = sizeof(struct __pthread_rwlock_st);
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_rwlock_st, ptr_rblocked),
		    &queue, sizeof(queue))) != 0)
			return val;
		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;

		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_rwlock_st, ptr_wblocked),
		    &queue, sizeof(queue))) != 0)
			return val;
		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;


		info->sync_data.rwlock.locked = 0;
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_rwlock_st, ptr_nreaders),
		    &n, sizeof(n))) != 0)
			return val;
		info->sync_data.rwlock.readlocks = n;
		if (n > 0)
			info->sync_data.rwlock.locked = 1;

		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_rwlock_st, ptr_writer),
		    &taddr, sizeof(taddr))) != 0)
			return val;
		if (taddr != 0) {
			info->sync_data.rwlock.locked = 1;
			td__getthread(proc, (void *)taddr,
			    &info->sync_data.rwlock.writeowner);
		}
		/*
		 * NOTE(review): this fall-through into the default case
		 * returns before info->sync_addr is set for rwlocks,
		 * unlike every other recognized type — looks
		 * unintentional; confirm against callers.
		 */
		/*FALLTHROUGH*/
	default:
		return (0);
	}

	info->sync_addr = s->addr;

	return 0;
}
752:
753:
754: int
755: td_sync_waiters_iter(td_sync_t *s, int (*call)(td_thread_t *, void *),
756: void *arg)
757: {
758: int val, magic;
759: caddr_t next;
760: struct pthread_queue_t queue;
761: td_thread_t *thread;
762:
763: val = READ(s->proc, s->addr, &magic, sizeof(magic));
764: if (val != 0)
765: return val;
766:
767: switch (magic) {
768: case _PT_MUTEX_MAGIC:
769: if ((val = READ(s->proc,
1.8 nathanw 770: s->addr + offsetof(struct __pthread_mutex_st, ptm_blocked),
1.6 nathanw 771: &queue, sizeof(queue))) != 0)
1.2 thorpej 772: return val;
773: break;
774: case _PT_COND_MAGIC:
775: if ((val = READ(s->proc,
1.8 nathanw 776: s->addr + offsetof(struct __pthread_cond_st, ptc_waiters),
1.6 nathanw 777: &queue, sizeof(queue))) != 0)
1.2 thorpej 778: return val;
779: break;
780: case PT_MAGIC:
781: /* Redundant with join_iter, but what the hell... */
782: if ((val = READ(s->proc,
1.8 nathanw 783: s->addr + offsetof(struct __pthread_st, pt_joiners),
1.6 nathanw 784: &queue, sizeof(queue))) != 0)
1.2 thorpej 785: return val;
786: break;
787: default:
788: return (0);
789: }
790:
1.3 christos 791: next = (void *)queue.ptqh_first;
792: while (next != NULL) {
1.2 thorpej 793: val = td__getthread(s->proc, next, &thread);
794: if (val != 0)
795: return val;
796: val = (*call)(thread, arg);
797: if (val != 0)
798: return 0;
799:
800: val = READ(s->proc,
1.8 nathanw 801: next + offsetof(struct __pthread_st, pt_sleep.ptqe_next),
1.2 thorpej 802: &next, sizeof(next));
803: if (val != 0)
804: return val;
805: }
806: return 0;
807: }
808:
809:
810: int
811: td_map_addr2sync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp)
812: {
813: int magic, val;
814:
815: val = READ(proc, addr, &magic, sizeof(magic));
816: if (val != 0)
817: return val;
818:
819: if ((magic != _PT_MUTEX_MAGIC) &&
820: (magic != _PT_COND_MAGIC) &&
1.26 ! nathanw 821: (magic != _PT_SPINLOCK_MAGIC) &&
! 822: (magic != _PT_RWLOCK_MAGIC))
1.2 thorpej 823: return TD_ERR_NOOBJ;
824:
825: val = td__getsync(proc, addr, syncp);
826: if (val != 0)
827: return val;
828:
829: return 0;
830: }
831:
832:
833: int
834: td_map_pth2thr(td_proc_t *proc, pthread_t thread, td_thread_t **threadp)
835: {
836: int magic, val;
837:
1.3 christos 838: val = READ(proc, (void *)thread, &magic, sizeof(magic));
1.2 thorpej 839: if (val != 0)
840: return val;
841:
842: if (magic != PT_MAGIC)
843: return TD_ERR_NOOBJ;
844:
1.3 christos 845: val = td__getthread(proc, (void *)thread, threadp);
1.2 thorpej 846: if (val != 0)
847: return val;
848:
849: return 0;
850: }
851:
852: int
853: td_map_id2thr(td_proc_t *proc, int threadid, td_thread_t **threadp)
854: {
855: int val, num;
1.12 nathanw 856: caddr_t next;
1.2 thorpej 857: struct pthread_queue_t allq;
858: td_thread_t *thread;
859:
860:
1.12 nathanw 861: val = READ(proc, proc->allqaddr, &allq, sizeof(allq));
1.2 thorpej 862: if (val != 0)
863: return val;
864:
1.20 nathanw 865: /* Correct for offset */
866: threadid -= TN_OFFSET;
1.3 christos 867: next = (void *)allq.ptqh_first;
868: while (next != NULL) {
1.2 thorpej 869: val = READ(proc,
1.8 nathanw 870: next + offsetof(struct __pthread_st, pt_num),
1.2 thorpej 871: &num, sizeof(num));
872:
873: if (num == threadid)
874: break;
875:
876: val = READ(proc,
1.8 nathanw 877: next + offsetof(struct __pthread_st, pt_allq.ptqe_next),
1.2 thorpej 878: &next, sizeof(next));
879: if (val != 0)
880: return val;
881: }
882:
883: if (next == 0) {
884: /* A matching thread was not found. */
885: return TD_ERR_NOOBJ;
886: }
887:
888: val = td__getthread(proc, next, &thread);
889: if (val != 0)
890: return val;
891: *threadp = thread;
892:
893: return 0;
894: }
895:
/*
 * Return the thread handle of the thread currently running on the
 * given LWP.  The thread structure is located from the LWP's stack
 * pointer via pthread__id() (which relies on the target's stack
 * size/mask, fetched lazily by td__getstacksize()).
 */
int
td_map_lwp2thr(td_proc_t *proc, int lwp, td_thread_t **threadp)
{
	int val, magic;
	struct reg gregs;
	ucontext_t uc;
	void *th;

	val = GETREGS(proc, 0, lwp, &gregs);
	if (val != 0)
		return val;

	/* Build a ucontext from the registers to extract the SP portably. */
	PTHREAD_REG_TO_UCONTEXT(&uc, &gregs);

	val = td__getstacksize(proc);
	if (val != 0)
		return val;

	th = pthread__id(pthread__uc_sp(&uc));

	/* Validate that what we found really is a thread structure. */
	val = READ(proc, th, &magic, sizeof(magic));
	if (val != 0)
		return val;

	if (magic != PT_MAGIC)
		return TD_ERR_NOOBJ;

	val = td__getthread(proc, th, threadp);
	if (val != 0)
		return val;

	/* Record the LWP binding on the handle. */
	(*threadp)->lwp = lwp;

	return 0;
}
932:
933: int
934: td_map_lwps(td_proc_t *proc)
935: {
936: int i, val, nlwps;
937: td_thread_t *thread;
938:
1.12 nathanw 939: val = READ(proc, proc->maxlwpsaddr, &nlwps, sizeof(nlwps));
1.2 thorpej 940: if (val != 0)
941: return val;
942:
1.18 nathanw 943: if (nlwps < 1)
944: nlwps = 1; /* always at least one LWP */
945:
946: PTQ_FOREACH(thread, &proc->threads, list) {
947: thread->lwp = -1;
948: }
949:
1.2 thorpej 950: for (i = 1; i <= nlwps; i++) {
951: /*
952: * Errors are deliberately ignored for the call to
953: * td_map_lwp2thr(); it is entirely likely that not
954: * all LWPs in the range 1..nlwps exist, and that's
955: * not a problem.
956: */
957: td_map_lwp2thr(proc, i, &thread);
958: }
959: return 0;
960: }
961:
962: int
963: td_tsd_iter(td_proc_t *proc,
964: int (*call)(pthread_key_t, void (*)(void *), void *), void *arg)
965: {
966: int val;
967: int i, allocated;
968: void (*destructor)(void *);
969:
970: for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
1.12 nathanw 971: val = READ(proc, proc->tsdallocaddr + i * sizeof(int),
1.6 nathanw 972: &allocated, sizeof(allocated));
1.2 thorpej 973: if (val != 0)
974: return val;
975:
976: if (allocated) {
1.12 nathanw 977: val = READ(proc, proc->tsddestaddr +
978: i * sizeof(destructor),
1.2 thorpej 979: &destructor, sizeof(destructor));
980: if (val != 0)
981: return val;
982:
983: val = (call)(i, destructor, arg);
984: if (val != 0)
985: return val;
986: }
987: }
988:
989: return 0;
990: }
991:
992: /* Get the synchronization object that the thread is sleeping on */
993: int
994: td_thr_sleepinfo(td_thread_t *thread, td_sync_t **s)
995: {
996: int val;
997: caddr_t addr;
998:
999: if ((val = READ(thread->proc,
1.8 nathanw 1000: thread->addr + offsetof(struct __pthread_st, pt_sleepobj),
1.6 nathanw 1001: &addr, sizeof(addr))) != 0)
1.2 thorpej 1002: return val;
1003:
1004: td__getsync(thread->proc, addr, s);
1005:
1006: return 0;
1007:
1008: }
1009:
/*
 * DPTQ_REMOVE: remove thread `elm' from the PTQ-style queue at remote
 * address `head' by patching the queue links inside the *target*
 * process — a remote-memory analogue of PTQ_REMOVE.  It expands READ
 * and WRITE calls, so it may only be used inside a function that has
 * a td_thread_t *`thread' in scope and can `return' an int error code.
 */
#define DPTQ_REMOVE(head, elm, field) do { \
	int _val; \
	PTQ_ENTRY(__pthread_st) _qent; \
	\
	_val = READ(thread->proc, \
	    (elm) + offsetof(struct __pthread_st, field), \
	    &_qent, sizeof(_qent)); \
	if (_val != 0) \
		return _val; \
	if (_qent.ptqe_next != NULL) { \
		_val = WRITE(thread->proc, \
		    (caddr_t)(void *)_qent.ptqe_next + \
		    offsetof(struct __pthread_st, field.ptqe_prev), \
		    &_qent.ptqe_prev, sizeof(_qent.ptqe_prev)); \
		if (_val != 0) \
			return _val; \
	} else { \
		_val = WRITE(thread->proc, (head) + \
		    offsetof(struct pthread_queue_t, ptqh_last), \
		    &_qent.ptqe_prev, sizeof(_qent.ptqe_prev)); \
		if (_val != 0) \
			return _val; \
	} \
	_val = WRITE(thread->proc, (caddr_t)(void *)_qent.ptqe_prev, \
	    &_qent.ptqe_next, sizeof(_qent.ptqe_next)); \
	if (_val != 0) \
		return _val; \
	} while (/*CONSTCOND*/0)
1038:
/*
 * DPTQ_INSERT_TAIL: append thread `elm' to the PTQ-style queue at
 * remote address `head' inside the target process — a remote-memory
 * analogue of PTQ_INSERT_TAIL.  The commented-out C statements show
 * the local-memory operation each READ/WRITE pair emulates.  Like
 * DPTQ_REMOVE, it requires a td_thread_t *`thread' in scope and may
 * `return' an int error code.
 */
#define DPTQ_INSERT_TAIL(head, elm, field) do { \
	int _val; \
	struct pthread_queue_t _qhead; \
	PTQ_ENTRY(__pthread_st) _qent; \
	\
	/* if ((head)->ptqh_last == NULL) */ \
	/* (head)->ptqh_last = &(head)->ptqh_first; */ \
	_val = READ(thread->proc, (head), &_qhead, sizeof(_qhead)); \
	\
	if (_val != 0) \
		return _val; \
	if (_qhead.ptqh_last == NULL) \
		_qhead.ptqh_last = (void *)(head); \
	\
	/* (elm)->field.ptqe_prev = (head)->ptqh_last; */ \
	_qent.ptqe_prev = _qhead.ptqh_last; \
	\
	/* *(head)->ptqh_last = (elm); */ \
	_qent.ptqe_next = (void *)elm; \
	_val = WRITE(thread->proc, (caddr_t)(void *)_qhead.ptqh_last, \
	    &_qent.ptqe_next, sizeof(_qent.ptqe_next)); \
	if (_val != 0) \
		return _val; \
	\
	/* (elm)->field.ptqe_next = NULL; */ \
	_qent.ptqe_next = NULL; \
	\
	/* (head)->ptqh_last = &(elm)->field.ptqe_next; */ \
	_qhead.ptqh_last = (void *) ((elm) + \
	    offsetof(struct __pthread_st, field.ptqe_next)); \
	\
	_val = WRITE(thread->proc, (elm) + \
	    offsetof(struct __pthread_st, field), \
	    &_qent, sizeof(_qent)); \
	if (_val != 0) \
		return _val; \
	_val = WRITE(thread->proc, \
	    (head) + offsetof(struct pthread_queue_t, ptqh_last), \
	    &_qhead.ptqh_last, sizeof(_qhead.ptqh_last)); \
	if (_val != 0) \
		return _val; \
	} while (/*CONSTCOND*/0)
1081:
1082:
1083: /* Suspend a thread from running */
1084: int
1085: td_thr_suspend(td_thread_t *thread)
1086: {
1087: int tmp, tmp1, val;
1.22 nathanw 1088: caddr_t addr, sp, nthreadaddr, qaddr;
1.23 nathanw 1089: size_t rsize;
1.22 nathanw 1090: td_thread_t *nthread;
1.17 nathanw 1091: ucontext_t uc;
1092: struct pthread_queue_t qhead;
1093:
1094: /* validate the thread */
1095: val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
1096: if (val != 0)
1097: return val;
1098: if (tmp != PT_MAGIC)
1099: return TD_ERR_BADTHREAD;
1100:
1101: val = READ(thread->proc,
1102: thread->addr + offsetof(struct __pthread_st, pt_type),
1103: &tmp, sizeof(tmp));
1104: if (val != 0)
1105: return val;
1106: if (tmp != PT_THREAD_NORMAL)
1107: return TD_ERR_BADTHREAD;
1108:
1109: /* find the thread's current state */
1110: if ((val = READ(thread->proc,
1111: thread->addr + offsetof(struct __pthread_st, pt_blockgen),
1112: &tmp, sizeof(tmp))) != 0)
1113: return val;
1114: if ((val = READ(thread->proc,
1115: thread->addr + offsetof(struct __pthread_st, pt_unblockgen),
1116: &tmp1, sizeof(tmp1))) != 0)
1117: return val;
1118: if (tmp != tmp1)
1119: tmp = _PT_STATE_BLOCKED_SYS;
1120: else if ((val = READ(thread->proc,
1121: thread->addr + offsetof(struct __pthread_st, pt_state),
1122: &tmp, sizeof(tmp))) != 0)
1123: return val;
1124:
1125: switch (tmp) {
1126: case PT_STATE_RUNNING:
1127: /* grab the current thread's state and stash it */
1.23 nathanw 1128: if (thread->proc->regbuf == NULL) {
1129: val = REGSIZE(thread->proc, 0, &rsize);
1130: if (val != 0)
1131: return val;
1132: errno = 0;
1133: thread->proc->regbuf = malloc(rsize);
1134: if ((thread->proc->regbuf == NULL) &&
1135: (errno == ENOMEM))
1136: return TD_ERR_NOMEM;
1137: val = REGSIZE(thread->proc, 1, &rsize);
1138: if (val != 0) {
1139: free(thread->proc->regbuf);
1140: thread->proc->regbuf = NULL;
1141: return TD_ERR_NOMEM;
1142: }
1143: thread->proc->fpregbuf = malloc(rsize);
1144: if ((thread->proc->fpregbuf == NULL) &&
1145: (errno == ENOMEM)) {
1146: free(thread->proc->regbuf);
1147: thread->proc->regbuf = NULL;
1148: return TD_ERR_NOMEM;
1149: }
1150: }
1151: val = GETREGS(thread->proc, 0, thread->lwp,
1152: thread->proc->regbuf);
1.17 nathanw 1153: if (val != 0)
1154: return val;
1.23 nathanw 1155: val = GETREGS(thread->proc, 1, thread->lwp,
1156: thread->proc->fpregbuf);
1.17 nathanw 1157: if (val != 0)
1158: return val;
1159: _INITCONTEXT_U(&uc);
1.23 nathanw 1160: PTHREAD_REG_TO_UCONTEXT(&uc, thread->proc->regbuf);
1161: PTHREAD_FPREG_TO_UCONTEXT(&uc, thread->proc->fpregbuf);
1.25 scw 1162: sp = (caddr_t)(intptr_t)pthread__uc_sp(&uc);
1.17 nathanw 1163: sp -= sizeof(uc);
1164: #ifdef _UC_UCONTEXT_ALIGN
1165: sp = (caddr_t) ((unsigned long)sp & _UC_UCONTEXT_ALIGN);
1166: #endif
1167: val = WRITE(thread->proc, sp, &uc, sizeof(uc));
1168: if (val != 0)
1169: return val;
1170: val = WRITE(thread->proc,
1171: thread->addr + offsetof(struct __pthread_st, pt_uc),
1172: &sp, sizeof(sp));
1173:
1174: /* get a thread from the runq or idleq and put it on the cpu */
1175: qaddr = thread->proc->runqaddr;
1176: val = READ(thread->proc, qaddr, &qhead, sizeof(qhead));
1177: if (val != 0)
1178: return val;
1179: if (qhead.ptqh_first == NULL) {
1180: qaddr = thread->proc->idleqaddr;
1181: val = READ(thread->proc, qaddr, &qhead, sizeof(qhead));
1182: if (val != 0)
1183: return val;
1184: if (qhead.ptqh_first == NULL) {
1185: /* Well, crap. This isn't supposed to happen */
1186: return TD_ERR_ERR;
1187: }
1188: }
1189:
1.22 nathanw 1190: nthreadaddr = (caddr_t)(void *)qhead.ptqh_first;
1191: DPTQ_REMOVE(qaddr, nthreadaddr, pt_runq);
1.17 nathanw 1192: val = READ(thread->proc,
1.22 nathanw 1193: nthreadaddr + offsetof(struct __pthread_st, pt_trapuc),
1.17 nathanw 1194: &addr, sizeof(addr));
1195: if (val != 0)
1196: return val;
1197: if (addr == 0) {
1198: val = READ(thread->proc,
1.22 nathanw 1199: nthreadaddr + offsetof(struct __pthread_st, pt_uc),
1.17 nathanw 1200: &addr, sizeof(addr));
1201: if (val != 0)
1202: return val;
1203: }
1204: val = READ(thread->proc, addr, &uc, sizeof(uc));
1205: if (val != 0)
1206: return val;
1.23 nathanw 1207: PTHREAD_UCONTEXT_TO_REG(thread->proc->regbuf, &uc);
1208: PTHREAD_UCONTEXT_TO_FPREG(thread->proc->fpregbuf, &uc);
1209: val = SETREGS(thread->proc, 0, thread->lwp,
1210: thread->proc->regbuf);
1.17 nathanw 1211: if (val != 0)
1212: return val;
1.23 nathanw 1213: val = SETREGS(thread->proc, 1, thread->lwp,
1214: thread->proc->fpregbuf);
1.17 nathanw 1215: if (val != 0)
1216: return val;
1217:
1.22 nathanw 1218: td__getthread(thread->proc, nthreadaddr, &nthread);
1.21 nathanw 1219: nthread->lwp = thread->lwp;
1220: thread->lwp = -1;
1.17 nathanw 1221: break;
1222: case PT_STATE_RUNNABLE:
1223: /* remove from runq */
1224: DPTQ_REMOVE(thread->proc->runqaddr, thread->addr, pt_runq);
1225: break;
1226: case PT_STATE_BLOCKED_QUEUE:
1227: /* remove from the particular sleepq */
1228: val = READ(thread->proc, thread->addr +
1229: offsetof(struct __pthread_st, pt_sleepq),
1230: &addr, sizeof(addr));
1231: DPTQ_REMOVE(addr, thread->addr, pt_sleep);
1232: break;
1233: case _PT_STATE_BLOCKED_SYS:
1234: /* set flag so unblock upcall will suspend */
1235: val = READ(thread->proc, thread->addr +
1236: offsetof(struct __pthread_st, pt_flags),
1237: &tmp, sizeof(tmp));
1238: if (val != 0)
1239: return val;
1240: tmp |= PT_FLAG_SUSPENDED;
1241: val = WRITE(thread->proc, thread->addr +
1242: offsetof(struct __pthread_st, pt_flags),
1243: &tmp, sizeof(tmp));
1244: /* all done, don't want to actually go on the queue yet. */
1245: return 0;
1246: case PT_STATE_SUSPENDED:
1247: /* don't do anything */
1248: return 0;
1249: case PT_STATE_ZOMBIE:
1250: case PT_STATE_DEAD:
1251: /* suspending these isn't meaningful */
1252: return TD_ERR_BADTHREAD;
1253: default:
1254: return TD_ERR_ERR;
1255: }
1256:
1257: DPTQ_INSERT_TAIL(thread->proc->suspqaddr, thread->addr, pt_runq);
1258: tmp = PT_STATE_SUSPENDED;
1259: val = WRITE(thread->proc, thread->addr +
1260: offsetof(struct __pthread_st, pt_state),
1261: &tmp, sizeof(tmp));
1262: if (val != 0)
1263: return val;
1264:
1265: return 0;
1266: }
1267:
1268: /* Restore a suspended thread to its previous state */
1269: int
1270: td_thr_resume(td_thread_t *thread)
1271: {
1272: int tmp, tmp1, val;
1273:
1274: /* validate the thread */
1275: val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
1276: if (val != 0)
1277: return val;
1278: if (tmp != PT_MAGIC)
1279: return TD_ERR_BADTHREAD;
1280:
1281: /* clear flag */
1282: val = READ(thread->proc, thread->addr +
1283: offsetof(struct __pthread_st, pt_flags),
1284: &tmp, sizeof(tmp));
1285: if (val != 0)
1286: return val;
1287: tmp &= ~PT_FLAG_SUSPENDED;
1288: val = WRITE(thread->proc, thread->addr +
1289: offsetof(struct __pthread_st, pt_flags),
1290: &tmp, sizeof(tmp));
1291:
1292: /* find the thread's current state */
1293: if ((val = READ(thread->proc,
1294: thread->addr + offsetof(struct __pthread_st, pt_blockgen),
1295: &tmp, sizeof(tmp))) != 0)
1296: return val;
1297: if ((val = READ(thread->proc,
1298: thread->addr + offsetof(struct __pthread_st, pt_unblockgen),
1299: &tmp1, sizeof(tmp1))) != 0)
1300: return val;
1301: if (tmp != tmp1)
1302: tmp = _PT_STATE_BLOCKED_SYS;
1303: else if ((val = READ(thread->proc,
1304: thread->addr + offsetof(struct __pthread_st, pt_state),
1305: &tmp, sizeof(tmp))) != 0)
1306: return val;
1307:
1308: if (tmp == PT_STATE_SUSPENDED) {
1309: DPTQ_REMOVE(thread->proc->suspqaddr, thread->addr, pt_runq);
1310: /* emulate pthread__sched */
1311: tmp = PT_STATE_RUNNABLE;
1312: val = WRITE(thread->proc,
1313: thread->addr + offsetof(struct __pthread_st, pt_state),
1314: &tmp, sizeof(tmp));
1315: DPTQ_INSERT_TAIL(thread->proc->runqaddr, thread->addr, pt_runq);
1316: }
1317:
1318:
1319: return 0;
1320: }
1.2 thorpej 1321:
1322:
1323: static int
1324: td__getthread(td_proc_t *proc, caddr_t addr, td_thread_t **threadp)
1325: {
1326: td_thread_t *thread;
1327:
1328: /*
1329: * Check if we've allocated a descriptor for this thread.
1330: * Sadly, this makes iterating over a set of threads O(N^2)
1331: * in the number of threads. More sophisticated data structures
1332: * can wait.
1333: */
1334: PTQ_FOREACH(thread, &proc->threads, list) {
1335: if (thread->addr == addr)
1336: break;
1337: }
1338: if (thread == NULL) {
1339: thread = malloc(sizeof(*thread));
1340: if (thread == NULL)
1341: return TD_ERR_NOMEM;
1342: thread->proc = proc;
1343: thread->addr = addr;
1344: thread->lwp = 0;
1345: PTQ_INSERT_HEAD(&proc->threads, thread, list);
1346: }
1347:
1348: *threadp = thread;
1349: return 0;
1350: }
1351:
1352:
1353: static int
1354: td__getsync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp)
1355: {
1356: td_sync_t *s;
1357:
1358: /* Check if we've allocated a descriptor for this object. */
1359: PTQ_FOREACH(s, &proc->syncs, list) {
1360: if (s->addr == addr)
1361: break;
1362: }
1363: /* Allocate a fresh one */
1364: if (s == NULL) {
1365: s = malloc(sizeof(*s));
1366: if (s == NULL)
1367: return TD_ERR_NOMEM;
1368: s->proc = proc;
1369: s->addr = addr;
1370: PTQ_INSERT_HEAD(&proc->syncs, s, list);
1371: }
1372:
1373: *syncp = s;
1374: return 0;
1375: }
1376:
1377:
1378: int
1379: td_thr_tsd(td_thread_t *thread, pthread_key_t key, void **value)
1380: {
1381: int val;
1382:
1383: val = READ(thread->proc, thread->addr +
1.8 nathanw 1384: offsetof(struct __pthread_st, pt_specific) +
1.6 nathanw 1385: key * sizeof(void *), value, sizeof(*value));
1.2 thorpej 1386:
1387: return val;
1388: }
1389:
1.10 cl 1390:
1391: static int
1392: td__getstacksize(td_proc_t *proc)
1393: {
1394: int lg, val;
1395:
1.12 nathanw 1396: if (proc->stacksizeaddr == NULL)
1397: return 0;
1398:
1399: val = READ(proc, proc->stacksizeaddr, &lg, sizeof(int));
1.10 cl 1400: if (val != 0)
1.12 nathanw 1401: return 0;
1402: if (lg != proc->stacksizelg) {
1403: proc->stacksizelg = lg;
1404: proc->stacksize = (1 << lg);
1405: proc->stackmask = proc->stacksize - 1;
1.10 cl 1406: }
1407: return 0;
1408: }
CVSweb <webmaster@jp.NetBSD.org>