Annotation of src/lib/libpthread_dbg/pthread_dbg.c, Revision 1.16
1.16 ! cl 1: /* $NetBSD: pthread_dbg.c,v 1.15 2004/02/11 21:07:18 nathanw Exp $ */
1.2 thorpej 2:
3: /*-
4: * Copyright (c) 2002 Wasabi Systems, Inc.
5: * All rights reserved.
6: *
7: * Written by Nathan J. Williams for Wasabi Systems, Inc.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: * 3. All advertising materials mentioning features or use of this software
18: * must display the following acknowledgement:
19: * This product includes software developed for the NetBSD Project by
20: * Wasabi Systems, Inc.
21: * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22: * or promote products derived from this software without specific
23: * prior written permission.
24: *
25: * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35: * POSSIBILITY OF SUCH DAMAGE.
36: */
1.5 lukem 37:
38: #include <sys/cdefs.h>
1.16 ! cl 39: __RCSID("$NetBSD: pthread_dbg.c,v 1.15 2004/02/11 21:07:18 nathanw Exp $");
1.2 thorpej 40:
41: #include <stddef.h>
42: #include <stdlib.h>
43: #include <string.h>
44: #include <errno.h>
45: #include <sys/types.h>
46: #include <unistd.h>
47:
48: #include <pthread.h>
49: #include <pthread_int.h>
50: #include <pthread_dbg.h>
51: #include <pthread_dbg_int.h>
52: #include <machine/reg.h>
53:
/*
 * Local minimum macro.  Both arguments are evaluated twice, so pass
 * only side-effect-free expressions.
 */
#define MIN(a,b) ((a)<(b) ? (a) : (b))

/*
 * When libpthread is not built with a compile-time fixed stack size,
 * the stack mask consumed by pthread__id() must come from the value
 * cached per-process by td__getstacksize() below.  The functions that
 * use pthread__id() keep their proc pointer in a variable literally
 * named "proc" so this macro resolves correctly.
 */
#ifndef PT_FIXEDSTACKSIZE_LG
#undef PT_STACKMASK
#define PT_STACKMASK (proc->stackmask)
#endif

/* Internal helpers: cache lookup/creation for thread and sync
 * descriptors, and lazy refresh of the target's stack-size state. */
static int td__getthread(td_proc_t *proc, caddr_t addr, td_thread_t **threadp);
static int td__getsync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp);
static int td__getstacksize(td_proc_t *proc);
64:
1.2 thorpej 65: int
66: td_open(struct td_proc_callbacks_t *cb, void *arg, td_proc_t **procp)
67: {
68: td_proc_t *proc;
1.12 nathanw 69: caddr_t addr;
1.2 thorpej 70: int dbg;
71: int val;
72:
73: proc = malloc(sizeof(*proc));
74: if (proc == NULL)
75: return TD_ERR_NOMEM;
76: proc->cb = cb;
77: proc->arg = arg;
78:
1.12 nathanw 79: val = LOOKUP(proc, "pthread__dbg", &addr);
80: if (val != 0) {
81: if (val == TD_ERR_NOSYM)
82: val = TD_ERR_NOLIB;
83: goto error;
84: }
85: proc->dbgaddr = addr;
86:
87: val = LOOKUP(proc, "pthread__allqueue", &addr);
88: if (val != 0) {
89: if (val == TD_ERR_NOSYM)
90: val = TD_ERR_NOLIB;
91: goto error;
92: }
93: proc->allqaddr = addr;
94:
95: val = LOOKUP(proc, "pthread__maxlwps", &addr);
96: if (val != 0) {
97: if (val == TD_ERR_NOSYM)
98: val = TD_ERR_NOLIB;
99: goto error;
100: }
101: proc->maxlwpsaddr = addr;
102:
103: val = LOOKUP(proc, "pthread__tsd_alloc", &addr);
1.2 thorpej 104: if (val != 0) {
105: if (val == TD_ERR_NOSYM)
106: val = TD_ERR_NOLIB;
107: goto error;
108: }
1.12 nathanw 109: proc->tsdallocaddr = addr;
1.2 thorpej 110:
1.12 nathanw 111: val = LOOKUP(proc, "pthread__tsd_destructors", &addr);
112: if (val != 0) {
113: if (val == TD_ERR_NOSYM)
114: val = TD_ERR_NOLIB;
115: goto error;
116: }
117: proc->tsddestaddr = addr;
118:
1.16 ! cl 119: val = READ(proc, proc->dbgaddr, &dbg, sizeof(int));
1.2 thorpej 120: if (val != 0)
121: goto error;
122:
123: if (dbg != 0) {
124: /* Another instance of libpthread_dbg is already attached. */
125: val = TD_ERR_INUSE;
126: goto error;
127: }
128:
1.13 nathanw 129: val = LOOKUP(proc, "pthread_stacksize_lg", &addr);
1.12 nathanw 130: if (val == 0)
131: proc->stacksizeaddr = addr;
132: else
133: proc->stacksizeaddr = NULL;
134: proc->stacksizelg = -1;
135: proc->stacksize = 0;
136: proc->stackmask = 0;
137:
1.2 thorpej 138: dbg = getpid();
139: /*
140: * If this fails it probably means we're debugging a core file and
141: * can't write to it.
142: * If it's something else we'll lose the next time we hit WRITE,
143: * but not before, and that's OK.
144: */
1.12 nathanw 145: WRITE(proc, proc->dbgaddr, &dbg, sizeof(int));
1.2 thorpej 146:
147: PTQ_INIT(&proc->threads);
148: PTQ_INIT(&proc->syncs);
149:
150: *procp = proc;
151:
152: return 0;
153:
154: error:
155: free(proc);
156: return val;
157: }
158:
/*
 * Detach from the target: release the debugger claim by zeroing
 * pthread__dbg in the target, then free every cached thread and sync
 * descriptor and the handle itself.  Always returns 0.
 */
int
td_close(td_proc_t *proc)
{
	int dbg;
	td_thread_t *t, *next;
	td_sync_t *s, *nexts;

	dbg = 0;
	/*
	 * Error returns from this write are not really a problem;
	 * the process doesn't exist any more.
	 */
	WRITE(proc, proc->dbgaddr, &dbg, sizeof(int));

	/* Deallocate the list of thread structures */
	for (t = PTQ_FIRST(&proc->threads); t; t = next) {
		next = PTQ_NEXT(t, list);
		PTQ_REMOVE(&proc->threads, t, list);
		free(t);
	}
	/* Deallocate the list of sync objects */
	for (s = PTQ_FIRST(&proc->syncs); s; s = nexts) {
		nexts = PTQ_NEXT(s, list);
		PTQ_REMOVE(&proc->syncs, s, list);
		free(s);
	}
	free(proc);
	return 0;
}
188:
189:
190: int
191: td_thr_iter(td_proc_t *proc, int (*call)(td_thread_t *, void *), void *callarg)
192: {
193: int val;
1.12 nathanw 194: caddr_t next;
1.2 thorpej 195: struct pthread_queue_t allq;
196: td_thread_t *thread;
197:
1.12 nathanw 198: val = READ(proc, proc->allqaddr, &allq, sizeof(allq));
1.2 thorpej 199: if (val != 0)
200: return val;
201:
1.3 christos 202: next = (void *)allq.ptqh_first;
203: while (next != NULL) {
1.2 thorpej 204: val = td__getthread(proc, next, &thread);
205: if (val != 0)
206: return val;
207: val = (*call)(thread, callarg);
208: if (val != 0)
209: return 0;
210:
211: val = READ(proc,
1.8 nathanw 212: next + offsetof(struct __pthread_st, pt_allq.ptqe_next),
1.2 thorpej 213: &next, sizeof(next));
214: if (val != 0)
215: return val;
216: }
217: return 0;
218: }
219:
/*
 * Fill in *info with a snapshot of the target thread's state.
 * Returns 0 on success, TD_ERR_BADTHREAD if thread->addr does not
 * carry the PT_MAGIC signature, or a READ error code.
 */
int
td_thr_info(td_thread_t *thread, td_thread_info_t *info)
{
	int tmp, tmp1, val;
	struct pthread_queue_t queue;

	/* Validate the magic number before trusting any other field. */
	val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
	if (val != 0)
		return val;

	if (tmp != PT_MAGIC)
		return TD_ERR_BADTHREAD;

	info->thread_addr = thread->addr;
	/*
	 * pt_blockgen is bumped when the thread blocks in the kernel and
	 * pt_unblockgen when it resumes; unequal generation counters mean
	 * the thread is currently blocked in a syscall, overriding
	 * whatever pt_state says.
	 */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_blockgen),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_unblockgen),
	    &tmp1, sizeof(tmp1))) != 0)
		return val;
	if (tmp != tmp1)
		tmp = _PT_STATE_BLOCKED_SYS;
	else if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_state),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	/* Map libpthread's internal state to the debugger-visible one. */
	switch (tmp) {
	case PT_STATE_RUNNING:
		info->thread_state = TD_STATE_RUNNING;
		break;
	case PT_STATE_RUNNABLE:
		info->thread_state = TD_STATE_RUNNABLE;
		break;
	case _PT_STATE_BLOCKED_SYS:
		info->thread_state = TD_STATE_BLOCKED;
		break;
	case PT_STATE_BLOCKED_QUEUE:
		info->thread_state = TD_STATE_SLEEPING;
		break;
	case PT_STATE_ZOMBIE:
		info->thread_state = TD_STATE_ZOMBIE;
		break;
	default:
		info->thread_state = TD_STATE_UNKNOWN;
	}

	/* Distinguish user threads from library-internal ones. */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_type),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	switch (tmp) {
	case PT_THREAD_NORMAL:
		info->thread_type = TD_TYPE_USER;
		break;
	case PT_THREAD_UPCALL:
	case PT_THREAD_IDLE:
		info->thread_type = TD_TYPE_SYSTEM;
		break;
	default:
		info->thread_type = TD_TYPE_UNKNOWN;
	}

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_stack),
	    &info->thread_stack, sizeof(stack_t))) != 0)
		return val;

	/* A non-empty pt_joiners queue means someone is in pthread_join(). */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_joiners),
	    &queue, sizeof(queue))) != 0)
		return val;

	if (PTQ_EMPTY(&queue))
		info->thread_hasjoiners = 0;
	else
		info->thread_hasjoiners = 1;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_errno),
	    &info->thread_errno, sizeof(info->thread_errno))) != 0)
		return val;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_num),
	    &info->thread_id, sizeof(info->thread_id))) != 0)
		return val;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_sigmask),
	    &info->thread_sigmask, sizeof(info->thread_sigmask))) != 0)
		return val;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_siglist),
	    &info->thread_sigpending, sizeof(info->thread_sigpending))) != 0)
		return val;

	return 0;
}
321:
322: int
1.4 nathanw 323: td_thr_getname(td_thread_t *thread, char *name, int len)
324: {
325: int val, tmp;
326: caddr_t nameaddr;
327:
328:
329: val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
330: if (val != 0)
331: return val;
332:
333: if (tmp != PT_MAGIC)
334: return TD_ERR_BADTHREAD;
335:
336: if ((val = READ(thread->proc,
1.8 nathanw 337: thread->addr + offsetof(struct __pthread_st, pt_name),
1.4 nathanw 338: &nameaddr, sizeof(nameaddr))) != 0)
339: return val;
340:
341: if (nameaddr == 0)
342: name[0] = '\0';
343: else if ((val = READ(thread->proc, nameaddr,
1.9 christos 344: name, (size_t)MIN(PTHREAD_MAX_NAMELEN_NP, len))) != 0)
1.4 nathanw 345: return val;
346:
347: return 0;
348: }
349:
/*
 * Retrieve a register set from the target thread into "buf".
 * regset 0 = general registers, 1 = FP registers, 2 = machine-dependent
 * extra registers (only when PTHREAD_UCONTEXT_XREG_FLAG is defined).
 *
 * A RUNNING thread's registers are live in its LWP; otherwise they are
 * recovered from a saved ucontext, preferring pt_blockuc (syscall
 * block), then pt_trapuc (trap/upcall), then pt_uc (voluntary switch).
 */
int
td_thr_getregs(td_thread_t *thread, int regset, void *buf)
{
	int tmp, tmp1, val;
	caddr_t addr;
	ucontext_t uc;

	/*
	 * Unequal pt_blockgen/pt_unblockgen generation counters mean the
	 * thread is blocked in a syscall, overriding pt_state.
	 */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_blockgen),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_unblockgen),
	    &tmp1, sizeof(tmp1))) != 0)
		return val;
	if (tmp != tmp1)
		tmp = _PT_STATE_BLOCKED_SYS;
	else if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_state),
	    &tmp, sizeof(tmp))) != 0)
		return val;

	switch (tmp) {
	case PT_STATE_RUNNING:
		/*
		 * The register state of the thread is live in the
		 * inferior process's register state.
		 */
		val = GETREGS(thread->proc, regset, thread->lwp, buf);
		if (val != 0)
			return val;
		break;
	case PT_STATE_RUNNABLE:
	case _PT_STATE_BLOCKED_SYS:
	case PT_STATE_BLOCKED_QUEUE:
		/*
		 * The register state of the thread is in the ucontext_t
		 * of the thread structure.
		 */
		if (tmp == _PT_STATE_BLOCKED_SYS) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_blockuc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		} else
			addr = 0;
		/* Fall back through the saved-context fields in priority
		 * order until one is non-NULL. */
		if (addr == 0) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_trapuc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		}
		if (addr == 0) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_uc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		}
		val = READ(thread->proc, addr, &uc, sizeof(uc));
		if (val != 0)
			return val;

		/* Only hand back register classes the saved context
		 * actually contains (uc_flags). */
		switch (regset) {
		case 0:
			if ((uc.uc_flags & _UC_CPU) == 0)
				return TD_ERR_ERR;
			PTHREAD_UCONTEXT_TO_REG((struct reg *)buf, &uc);
			break;
		case 1:
			if ((uc.uc_flags & _UC_FPU) == 0)
				return TD_ERR_ERR;
			PTHREAD_UCONTEXT_TO_FPREG((struct fpreg *)buf, &uc);
			break;
#ifdef PTHREAD_UCONTEXT_XREG_FLAG
		case 2:
			if ((uc.uc_flags & PTHREAD_UCONTEXT_XREG_FLAG) == 0)
				return TD_ERR_ERR;
			PTHREAD_UCONTEXT_TO_XREG(buf, &uc);
			break;
#endif
		default:
			return TD_ERR_INVAL;
		}
		break;
	case PT_STATE_ZOMBIE:
	default:
		return TD_ERR_BADTHREAD;
	}

	return 0;
}
444:
/*
 * Write a register set from "buf" into the target thread.
 * regset 0 = general registers, 1 = FP registers, 2 = machine-dependent
 * extra registers (only when PTHREAD_UCONTEXT_XREG_FLAG is defined).
 *
 * Mirrors td_thr_getregs(): a RUNNING thread's registers are set on its
 * LWP; otherwise the saved ucontext (pt_blockuc, then pt_trapuc, then
 * pt_uc) is read, patched, and written back.
 */
int
td_thr_setregs(td_thread_t *thread, int regset, void *buf)
{
	int val, tmp, tmp1;
	caddr_t addr;
	ucontext_t uc;

	/*
	 * Unequal pt_blockgen/pt_unblockgen generation counters mean the
	 * thread is blocked in a syscall, overriding pt_state.
	 */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_blockgen),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_unblockgen),
	    &tmp1, sizeof(tmp1))) != 0)
		return val;
	if (tmp != tmp1)
		tmp = _PT_STATE_BLOCKED_SYS;
	else if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_state),
	    &tmp, sizeof(tmp))) != 0)
		return val;

	switch (tmp) {
	case PT_STATE_RUNNING:
		/*
		 * The register state of the thread is live in the
		 * inferior process's register state.
		 */
		val = SETREGS(thread->proc, regset, thread->lwp, buf);
		if (val != 0)
			return val;
		break;
	case PT_STATE_RUNNABLE:
	case _PT_STATE_BLOCKED_SYS:
	case PT_STATE_BLOCKED_QUEUE:
		/*
		 * The register state of the thread is in the ucontext_t
		 * of the thread structure.
		 *
		 * Fetch the uc first, since there is state in it
		 * besides the registers that should be preserved.
		 */
		if (tmp == _PT_STATE_BLOCKED_SYS) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_blockuc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		} else
			addr = 0;
		/* Same saved-context priority order as td_thr_getregs(). */
		if (addr == 0) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_trapuc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		}
		if (addr == 0) {
			val = READ(thread->proc,
			    thread->addr + offsetof(struct __pthread_st, pt_uc),
			    &addr, sizeof(addr));
			if (val != 0)
				return val;
		}
		val = READ(thread->proc,
		    addr, &uc, sizeof(uc));
		if (val != 0)
			return val;

		switch (regset) {
		case 0:
			PTHREAD_REG_TO_UCONTEXT(&uc,
			    (struct reg *)(void *)buf);
			break;
		case 1:
			PTHREAD_FPREG_TO_UCONTEXT(&uc,
			    (struct fpreg *)(void *)buf);
			break;
#ifdef PTHREAD_UCONTEXT_XREG_FLAG
		case 2:
			PTHREAD_XREG_TO_UCONTEXT(&uc, buf);
			break;
#endif
		default:
			return TD_ERR_INVAL;
		}

		/* Write the patched context back into the target. */
		val = WRITE(thread->proc,
		    addr, &uc, sizeof(uc));
		if (val != 0)
			return val;

		break;
	case PT_STATE_ZOMBIE:
	default:
		return TD_ERR_BADTHREAD;
	}

	return 0;
}
545:
546: int
547: td_thr_join_iter(td_thread_t *thread, int (*call)(td_thread_t *, void *),
548: void *arg)
549: {
550: int val;
551: caddr_t next;
552: td_thread_t *thread2;
553: struct pthread_queue_t queue;
554:
555: if ((val = READ(thread->proc,
1.8 nathanw 556: thread->addr + offsetof(struct __pthread_st, pt_joiners),
1.6 nathanw 557: &queue, sizeof(queue))) != 0)
1.2 thorpej 558: return val;
559:
1.3 christos 560: next = (void *)queue.ptqh_first;
561: while (next != NULL) {
1.2 thorpej 562: val = td__getthread(thread->proc, next, &thread2);
563: if (val != 0)
564: return val;
1.4 nathanw 565: val = (*call)(thread2, arg);
1.2 thorpej 566: if (val != 0)
567: return 0;
568:
569: val = READ(thread->proc,
1.8 nathanw 570: next + offsetof(struct __pthread_st, pt_sleep.ptqe_next),
1.2 thorpej 571: &next, sizeof(next));
572: if (val != 0)
573: return val;
574: }
575:
576: return 0;
577: }
578:
579: int
580: td_sync_info(td_sync_t *s, td_sync_info_t *info)
581: {
582: int val, magic, n;
583: struct pthread_queue_t queue;
584: pthread_spin_t slock;
585: pthread_t taddr;
1.12 nathanw 586: td_proc_t *proc = s->proc;
587:
588: val = READ(proc, s->addr, &magic, sizeof(magic));
1.2 thorpej 589: if (val != 0)
590: return val;
591:
592: info->sync_type = TD_SYNC_UNKNOWN;
593: info->sync_size = 0;
594: info->sync_haswaiters = 0;
595: switch (magic) {
596: case _PT_MUTEX_MAGIC:
597: info->sync_type = TD_SYNC_MUTEX;
1.8 nathanw 598: info->sync_size = sizeof(struct __pthread_mutex_st);
1.2 thorpej 599: if ((val = READ(s->proc,
1.8 nathanw 600: s->addr + offsetof(struct __pthread_mutex_st, ptm_blocked),
1.6 nathanw 601: &queue, sizeof(queue))) != 0)
1.2 thorpej 602: return val;
603:
604: if (!PTQ_EMPTY(&queue))
605: info->sync_haswaiters = 1;
606: /*
607: * The cast to (void *) is to explicitly throw away the
608: * volatile qualifier on pthread_spin_t,
609: * from __cpu_simple_lock_t.
610: */
1.12 nathanw 611: if ((val = READ(proc,
1.8 nathanw 612: s->addr + offsetof(struct __pthread_mutex_st, ptm_lock),
1.6 nathanw 613: (void *)&slock, sizeof(slock))) != 0)
1.2 thorpej 614: return val;
615: if (slock == __SIMPLELOCK_LOCKED) {
616: info->sync_data.mutex.locked = 1;
1.12 nathanw 617: if ((val = READ(proc,
1.8 nathanw 618: s->addr + offsetof(struct __pthread_mutex_st,
1.2 thorpej 619: ptm_owner),
1.6 nathanw 620: &taddr, sizeof(taddr))) != 0)
1.2 thorpej 621: return val;
1.12 nathanw 622: if ((val = td__getstacksize(proc)) != 0)
1.10 cl 623: return val;
1.2 thorpej 624: taddr = pthread__id(taddr);
1.12 nathanw 625: td__getthread(proc, (void *)taddr,
1.2 thorpej 626: &info->sync_data.mutex.owner);
627: } else
628: info->sync_data.mutex.locked = 0;
629: break;
630: case _PT_COND_MAGIC:
631: info->sync_type = TD_SYNC_COND;
1.8 nathanw 632: info->sync_size = sizeof(struct __pthread_cond_st);
1.12 nathanw 633: if ((val = READ(proc,
1.8 nathanw 634: s->addr + offsetof(struct __pthread_cond_st, ptc_waiters),
1.6 nathanw 635: &queue, sizeof(queue))) != 0)
1.2 thorpej 636: return val;
637: if (!PTQ_EMPTY(&queue))
638: info->sync_haswaiters = 1;
639: break;
640: case _PT_SPINLOCK_MAGIC:
641: info->sync_type = TD_SYNC_SPIN;
1.8 nathanw 642: info->sync_size = sizeof(struct __pthread_spinlock_st);
1.12 nathanw 643: if ((val = READ(proc,
1.8 nathanw 644: s->addr + offsetof(struct __pthread_spinlock_st, pts_spin),
1.6 nathanw 645: (void *)&slock, sizeof(slock))) != 0)
1.2 thorpej 646: return val;
647: if (slock == __SIMPLELOCK_LOCKED)
648: info->sync_data.spin.locked = 1;
649: break;
650: case PT_MAGIC:
651: info->sync_type = TD_SYNC_JOIN;
1.8 nathanw 652: info->sync_size = sizeof(struct __pthread_st);
1.12 nathanw 653: td__getthread(proc, s->addr,
1.2 thorpej 654: &info->sync_data.join.thread);
1.12 nathanw 655: if ((val = READ(proc,
1.8 nathanw 656: s->addr + offsetof(struct __pthread_st, pt_joiners),
1.6 nathanw 657: &queue, sizeof(queue))) != 0)
1.2 thorpej 658: return val;
659:
660: if (!PTQ_EMPTY(&queue))
661: info->sync_haswaiters = 1;
662: break;
1.3 christos 663: case (int)_PT_RWLOCK_MAGIC:
1.2 thorpej 664: info->sync_type = TD_SYNC_RWLOCK;
1.8 nathanw 665: info->sync_size = sizeof(struct __pthread_rwlock_st);
1.12 nathanw 666: if ((val = READ(proc,
1.8 nathanw 667: s->addr + offsetof(struct __pthread_rwlock_st, ptr_rblocked),
1.6 nathanw 668: &queue, sizeof(queue))) != 0)
1.2 thorpej 669: return val;
670: if (!PTQ_EMPTY(&queue))
671: info->sync_haswaiters = 1;
672:
1.12 nathanw 673: if ((val = READ(proc,
1.8 nathanw 674: s->addr + offsetof(struct __pthread_rwlock_st, ptr_wblocked),
1.6 nathanw 675: &queue, sizeof(queue))) != 0)
1.2 thorpej 676: return val;
677: if (!PTQ_EMPTY(&queue))
678: info->sync_haswaiters = 1;
679:
680:
681: info->sync_data.rwlock.locked = 0;
1.12 nathanw 682: if ((val = READ(proc,
1.8 nathanw 683: s->addr + offsetof(struct __pthread_rwlock_st, ptr_nreaders),
1.6 nathanw 684: &n, sizeof(n))) != 0)
1.2 thorpej 685: return val;
686: info->sync_data.rwlock.readlocks = n;
687: if (n > 0)
688: info->sync_data.rwlock.locked = 1;
689:
1.12 nathanw 690: if ((val = READ(proc,
1.8 nathanw 691: s->addr + offsetof(struct __pthread_rwlock_st, ptr_writer),
1.6 nathanw 692: &taddr, sizeof(taddr))) != 0)
1.2 thorpej 693: return val;
694: if (taddr != 0) {
695: info->sync_data.rwlock.locked = 1;
1.12 nathanw 696: td__getthread(proc, (void *)taddr,
1.2 thorpej 697: &info->sync_data.rwlock.writeowner);
698: }
1.3 christos 699: /*FALLTHROUGH*/
1.2 thorpej 700: default:
701: return (0);
702: }
703:
704: info->sync_addr = s->addr;
705:
706: return 0;
707: }
708:
709:
710: int
711: td_sync_waiters_iter(td_sync_t *s, int (*call)(td_thread_t *, void *),
712: void *arg)
713: {
714: int val, magic;
715: caddr_t next;
716: struct pthread_queue_t queue;
717: td_thread_t *thread;
718:
719: val = READ(s->proc, s->addr, &magic, sizeof(magic));
720: if (val != 0)
721: return val;
722:
723: switch (magic) {
724: case _PT_MUTEX_MAGIC:
725: if ((val = READ(s->proc,
1.8 nathanw 726: s->addr + offsetof(struct __pthread_mutex_st, ptm_blocked),
1.6 nathanw 727: &queue, sizeof(queue))) != 0)
1.2 thorpej 728: return val;
729: break;
730: case _PT_COND_MAGIC:
731: if ((val = READ(s->proc,
1.8 nathanw 732: s->addr + offsetof(struct __pthread_cond_st, ptc_waiters),
1.6 nathanw 733: &queue, sizeof(queue))) != 0)
1.2 thorpej 734: return val;
735: break;
736: case PT_MAGIC:
737: /* Redundant with join_iter, but what the hell... */
738: if ((val = READ(s->proc,
1.8 nathanw 739: s->addr + offsetof(struct __pthread_st, pt_joiners),
1.6 nathanw 740: &queue, sizeof(queue))) != 0)
1.2 thorpej 741: return val;
742: break;
743: default:
744: return (0);
745: }
746:
1.3 christos 747: next = (void *)queue.ptqh_first;
748: while (next != NULL) {
1.2 thorpej 749: val = td__getthread(s->proc, next, &thread);
750: if (val != 0)
751: return val;
752: val = (*call)(thread, arg);
753: if (val != 0)
754: return 0;
755:
756: val = READ(s->proc,
1.8 nathanw 757: next + offsetof(struct __pthread_st, pt_sleep.ptqe_next),
1.2 thorpej 758: &next, sizeof(next));
759: if (val != 0)
760: return val;
761: }
762: return 0;
763: }
764:
765:
766: int
767: td_map_addr2sync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp)
768: {
769: int magic, val;
770:
771: val = READ(proc, addr, &magic, sizeof(magic));
772: if (val != 0)
773: return val;
774:
775: if ((magic != _PT_MUTEX_MAGIC) &&
776: (magic != _PT_COND_MAGIC) &&
777: (magic != _PT_SPINLOCK_MAGIC))
778: return TD_ERR_NOOBJ;
779:
780: val = td__getsync(proc, addr, syncp);
781: if (val != 0)
782: return val;
783:
784: return 0;
785: }
786:
787:
788: int
789: td_map_pth2thr(td_proc_t *proc, pthread_t thread, td_thread_t **threadp)
790: {
791: int magic, val;
792:
1.3 christos 793: val = READ(proc, (void *)thread, &magic, sizeof(magic));
1.2 thorpej 794: if (val != 0)
795: return val;
796:
797: if (magic != PT_MAGIC)
798: return TD_ERR_NOOBJ;
799:
1.3 christos 800: val = td__getthread(proc, (void *)thread, threadp);
1.2 thorpej 801: if (val != 0)
802: return val;
803:
804: return 0;
805: }
806:
807: int
808: td_map_id2thr(td_proc_t *proc, int threadid, td_thread_t **threadp)
809: {
810: int val, num;
1.12 nathanw 811: caddr_t next;
1.2 thorpej 812: struct pthread_queue_t allq;
813: td_thread_t *thread;
814:
815:
1.12 nathanw 816: val = READ(proc, proc->allqaddr, &allq, sizeof(allq));
1.2 thorpej 817: if (val != 0)
818: return val;
819:
1.3 christos 820: next = (void *)allq.ptqh_first;
821: while (next != NULL) {
1.2 thorpej 822: val = READ(proc,
1.8 nathanw 823: next + offsetof(struct __pthread_st, pt_num),
1.2 thorpej 824: &num, sizeof(num));
825:
826: if (num == threadid)
827: break;
828:
829: val = READ(proc,
1.8 nathanw 830: next + offsetof(struct __pthread_st, pt_allq.ptqe_next),
1.2 thorpej 831: &next, sizeof(next));
832: if (val != 0)
833: return val;
834: }
835:
836: if (next == 0) {
837: /* A matching thread was not found. */
838: return TD_ERR_NOOBJ;
839: }
840:
841: val = td__getthread(proc, next, &thread);
842: if (val != 0)
843: return val;
844: *threadp = thread;
845:
846: return 0;
847: }
848:
/* Return the thread handle of the thread running on the given LWP */
int
td_map_lwp2thr(td_proc_t *proc, int lwp, td_thread_t **threadp)
{
	int val, magic;
	struct reg gregs;
	ucontext_t uc;
	void *th;

	/* Fetch the LWP's live general registers to learn its stack
	 * pointer. */
	val = GETREGS(proc, 0, lwp, &gregs);
	if (val != 0)
		return val;

	PTHREAD_REG_TO_UCONTEXT(&uc, &gregs);

	/* pthread__id() below needs current stack-size parameters. */
	val = td__getstacksize(proc);
	if (val != 0)
		return val;

	/* Map the stack pointer to the thread structure that owns that
	 * stack. */
	th = pthread__id(pthread__uc_sp(&uc));

	val = READ(proc, th, &magic, sizeof(magic));
	if (val != 0)
		return val;

	if (magic != PT_MAGIC)
		return TD_ERR_NOOBJ;

	val = td__getthread(proc, th, threadp);
	if (val != 0)
		return val;

	/* Remember the binding so register access can target this LWP. */
	(*threadp)->lwp = lwp;

	return 0;
}
885:
886: int
887: td_map_lwps(td_proc_t *proc)
888: {
889: int i, val, nlwps;
890: td_thread_t *thread;
891:
1.12 nathanw 892: val = READ(proc, proc->maxlwpsaddr, &nlwps, sizeof(nlwps));
1.2 thorpej 893: if (val != 0)
894: return val;
895:
896: for (i = 1; i <= nlwps; i++) {
897: /*
898: * Errors are deliberately ignored for the call to
899: * td_map_lwp2thr(); it is entirely likely that not
900: * all LWPs in the range 1..nlwps exist, and that's
901: * not a problem.
902: */
903: td_map_lwp2thr(proc, i, &thread);
904: }
905: return 0;
906: }
907:
908: int
909: td_tsd_iter(td_proc_t *proc,
910: int (*call)(pthread_key_t, void (*)(void *), void *), void *arg)
911: {
912: int val;
913: int i, allocated;
914: void (*destructor)(void *);
915:
916: for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
1.12 nathanw 917: val = READ(proc, proc->tsdallocaddr + i * sizeof(int),
1.6 nathanw 918: &allocated, sizeof(allocated));
1.2 thorpej 919: if (val != 0)
920: return val;
921:
922: if (allocated) {
1.12 nathanw 923: val = READ(proc, proc->tsddestaddr +
924: i * sizeof(destructor),
1.2 thorpej 925: &destructor, sizeof(destructor));
926: if (val != 0)
927: return val;
928:
929: val = (call)(i, destructor, arg);
930: if (val != 0)
931: return val;
932: }
933: }
934:
935: return 0;
936: }
937:
938: /* Get the synchronization object that the thread is sleeping on */
939: int
940: td_thr_sleepinfo(td_thread_t *thread, td_sync_t **s)
941: {
942: int val;
943: caddr_t addr;
944:
945: if ((val = READ(thread->proc,
1.8 nathanw 946: thread->addr + offsetof(struct __pthread_st, pt_sleepobj),
1.6 nathanw 947: &addr, sizeof(addr))) != 0)
1.2 thorpej 948: return val;
949:
950: td__getsync(thread->proc, addr, s);
951:
952: return 0;
953:
954: }
955:
956:
957:
958: static int
959: td__getthread(td_proc_t *proc, caddr_t addr, td_thread_t **threadp)
960: {
961: td_thread_t *thread;
962:
963: /*
964: * Check if we've allocated a descriptor for this thread.
965: * Sadly, this makes iterating over a set of threads O(N^2)
966: * in the number of threads. More sophisticated data structures
967: * can wait.
968: */
969: PTQ_FOREACH(thread, &proc->threads, list) {
970: if (thread->addr == addr)
971: break;
972: }
973: if (thread == NULL) {
974: thread = malloc(sizeof(*thread));
975: if (thread == NULL)
976: return TD_ERR_NOMEM;
977: thread->proc = proc;
978: thread->addr = addr;
979: thread->lwp = 0;
980: PTQ_INSERT_HEAD(&proc->threads, thread, list);
981: }
982:
983: *threadp = thread;
984: return 0;
985: }
986:
987:
988: static int
989: td__getsync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp)
990: {
991: td_sync_t *s;
992:
993: /* Check if we've allocated a descriptor for this object. */
994: PTQ_FOREACH(s, &proc->syncs, list) {
995: if (s->addr == addr)
996: break;
997: }
998: /* Allocate a fresh one */
999: if (s == NULL) {
1000: s = malloc(sizeof(*s));
1001: if (s == NULL)
1002: return TD_ERR_NOMEM;
1003: s->proc = proc;
1004: s->addr = addr;
1005: PTQ_INSERT_HEAD(&proc->syncs, s, list);
1006: }
1007:
1008: *syncp = s;
1009: return 0;
1010: }
1011:
1012:
1013: int
1014: td_thr_tsd(td_thread_t *thread, pthread_key_t key, void **value)
1015: {
1016: int val;
1017:
1018: val = READ(thread->proc, thread->addr +
1.8 nathanw 1019: offsetof(struct __pthread_st, pt_specific) +
1.6 nathanw 1020: key * sizeof(void *), value, sizeof(*value));
1.2 thorpej 1021:
1022: return val;
1023: }
1024:
1.10 cl 1025:
/*
 * Refresh the cached stack-size parameters (stacksizelg, stacksize,
 * stackmask) from the target's pthread_stacksize_lg variable.  Needed
 * when PT_FIXEDSTACKSIZE_LG is not compiled in, since pthread__id()
 * then consumes proc->stackmask via the PT_STACKMASK override above.
 *
 * Always returns 0: when the symbol was absent at td_open() time, or
 * the READ fails, the cached values are simply left untouched.
 * NOTE(review): swallowing the READ error here looks deliberate
 * (core-file targets), but callers cannot distinguish it — confirm.
 */
static int
td__getstacksize(td_proc_t *proc)
{
	int lg, val;

	if (proc->stacksizeaddr == NULL)
		return 0;

	val = READ(proc, proc->stacksizeaddr, &lg, sizeof(int));
	if (val != 0)
		return 0;
	if (lg != proc->stacksizelg) {
		/* Recompute the derived size and mask only on change. */
		proc->stacksizelg = lg;
		proc->stacksize = (1 << lg);
		proc->stackmask = proc->stacksize - 1;
	}
	return 0;
}
CVSweb <webmaster@jp.NetBSD.org>