Annotation of src/lib/libpthread_dbg/pthread_dbg.c, Revision 1.34
1.34 ! ad 1: /* $NetBSD: pthread_dbg.c,v 1.33 2007/02/06 15:24:37 ad Exp $ */
1.2 thorpej 2:
3: /*-
4: * Copyright (c) 2002 Wasabi Systems, Inc.
5: * All rights reserved.
6: *
7: * Written by Nathan J. Williams for Wasabi Systems, Inc.
8: *
9: * Redistribution and use in source and binary forms, with or without
10: * modification, are permitted provided that the following conditions
11: * are met:
12: * 1. Redistributions of source code must retain the above copyright
13: * notice, this list of conditions and the following disclaimer.
14: * 2. Redistributions in binary form must reproduce the above copyright
15: * notice, this list of conditions and the following disclaimer in the
16: * documentation and/or other materials provided with the distribution.
17: * 3. All advertising materials mentioning features or use of this software
18: * must display the following acknowledgement:
19: * This product includes software developed for the NetBSD Project by
20: * Wasabi Systems, Inc.
21: * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22: * or promote products derived from this software without specific
23: * prior written permission.
24: *
25: * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35: * POSSIBILITY OF SUCH DAMAGE.
36: */
1.5 lukem 37:
38: #include <sys/cdefs.h>
1.34 ! ad 39: __RCSID("$NetBSD: pthread_dbg.c,v 1.33 2007/02/06 15:24:37 ad Exp $");
1.2 thorpej 40:
1.29 chs 41: #define __EXPOSE_STACK 1
1.34 ! ad 42:
1.29 chs 43: #include <sys/param.h>
1.34 ! ad 44: #include <sys/types.h>
! 45:
1.2 thorpej 46: #include <stddef.h>
47: #include <stdlib.h>
48: #include <string.h>
49: #include <errno.h>
50: #include <unistd.h>
1.34 ! ad 51: #include <lwp.h>
1.2 thorpej 52:
1.24 martin 53: #include <machine/reg.h>
54:
1.2 thorpej 55: #include <pthread.h>
56: #include <pthread_int.h>
57: #include <pthread_dbg.h>
58: #include <pthread_dbg_int.h>
59:
1.12 nathanw 60: #ifndef PT_FIXEDSTACKSIZE_LG
61: #undef PT_STACKMASK
62: #define PT_STACKMASK (proc->stackmask)
63: #endif
64:
1.20 nathanw 65: /* Compensate for debuggers that want a zero ID to be a sentinel */
66: #define TN_OFFSET 1
67:
1.2 thorpej 68: static int td__getthread(td_proc_t *proc, caddr_t addr, td_thread_t **threadp);
69: static int td__getsync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp);
1.10 cl 70: static int td__getstacksize(td_proc_t *proc);
71:
1.2 thorpej 72: int
73: td_open(struct td_proc_callbacks_t *cb, void *arg, td_proc_t **procp)
74: {
75: td_proc_t *proc;
1.12 nathanw 76: caddr_t addr;
1.2 thorpej 77: int dbg;
78: int val;
79:
80: proc = malloc(sizeof(*proc));
81: if (proc == NULL)
82: return TD_ERR_NOMEM;
83: proc->cb = cb;
84: proc->arg = arg;
85:
1.12 nathanw 86: val = LOOKUP(proc, "pthread__dbg", &addr);
87: if (val != 0) {
88: if (val == TD_ERR_NOSYM)
89: val = TD_ERR_NOLIB;
90: goto error;
91: }
92: proc->dbgaddr = addr;
93:
94: val = LOOKUP(proc, "pthread__allqueue", &addr);
95: if (val != 0) {
96: if (val == TD_ERR_NOSYM)
97: val = TD_ERR_NOLIB;
98: goto error;
99: }
100: proc->allqaddr = addr;
101:
1.17 nathanw 102: val = LOOKUP(proc, "pthread__runqueue", &addr);
103: if (val != 0) {
104: if (val == TD_ERR_NOSYM)
105: val = TD_ERR_NOLIB;
106: goto error;
107: }
108: proc->runqaddr = addr;
109:
110: val = LOOKUP(proc, "pthread__idlequeue", &addr);
111: if (val != 0) {
112: if (val == TD_ERR_NOSYM)
113: val = TD_ERR_NOLIB;
114: goto error;
115: }
116: proc->idleqaddr = addr;
117:
118: val = LOOKUP(proc, "pthread__suspqueue", &addr);
119: if (val != 0) {
120: if (val == TD_ERR_NOSYM)
121: val = TD_ERR_NOLIB;
122: goto error;
123: }
124: proc->suspqaddr = addr;
125:
1.12 nathanw 126: val = LOOKUP(proc, "pthread__maxlwps", &addr);
127: if (val != 0) {
128: if (val == TD_ERR_NOSYM)
129: val = TD_ERR_NOLIB;
130: goto error;
131: }
132: proc->maxlwpsaddr = addr;
133:
134: val = LOOKUP(proc, "pthread__tsd_alloc", &addr);
1.2 thorpej 135: if (val != 0) {
136: if (val == TD_ERR_NOSYM)
137: val = TD_ERR_NOLIB;
138: goto error;
139: }
1.12 nathanw 140: proc->tsdallocaddr = addr;
1.2 thorpej 141:
1.12 nathanw 142: val = LOOKUP(proc, "pthread__tsd_destructors", &addr);
143: if (val != 0) {
144: if (val == TD_ERR_NOSYM)
145: val = TD_ERR_NOLIB;
146: goto error;
147: }
148: proc->tsddestaddr = addr;
149:
1.16 cl 150: val = READ(proc, proc->dbgaddr, &dbg, sizeof(int));
1.2 thorpej 151: if (val != 0)
152: goto error;
153:
154: if (dbg != 0) {
155: /* Another instance of libpthread_dbg is already attached. */
156: val = TD_ERR_INUSE;
157: goto error;
158: }
159:
1.13 nathanw 160: val = LOOKUP(proc, "pthread_stacksize_lg", &addr);
1.12 nathanw 161: if (val == 0)
162: proc->stacksizeaddr = addr;
163: else
164: proc->stacksizeaddr = NULL;
165: proc->stacksizelg = -1;
166: proc->stacksize = 0;
167: proc->stackmask = 0;
168:
1.23 nathanw 169: proc->regbuf = NULL;
170: proc->fpregbuf = NULL;
171:
1.2 thorpej 172: dbg = getpid();
173: /*
174: * If this fails it probably means we're debugging a core file and
175: * can't write to it.
176: * If it's something else we'll lose the next time we hit WRITE,
177: * but not before, and that's OK.
178: */
1.12 nathanw 179: WRITE(proc, proc->dbgaddr, &dbg, sizeof(int));
1.2 thorpej 180:
181: PTQ_INIT(&proc->threads);
182: PTQ_INIT(&proc->syncs);
183:
184: *procp = proc;
185:
186: return 0;
187:
188: error:
189: free(proc);
190: return val;
191: }
192:
193: int
194: td_close(td_proc_t *proc)
195: {
196: int dbg;
197: td_thread_t *t, *next;
198: td_sync_t *s, *nexts;
199:
200: dbg = 0;
201: /*
202: * Error returns from this write are mot really a problem;
203: * the process doesn't exist any more.
204: */
1.12 nathanw 205: WRITE(proc, proc->dbgaddr, &dbg, sizeof(int));
1.2 thorpej 206:
207: /* Deallocate the list of thread structures */
208: for (t = PTQ_FIRST(&proc->threads); t; t = next) {
209: next = PTQ_NEXT(t, list);
210: PTQ_REMOVE(&proc->threads, t, list);
211: free(t);
212: }
213: /* Deallocate the list of sync objects */
214: for (s = PTQ_FIRST(&proc->syncs); s; s = nexts) {
215: nexts = PTQ_NEXT(s, list);
216: PTQ_REMOVE(&proc->syncs, s, list);
217: free(s);
218: }
1.23 nathanw 219:
220: if (proc->regbuf != NULL) {
221: free(proc->regbuf);
222: free(proc->fpregbuf);
223: }
224:
1.2 thorpej 225: free(proc);
226: return 0;
227: }
228:
229:
230: int
231: td_thr_iter(td_proc_t *proc, int (*call)(td_thread_t *, void *), void *callarg)
232: {
233: int val;
1.12 nathanw 234: caddr_t next;
1.2 thorpej 235: struct pthread_queue_t allq;
236: td_thread_t *thread;
237:
1.12 nathanw 238: val = READ(proc, proc->allqaddr, &allq, sizeof(allq));
1.2 thorpej 239: if (val != 0)
240: return val;
241:
1.3 christos 242: next = (void *)allq.ptqh_first;
243: while (next != NULL) {
1.2 thorpej 244: val = td__getthread(proc, next, &thread);
245: if (val != 0)
246: return val;
247: val = (*call)(thread, callarg);
248: if (val != 0)
249: return 0;
250:
251: val = READ(proc,
1.8 nathanw 252: next + offsetof(struct __pthread_st, pt_allq.ptqe_next),
1.2 thorpej 253: &next, sizeof(next));
254: if (val != 0)
255: return val;
256: }
257: return 0;
258: }
259:
/*
 * td_thr_info: fill in *info with a snapshot of the given thread's
 * state, read field by field from the target's address space.
 * Returns 0 on success, TD_ERR_BADTHREAD if the address does not
 * hold a pthread structure, or the error from a failed READ.
 */
int
td_thr_info(td_thread_t *thread, td_thread_info_t *info)
{
	int tmp, val;
	struct pthread_queue_t queue;

	/* Validate the thread by checking its magic number. */
	val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
	if (val != 0)
		return val;

	if (tmp != PT_MAGIC)
		return TD_ERR_BADTHREAD;

	info->thread_addr = thread->addr;
	/* Map the library-internal state to the debugger-visible one. */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_state),
	    &tmp, sizeof(tmp))) != 0)
		return val;
	switch (tmp) {
	case PT_STATE_RUNNING:
		info->thread_state = TD_STATE_RUNNING;
		break;
/* These states existed under the SA thread model only (XXXLWP). */
#ifdef XXXLWP
	case PT_STATE_RUNNABLE:
		info->thread_state = TD_STATE_RUNNABLE;
		break;
	case _PT_STATE_BLOCKED_SYS:
		info->thread_state = TD_STATE_BLOCKED;
		break;
	case PT_STATE_BLOCKED_QUEUE:
		info->thread_state = TD_STATE_SLEEPING;
		break;
	case PT_STATE_SUSPENDED:
		info->thread_state = TD_STATE_SUSPENDED;
		break;
#endif
	case PT_STATE_ZOMBIE:
		info->thread_state = TD_STATE_ZOMBIE;
		break;
	default:
		info->thread_state = TD_STATE_UNKNOWN;
	}

	info->thread_type = TD_TYPE_USER;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_stack),
	    &info->thread_stack, sizeof(stack_t))) != 0)
		return val;

	/* A non-empty joiner queue means someone is pthread_join()ing. */
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_joiners),
	    &queue, sizeof(queue))) != 0)
		return val;

	if (PTQ_EMPTY(&queue))
		info->thread_hasjoiners = 0;
	else
		info->thread_hasjoiners = 1;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_errno),
	    &info->thread_errno, sizeof(info->thread_errno))) != 0)
		return val;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_num),
	    &info->thread_id, sizeof(info->thread_id))) != 0)
		return val;

	/* Bias the id so debuggers can use 0 as a sentinel (TN_OFFSET). */
	info->thread_id += TN_OFFSET;

#ifdef XXXLWP
	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_sigmask),
	    &info->thread_sigmask, sizeof(info->thread_sigmask))) != 0)
		return val;

	if ((val = READ(thread->proc,
	    thread->addr + offsetof(struct __pthread_st, pt_siglist),
	    &info->thread_sigpending, sizeof(info->thread_sigpending))) != 0)
		return val;
#endif

	return 0;
}
346:
347: int
1.4 nathanw 348: td_thr_getname(td_thread_t *thread, char *name, int len)
349: {
350: int val, tmp;
351: caddr_t nameaddr;
352:
353:
354: val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
355: if (val != 0)
356: return val;
357:
358: if (tmp != PT_MAGIC)
359: return TD_ERR_BADTHREAD;
360:
361: if ((val = READ(thread->proc,
1.8 nathanw 362: thread->addr + offsetof(struct __pthread_st, pt_name),
1.4 nathanw 363: &nameaddr, sizeof(nameaddr))) != 0)
364: return val;
365:
366: if (nameaddr == 0)
367: name[0] = '\0';
368: else if ((val = READ(thread->proc, nameaddr,
1.9 christos 369: name, (size_t)MIN(PTHREAD_MAX_NAMELEN_NP, len))) != 0)
1.4 nathanw 370: return val;
371:
372: return 0;
373: }
374:
375: int
1.2 thorpej 376: td_thr_getregs(td_thread_t *thread, int regset, void *buf)
377: {
1.33 ad 378: int tmp, val;
1.2 thorpej 379:
1.11 cl 380: if ((val = READ(thread->proc,
381: thread->addr + offsetof(struct __pthread_st, pt_state),
382: &tmp, sizeof(tmp))) != 0)
1.2 thorpej 383: return val;
384:
385: switch (tmp) {
386: case PT_STATE_RUNNING:
387: /*
388: * The register state of the thread is live in the
389: * inferior process's register state.
390: */
391: val = GETREGS(thread->proc, regset, thread->lwp, buf);
392: if (val != 0)
393: return val;
394: break;
395: case PT_STATE_ZOMBIE:
396: default:
397: return TD_ERR_BADTHREAD;
398: }
399:
400: return 0;
401: }
402:
403: int
404: td_thr_setregs(td_thread_t *thread, int regset, void *buf)
405: {
1.33 ad 406: int val, tmp;
1.2 thorpej 407:
1.11 cl 408: if ((val = READ(thread->proc,
409: thread->addr + offsetof(struct __pthread_st, pt_state),
410: &tmp, sizeof(tmp))) != 0)
1.2 thorpej 411: return val;
412:
413: switch (tmp) {
414: case PT_STATE_RUNNING:
415: /*
416: * The register state of the thread is live in the
417: * inferior process's register state.
418: */
419: val = SETREGS(thread->proc, regset, thread->lwp, buf);
420: if (val != 0)
421: return val;
422: break;
423: case PT_STATE_ZOMBIE:
424: default:
425: return TD_ERR_BADTHREAD;
426: }
427:
428: return 0;
429: }
430:
431: int
432: td_thr_join_iter(td_thread_t *thread, int (*call)(td_thread_t *, void *),
433: void *arg)
434: {
435: int val;
436: caddr_t next;
437: td_thread_t *thread2;
438: struct pthread_queue_t queue;
439:
440: if ((val = READ(thread->proc,
1.8 nathanw 441: thread->addr + offsetof(struct __pthread_st, pt_joiners),
1.6 nathanw 442: &queue, sizeof(queue))) != 0)
1.2 thorpej 443: return val;
444:
1.3 christos 445: next = (void *)queue.ptqh_first;
446: while (next != NULL) {
1.2 thorpej 447: val = td__getthread(thread->proc, next, &thread2);
448: if (val != 0)
449: return val;
1.4 nathanw 450: val = (*call)(thread2, arg);
1.2 thorpej 451: if (val != 0)
452: return 0;
453:
454: val = READ(thread->proc,
1.8 nathanw 455: next + offsetof(struct __pthread_st, pt_sleep.ptqe_next),
1.2 thorpej 456: &next, sizeof(next));
457: if (val != 0)
458: return val;
459: }
460:
461: return 0;
462: }
463:
/*
 * td_sync_info: identify the synchronization object at s->addr and
 * fill in *info with its type, size, waiter status and type-specific
 * data, all read from the target's address space.
 */
int
td_sync_info(td_sync_t *s, td_sync_info_t *info)
{
	int val, magic, n;
	struct pthread_queue_t queue;
	pthread_spin_t slock;
	pthread_t taddr;
	td_proc_t *proc = s->proc;

	/* The object's leading magic number tells us what it is. */
	val = READ(proc, s->addr, &magic, sizeof(magic));
	if (val != 0)
		return val;

	info->sync_type = TD_SYNC_UNKNOWN;
	info->sync_size = 0;
	info->sync_haswaiters = 0;
	switch (magic) {
	case _PT_MUTEX_MAGIC:
		info->sync_type = TD_SYNC_MUTEX;
		info->sync_size = sizeof(struct __pthread_mutex_st);
		if ((val = READ(s->proc,
		    s->addr + offsetof(struct __pthread_mutex_st, ptm_blocked),
		    &queue, sizeof(queue))) != 0)
			return val;

		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;
		/*
		 * The cast to (void *) is to explicitly throw away the
		 * volatile qualifier on pthread_spin_t,
		 * from __cpu_simple_lock_t.
		 */
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_mutex_st, ptm_lock),
		    (void *)&slock, sizeof(slock))) != 0)
			return val;
		if (slock == __SIMPLELOCK_LOCKED) {
			info->sync_data.mutex.locked = 1;
			/* Resolve the owner pointer to a thread handle. */
			if ((val = READ(proc,
			    s->addr + offsetof(struct __pthread_mutex_st,
				ptm_owner),
			    &taddr, sizeof(taddr))) != 0)
				return val;
			if ((val = td__getstacksize(proc)) != 0)
				return val;
			/* pthread__id() needs the up-to-date stack mask. */
			taddr = pthread__id(taddr);
			td__getthread(proc, (void *)taddr,
			    &info->sync_data.mutex.owner);
		} else
			info->sync_data.mutex.locked = 0;
		break;
	case _PT_COND_MAGIC:
		info->sync_type = TD_SYNC_COND;
		info->sync_size = sizeof(struct __pthread_cond_st);
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_cond_st, ptc_waiters),
		    &queue, sizeof(queue))) != 0)
			return val;
		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;
		break;
	case _PT_SPINLOCK_MAGIC:
		info->sync_type = TD_SYNC_SPIN;
		info->sync_size = sizeof(struct __pthread_spinlock_st);
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_spinlock_st, pts_spin),
		    (void *)&slock, sizeof(slock))) != 0)
			return val;
		if (slock == __SIMPLELOCK_LOCKED)
			info->sync_data.spin.locked = 1;
		else
			info->sync_data.spin.locked = 0;
		break;
	case PT_MAGIC:
		/* A thread structure: "joining" it is a sync operation. */
		info->sync_type = TD_SYNC_JOIN;
		info->sync_size = sizeof(struct __pthread_st);
		td__getthread(proc, s->addr,
		    &info->sync_data.join.thread);
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_st, pt_joiners),
		    &queue, sizeof(queue))) != 0)
			return val;

		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;
		break;
	case (int)_PT_RWLOCK_MAGIC:
		info->sync_type = TD_SYNC_RWLOCK;
		info->sync_size = sizeof(struct __pthread_rwlock_st);
		/* Waiters may be queued on the read or the write side. */
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_rwlock_st, ptr_rblocked),
		    &queue, sizeof(queue))) != 0)
			return val;
		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;

		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_rwlock_st, ptr_wblocked),
		    &queue, sizeof(queue))) != 0)
			return val;
		if (!PTQ_EMPTY(&queue))
			info->sync_haswaiters = 1;


		info->sync_data.rwlock.locked = 0;
		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_rwlock_st, ptr_nreaders),
		    &n, sizeof(n))) != 0)
			return val;
		info->sync_data.rwlock.readlocks = n;
		if (n > 0)
			info->sync_data.rwlock.locked = 1;

		if ((val = READ(proc,
		    s->addr + offsetof(struct __pthread_rwlock_st, ptr_writer),
		    &taddr, sizeof(taddr))) != 0)
			return val;
		if (taddr != 0) {
			info->sync_data.rwlock.locked = 1;
			td__getthread(proc, (void *)taddr,
			    &info->sync_data.rwlock.writeowner);
		}
		/*
		 * NOTE(review): this fall-through makes the rwlock case
		 * return without setting info->sync_addr below, unlike
		 * every other recognized type — looks like it should be
		 * a "break"; confirm before changing.
		 */
		/*FALLTHROUGH*/
	default:
		return (0);
	}

	info->sync_addr = s->addr;

	return 0;
}
595:
596:
597: int
598: td_sync_waiters_iter(td_sync_t *s, int (*call)(td_thread_t *, void *),
599: void *arg)
600: {
601: int val, magic;
602: caddr_t next;
603: struct pthread_queue_t queue;
604: td_thread_t *thread;
605:
606: val = READ(s->proc, s->addr, &magic, sizeof(magic));
607: if (val != 0)
608: return val;
609:
610: switch (magic) {
611: case _PT_MUTEX_MAGIC:
612: if ((val = READ(s->proc,
1.8 nathanw 613: s->addr + offsetof(struct __pthread_mutex_st, ptm_blocked),
1.6 nathanw 614: &queue, sizeof(queue))) != 0)
1.2 thorpej 615: return val;
616: break;
617: case _PT_COND_MAGIC:
618: if ((val = READ(s->proc,
1.8 nathanw 619: s->addr + offsetof(struct __pthread_cond_st, ptc_waiters),
1.6 nathanw 620: &queue, sizeof(queue))) != 0)
1.2 thorpej 621: return val;
622: break;
623: case PT_MAGIC:
624: /* Redundant with join_iter, but what the hell... */
625: if ((val = READ(s->proc,
1.8 nathanw 626: s->addr + offsetof(struct __pthread_st, pt_joiners),
1.6 nathanw 627: &queue, sizeof(queue))) != 0)
1.2 thorpej 628: return val;
629: break;
630: default:
631: return (0);
632: }
633:
1.3 christos 634: next = (void *)queue.ptqh_first;
635: while (next != NULL) {
1.2 thorpej 636: val = td__getthread(s->proc, next, &thread);
637: if (val != 0)
638: return val;
639: val = (*call)(thread, arg);
640: if (val != 0)
641: return 0;
642:
643: val = READ(s->proc,
1.8 nathanw 644: next + offsetof(struct __pthread_st, pt_sleep.ptqe_next),
1.2 thorpej 645: &next, sizeof(next));
646: if (val != 0)
647: return val;
648: }
649: return 0;
650: }
651:
652:
653: int
654: td_map_addr2sync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp)
655: {
656: int magic, val;
657:
658: val = READ(proc, addr, &magic, sizeof(magic));
659: if (val != 0)
660: return val;
661:
662: if ((magic != _PT_MUTEX_MAGIC) &&
663: (magic != _PT_COND_MAGIC) &&
1.26 nathanw 664: (magic != _PT_SPINLOCK_MAGIC) &&
1.28 nathanw 665: (magic != _PT_RWLOCK_MAGIC) &&
666: (magic != PT_MAGIC))
1.2 thorpej 667: return TD_ERR_NOOBJ;
668:
669: val = td__getsync(proc, addr, syncp);
670: if (val != 0)
671: return val;
672:
673: return 0;
674: }
675:
676:
677: int
678: td_map_pth2thr(td_proc_t *proc, pthread_t thread, td_thread_t **threadp)
679: {
680: int magic, val;
681:
1.3 christos 682: val = READ(proc, (void *)thread, &magic, sizeof(magic));
1.2 thorpej 683: if (val != 0)
684: return val;
685:
686: if (magic != PT_MAGIC)
687: return TD_ERR_NOOBJ;
688:
1.3 christos 689: val = td__getthread(proc, (void *)thread, threadp);
1.2 thorpej 690: if (val != 0)
691: return val;
692:
693: return 0;
694: }
695:
696: int
697: td_map_id2thr(td_proc_t *proc, int threadid, td_thread_t **threadp)
698: {
699: int val, num;
1.12 nathanw 700: caddr_t next;
1.2 thorpej 701: struct pthread_queue_t allq;
702: td_thread_t *thread;
703:
704:
1.12 nathanw 705: val = READ(proc, proc->allqaddr, &allq, sizeof(allq));
1.2 thorpej 706: if (val != 0)
707: return val;
708:
1.20 nathanw 709: /* Correct for offset */
710: threadid -= TN_OFFSET;
1.3 christos 711: next = (void *)allq.ptqh_first;
712: while (next != NULL) {
1.2 thorpej 713: val = READ(proc,
1.8 nathanw 714: next + offsetof(struct __pthread_st, pt_num),
1.2 thorpej 715: &num, sizeof(num));
716:
717: if (num == threadid)
718: break;
719:
720: val = READ(proc,
1.8 nathanw 721: next + offsetof(struct __pthread_st, pt_allq.ptqe_next),
1.2 thorpej 722: &next, sizeof(next));
723: if (val != 0)
724: return val;
725: }
726:
727: if (next == 0) {
728: /* A matching thread was not found. */
729: return TD_ERR_NOOBJ;
730: }
731:
732: val = td__getthread(proc, next, &thread);
733: if (val != 0)
734: return val;
735: *threadp = thread;
736:
737: return 0;
738: }
739:
740: /* Return the thread handle of the thread running on the given LWP */
741: int
742: td_map_lwp2thr(td_proc_t *proc, int lwp, td_thread_t **threadp)
743: {
744: int val, magic;
745: struct reg gregs;
746: ucontext_t uc;
1.3 christos 747: void *th;
1.2 thorpej 748:
749: val = GETREGS(proc, 0, lwp, &gregs);
750: if (val != 0)
751: return val;
752:
753: PTHREAD_REG_TO_UCONTEXT(&uc, &gregs);
754:
1.10 cl 755: val = td__getstacksize(proc);
756: if (val != 0)
757: return val;
758:
1.3 christos 759: th = pthread__id(pthread__uc_sp(&uc));
1.2 thorpej 760:
761: val = READ(proc, th, &magic, sizeof(magic));
762: if (val != 0)
763: return val;
764:
765: if (magic != PT_MAGIC)
766: return TD_ERR_NOOBJ;
767:
768: val = td__getthread(proc, th, threadp);
769: if (val != 0)
770: return val;
771:
772: (*threadp)->lwp = lwp;
773:
774: return 0;
775: }
776:
777: int
778: td_map_lwps(td_proc_t *proc)
779: {
780: int i, val, nlwps;
781: td_thread_t *thread;
782:
1.12 nathanw 783: val = READ(proc, proc->maxlwpsaddr, &nlwps, sizeof(nlwps));
1.2 thorpej 784: if (val != 0)
785: return val;
786:
1.18 nathanw 787: if (nlwps < 1)
788: nlwps = 1; /* always at least one LWP */
789:
790: PTQ_FOREACH(thread, &proc->threads, list) {
791: thread->lwp = -1;
792: }
793:
1.2 thorpej 794: for (i = 1; i <= nlwps; i++) {
795: /*
796: * Errors are deliberately ignored for the call to
797: * td_map_lwp2thr(); it is entirely likely that not
798: * all LWPs in the range 1..nlwps exist, and that's
799: * not a problem.
800: */
801: td_map_lwp2thr(proc, i, &thread);
802: }
803: return 0;
804: }
805:
806: int
807: td_tsd_iter(td_proc_t *proc,
808: int (*call)(pthread_key_t, void (*)(void *), void *), void *arg)
809: {
810: int val;
811: int i, allocated;
812: void (*destructor)(void *);
813:
814: for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
1.12 nathanw 815: val = READ(proc, proc->tsdallocaddr + i * sizeof(int),
1.6 nathanw 816: &allocated, sizeof(allocated));
1.2 thorpej 817: if (val != 0)
818: return val;
819:
820: if (allocated) {
1.12 nathanw 821: val = READ(proc, proc->tsddestaddr +
822: i * sizeof(destructor),
1.2 thorpej 823: &destructor, sizeof(destructor));
824: if (val != 0)
825: return val;
826:
827: val = (call)(i, destructor, arg);
828: if (val != 0)
829: return val;
830: }
831: }
832:
833: return 0;
834: }
835:
836: /* Get the synchronization object that the thread is sleeping on */
837: int
838: td_thr_sleepinfo(td_thread_t *thread, td_sync_t **s)
839: {
840: int val;
841: caddr_t addr;
842:
843: if ((val = READ(thread->proc,
1.8 nathanw 844: thread->addr + offsetof(struct __pthread_st, pt_sleepobj),
1.6 nathanw 845: &addr, sizeof(addr))) != 0)
1.2 thorpej 846: return val;
847:
848: td__getsync(thread->proc, addr, s);
849:
850: return 0;
851:
852: }
853:
1.17 nathanw 854: #define DPTQ_REMOVE(head, elm, field) do { \
855: int _val; \
856: PTQ_ENTRY(__pthread_st) _qent; \
857: \
858: _val = READ(thread->proc, \
859: (elm) + offsetof(struct __pthread_st, field), \
860: &_qent, sizeof(_qent)); \
861: if (_val != 0) \
862: return _val; \
863: if (_qent.ptqe_next != NULL) { \
864: _val = WRITE(thread->proc, \
865: (caddr_t)(void *)_qent.ptqe_next + \
866: offsetof(struct __pthread_st, field.ptqe_prev), \
867: &_qent.ptqe_prev, sizeof(_qent.ptqe_prev)); \
868: if (_val != 0) \
869: return _val; \
870: } else { \
871: _val = WRITE(thread->proc, (head) + \
872: offsetof(struct pthread_queue_t, ptqh_last), \
873: &_qent.ptqe_prev, sizeof(_qent.ptqe_prev)); \
874: if (_val != 0) \
875: return _val; \
876: } \
877: _val = WRITE(thread->proc, (caddr_t)(void *)_qent.ptqe_prev, \
878: &_qent.ptqe_next, sizeof(_qent.ptqe_next)); \
879: if (_val != 0) \
880: return _val; \
881: } while (/*CONSTCOND*/0)
882:
/*
 * DPTQ_INSERT_TAIL: append the element at target address "elm" to the
 * pthread queue at target address "head", doing all reads and writes
 * in the *debugged* process.  The commented-out lines show the
 * equivalent local PTQ_INSERT_TAIL steps each remote operation mirrors.
 *
 * NOTE: like DPTQ_REMOVE, this references a local "thread" variable
 * and contains bare "return" statements; usable only inside functions
 * shaped like td_thr_suspend()/td_thr_resume().
 */
#define DPTQ_INSERT_TAIL(head, elm, field) do {				\
	int _val;							\
	struct pthread_queue_t _qhead;					\
	PTQ_ENTRY(__pthread_st) _qent;					\
									\
	/* if ((head)->ptqh_last == NULL)			   */	\
	/*	(head)->ptqh_last = &(head)->ptqh_first;	   */	\
	_val = READ(thread->proc, (head), &_qhead, sizeof(_qhead));	\
									\
	if (_val != 0)							\
		return _val;						\
	if (_qhead.ptqh_last == NULL)					\
		_qhead.ptqh_last = (void *)(head);			\
									\
	/* (elm)->field.ptqe_prev = (head)->ptqh_last;		   */	\
	_qent.ptqe_prev = _qhead.ptqh_last;				\
									\
	/* *(head)->ptqh_last = (elm);				   */	\
	_qent.ptqe_next = (void *)elm;					\
	_val = WRITE(thread->proc, (caddr_t)(void *)_qhead.ptqh_last,	\
	    &_qent.ptqe_next, sizeof(_qent.ptqe_next));			\
	if (_val != 0)							\
		return _val;						\
									\
	/* (elm)->field.ptqe_next = NULL;			   */	\
	_qent.ptqe_next = NULL;						\
									\
	/* (head)->ptqh_last = &(elm)->field.ptqe_next;		   */	\
	_qhead.ptqh_last = (void *) ((elm) +				\
	    offsetof(struct __pthread_st, field.ptqe_next));		\
									\
	_val = WRITE(thread->proc, (elm) +				\
	    offsetof(struct __pthread_st, field),			\
	    &_qent, sizeof(_qent));					\
	if (_val != 0)							\
		return _val;						\
	_val = WRITE(thread->proc,					\
	    (head) + offsetof(struct pthread_queue_t, ptqh_last),	\
	    &_qhead.ptqh_last, sizeof(_qhead.ptqh_last));		\
	if (_val != 0)							\
		return _val;						\
	} while (/*CONSTCOND*/0)
925:
926:
927: /* Suspend a thread from running */
928: int
929: td_thr_suspend(td_thread_t *thread)
930: {
1.33 ad 931: int tmp, val;
1.17 nathanw 932:
933: /* validate the thread */
934: val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
935: if (val != 0)
936: return val;
937: if (tmp != PT_MAGIC)
938: return TD_ERR_BADTHREAD;
939:
1.34 ! ad 940: /* XXXLWP get lid, suspend the sucker */;
1.17 nathanw 941: return 0;
942: }
943:
944: /* Restore a suspended thread to its previous state */
945: int
946: td_thr_resume(td_thread_t *thread)
947: {
1.33 ad 948: int tmp, val;
1.17 nathanw 949:
950: /* validate the thread */
951: val = READ(thread->proc, thread->addr, &tmp, sizeof(tmp));
952: if (val != 0)
953: return val;
954: if (tmp != PT_MAGIC)
955: return TD_ERR_BADTHREAD;
956:
1.34 ! ad 957: /* XXXLWP get lid */
1.17 nathanw 958: val = READ(thread->proc, thread->addr +
959: offsetof(struct __pthread_st, pt_flags),
960: &tmp, sizeof(tmp));
961: if (val != 0)
962: return val;
963:
1.34 ! ad 964: /* XXXLWP suspend the sucker */;
! 965:
1.17 nathanw 966: return 0;
967: }
1.2 thorpej 968:
969:
970: static int
971: td__getthread(td_proc_t *proc, caddr_t addr, td_thread_t **threadp)
972: {
973: td_thread_t *thread;
974:
975: /*
976: * Check if we've allocated a descriptor for this thread.
977: * Sadly, this makes iterating over a set of threads O(N^2)
978: * in the number of threads. More sophisticated data structures
979: * can wait.
980: */
981: PTQ_FOREACH(thread, &proc->threads, list) {
982: if (thread->addr == addr)
983: break;
984: }
985: if (thread == NULL) {
986: thread = malloc(sizeof(*thread));
987: if (thread == NULL)
988: return TD_ERR_NOMEM;
989: thread->proc = proc;
990: thread->addr = addr;
991: thread->lwp = 0;
992: PTQ_INSERT_HEAD(&proc->threads, thread, list);
993: }
994:
995: *threadp = thread;
996: return 0;
997: }
998:
999:
1000: static int
1001: td__getsync(td_proc_t *proc, caddr_t addr, td_sync_t **syncp)
1002: {
1003: td_sync_t *s;
1004:
1005: /* Check if we've allocated a descriptor for this object. */
1006: PTQ_FOREACH(s, &proc->syncs, list) {
1007: if (s->addr == addr)
1008: break;
1009: }
1010: /* Allocate a fresh one */
1011: if (s == NULL) {
1012: s = malloc(sizeof(*s));
1013: if (s == NULL)
1014: return TD_ERR_NOMEM;
1015: s->proc = proc;
1016: s->addr = addr;
1017: PTQ_INSERT_HEAD(&proc->syncs, s, list);
1018: }
1019:
1020: *syncp = s;
1021: return 0;
1022: }
1023:
1024:
1025: int
1026: td_thr_tsd(td_thread_t *thread, pthread_key_t key, void **value)
1027: {
1028: int val;
1029:
1030: val = READ(thread->proc, thread->addr +
1.8 nathanw 1031: offsetof(struct __pthread_st, pt_specific) +
1.6 nathanw 1032: key * sizeof(void *), value, sizeof(*value));
1.2 thorpej 1033:
1034: return val;
1035: }
1036:
1.10 cl 1037:
1038: static int
1039: td__getstacksize(td_proc_t *proc)
1040: {
1041: int lg, val;
1042:
1.12 nathanw 1043: if (proc->stacksizeaddr == NULL)
1044: return 0;
1045:
1046: val = READ(proc, proc->stacksizeaddr, &lg, sizeof(int));
1.10 cl 1047: if (val != 0)
1.12 nathanw 1048: return 0;
1049: if (lg != proc->stacksizelg) {
1050: proc->stacksizelg = lg;
1051: proc->stacksize = (1 << lg);
1052: proc->stackmask = proc->stacksize - 1;
1.10 cl 1053: }
1054: return 0;
1055: }
CVSweb <webmaster@jp.NetBSD.org>