Annotation of src/external/cddl/osnet/dev/cyclic/cyclic.c, Revision 1.2.6.1
1.2.6.1 ! yamt 1: /* $NetBSD: cyclic.c,v 1.2 2010/02/21 01:46:33 darran Exp $ */
1.2 darran 2:
1.1 darran 3: /*
4: * CDDL HEADER START
5: *
6: * The contents of this file are subject to the terms of the
7: * Common Development and Distribution License, Version 1.0 only
8: * (the "License"). You may not use this file except in compliance
9: * with the License.
10: *
11: * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
12: * or http://www.opensolaris.org/os/licensing.
13: * See the License for the specific language governing permissions
14: * and limitations under the License.
15: *
16: * When distributing Covered Code, include this CDDL HEADER in each
17: * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
18: * If applicable, add the following below this CDDL HEADER, with the
19: * fields enclosed by brackets "[]" replaced with your own identifying
20: * information: Portions Copyright [yyyy] [name of copyright owner]
21: *
22: * CDDL HEADER END
23: *
24: * Portions Copyright 2008 John Birrell <jb@freebsd.org>
25: *
1.2.6.1 ! yamt 26: * $FreeBSD$
1.1 darran 27: *
28: * This is a simplified version of the cyclic timer subsystem from
29: * OpenSolaris. In the FreeBSD version, we don't use interrupt levels.
30: */
31:
32: /*
33: * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
34: * Use is subject to license terms.
35: */
36:
37: /*
38: * The Cyclic Subsystem
39: * --------------------
40: *
41: * Prehistory
42: *
43: * Historically, most computer architectures have specified interval-based
44: * timer parts (e.g. SPARCstation's counter/timer; Intel's i8254). While
45: * these parts deal in relative (i.e. not absolute) time values, they are
46: * typically used by the operating system to implement the abstraction of
47: * absolute time. As a result, these parts cannot typically be reprogrammed
48: * without introducing error in the system's notion of time.
49: *
50: * Starting in about 1994, chip architectures began specifying high resolution
51: * timestamp registers. As of this writing (1999), all major chip families
52: * (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution
53: * timestamp registers, and two (UltraSPARC and MIPS) have added the capacity
54: * to interrupt based on timestamp values. These timestamp-compare registers
55: * present a time-based interrupt source which can be reprogrammed arbitrarily
56: * often without introducing error. Given the low cost of implementing such a
57: * timestamp-compare register (and the tangible benefit of eliminating
58: * discrete timer parts), it is reasonable to expect that future chip
59: * architectures will adopt this feature.
60: *
61: * The cyclic subsystem has been designed to take advantage of chip
62: * architectures with the capacity to interrupt based on absolute, high
63: * resolution values of time.
64: *
65: * Subsystem Overview
66: *
67: * The cyclic subsystem is a low-level kernel subsystem designed to provide
68: * arbitrarily high resolution, per-CPU interval timers (to avoid colliding
69: * with existing terms, we dub such an interval timer a "cyclic").
70: * Alternatively, a cyclic may be specified to be "omnipresent", denoting
71: * firing on all online CPUs.
72: *
73: * Cyclic Subsystem Interface Overview
74: * -----------------------------------
75: *
76: * The cyclic subsystem has interfaces with the kernel at-large, with other
77: * kernel subsystems (e.g. the processor management subsystem, the checkpoint
78: * resume subsystem) and with the platform (the cyclic backend). Each
79: * of these interfaces is given a brief synopsis here, and is described
80: * in full above the interface's implementation.
81: *
82: * The following diagram displays the cyclic subsystem's interfaces to
83: * other kernel components. The arrows denote a "calls" relationship, with
84: * the large arrow indicating the cyclic subsystem's consumer interface.
85: * Each arrow is labeled with the section in which the corresponding
86: * interface is described.
87: *
88: * Kernel at-large consumers
89: * -----------++------------
90: * ||
91: * ||
92: * _||_
93: * \ /
94: * \/
95: * +---------------------+
96: * | |
97: * | Cyclic subsystem |<----------- Other kernel subsystems
98: * | |
99: * +---------------------+
100: * ^ |
101: * | |
102: * | |
103: * | v
104: * +---------------------+
105: * | |
106: * | Cyclic backend |
107: * | (platform specific) |
108: * | |
109: * +---------------------+
110: *
111: *
112: * Kernel At-Large Interfaces
113: *
114: * cyclic_add() <-- Creates a cyclic
115: * cyclic_add_omni() <-- Creates an omnipresent cyclic
116: * cyclic_remove() <-- Removes a cyclic
117: *
118: * Backend Interfaces
119: *
120: * cyclic_init() <-- Initializes the cyclic subsystem
121: * cyclic_fire() <-- Interrupt entry point
122: *
123: * The backend-supplied interfaces (through the cyc_backend structure) are
124: * documented in detail in <sys/cyclic_impl.h>
125: *
126: *
127: * Cyclic Subsystem Implementation Overview
128: * ----------------------------------------
129: *
130: * The cyclic subsystem is designed to minimize interference between cyclics
131: * on different CPUs. Thus, all of the cyclic subsystem's data structures
132: * hang off of a per-CPU structure, cyc_cpu.
133: *
134: * Each cyc_cpu has a power-of-two sized array of cyclic structures (the
135: * cyp_cyclics member of the cyc_cpu structure). If cyclic_add() is called
136: * and there does not exist a free slot in the cyp_cyclics array, the size of
137: * the array will be doubled. The array will never shrink. Cyclics are
138: * referred to by their index in the cyp_cyclics array, which is of type
139: * cyc_index_t.
140: *
141: * The cyclics are kept sorted by expiration time in the cyc_cpu's heap. The
142: * heap is keyed by cyclic expiration time, with parents expiring earlier
143: * than their children.
144: *
145: * Heap Management
146: *
147: * The heap is managed primarily by cyclic_fire(). Upon entry, cyclic_fire()
148: * compares the root cyclic's expiration time to the current time. If the
149: * expiration time is in the past, cyclic_expire() is called on the root
150: * cyclic. Upon return from cyclic_expire(), the cyclic's new expiration time
151: * is derived by adding its interval to its old expiration time, and a
152: * downheap operation is performed. After the downheap, cyclic_fire()
153: * examines the (potentially changed) root cyclic, repeating the
154: * cyclic_expire()/add interval/cyclic_downheap() sequence until the root
155: * cyclic has an expiration time in the future. This expiration time
156: * (guaranteed to be the earliest in the heap) is then communicated to the
157: * backend via cyb_reprogram. Optimal backends will next call cyclic_fire()
158: * shortly after the root cyclic's expiration time.
159: *
160: * To allow efficient, deterministic downheap operations, we implement the
161: * heap as an array (the cyp_heap member of the cyc_cpu structure), with each
162: * element containing an index into the CPU's cyp_cyclics array.
163: *
164: * The heap is laid out in the array according to the following:
165: *
166: * 1. The root of the heap is always in the 0th element of the heap array
167: * 2. The left and right children of the nth element are element
168: * (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively.
169: *
170: * This layout is standard (see, e.g., Cormen's "Algorithms"); the proof
171: * that these constraints correctly lay out a heap (or indeed, any binary
172: * tree) is trivial and left to the reader.
173: *
174: * To see the heap by example, assume our cyclics array has the following
175: * members (at time t):
176: *
177: * cy_handler cy_expire
178: * ---------------------------------------------
179: * [ 0] clock() t+10000000
180: * [ 1] deadman() t+1000000000
181: * [ 2] clock_highres_fire() t+100
182: * [ 3] clock_highres_fire() t+1000
183: * [ 4] clock_highres_fire() t+500
184: * [ 5] (free) --
185: * [ 6] (free) --
186: * [ 7] (free) --
187: *
188: * The heap array could be:
189: *
190: * [0] [1] [2] [3] [4] [5] [6] [7]
191: * +-----+-----+-----+-----+-----+-----+-----+-----+
192: * | | | | | | | | |
193: * | 2 | 3 | 4 | 0 | 1 | x | x | x |
194: * | | | | | | | | |
195: * +-----+-----+-----+-----+-----+-----+-----+-----+
196: *
197: * Graphically, this array corresponds to the following (excuse the ASCII art):
198: *
199: * 2
200: * |
201: * +------------------+------------------+
202: * 3 4
203: * |
204: * +---------+--------+
205: * 0 1
206: *
207: * Note that the heap is laid out by layer: all nodes at a given depth are
208: * stored in consecutive elements of the array. Moreover, layers of
209: * consecutive depths are in adjacent element ranges. This property
210: * guarantees high locality of reference during downheap operations.
211: * Specifically, we are guaranteed that we can downheap to a depth of
212: *
213: * lg (cache_line_size / sizeof (cyc_index_t))
214: *
215: * nodes with at most one cache miss. On UltraSPARC (64 byte e-cache line
216: * size), this corresponds to a depth of four nodes. Thus, if there are
217: * fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at
218: * most once in the e-cache.
219: *
220: * Downheaps are required to compare siblings as they proceed down the
221: * heap. For downheaps proceeding beyond the one-cache-miss depth, every
222: * access to a left child could potentially miss in the cache. However,
223: * if we assume
224: *
225: * (cache_line_size / sizeof (cyc_index_t)) > 2,
226: *
227: * then all siblings are guaranteed to be on the same cache line. Thus, the
228: * miss on the left child will guarantee a hit on the right child; downheaps
229: * will incur at most one cache miss per layer beyond the one-cache-miss
230: * depth. The total number of cache misses for heap management during a
231: * downheap operation is thus bounded by
232: *
233: * lg (n) - lg (cache_line_size / sizeof (cyc_index_t))
234: *
235: * Traditional pointer-based heaps are implemented without regard to
236: * locality. Downheaps can thus incur two cache misses per layer (one for
237: * each child), but at most one cache miss at the root. This yields a bound
238: * of
239: *
240: * 2 * lg (n) - 1
241: *
242: * on the total cache misses.
243: *
244: * This difference may seem theoretically trivial (the difference is, after
245: * all, constant), but can become substantial in practice -- especially for
246: * caches with very large cache lines and high miss penalties (e.g. TLBs).
247: *
248: * Heaps must always be full, balanced trees. Heap management must therefore
249: * track the next point-of-insertion into the heap. In pointer-based heaps,
250: * recomputing this point takes O(lg (n)). Given the layout of the
251: * array-based implementation, however, the next point-of-insertion is
252: * always:
253: *
254: * heap[number_of_elements]
255: *
256: * We exploit this property by implementing the free-list in the usused
257: * heap elements. Heap insertion, therefore, consists only of filling in
258: * the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing
259: * the number of elements, and performing an upheap. Heap deletion consists
260: * of decrementing the number of elements, swapping the to-be-deleted element
261: * with the element at cyp_heap[number_of_elements], and downheaping.
262: *
263: * Filling in more details in our earlier example:
264: *
265: * +--- free list head
266: * |
267: * V
268: *
269: * [0] [1] [2] [3] [4] [5] [6] [7]
270: * +-----+-----+-----+-----+-----+-----+-----+-----+
271: * | | | | | | | | |
272: * | 2 | 3 | 4 | 0 | 1 | 5 | 6 | 7 |
273: * | | | | | | | | |
274: * +-----+-----+-----+-----+-----+-----+-----+-----+
275: *
276: * To insert into this heap, we would just need to fill in the cyclic at
277: * cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform
278: * an upheap.
279: *
280: * If we wanted to remove, say, cyp_cyclics[3], we would first scan for it
281: * in the cyp_heap, and discover it at cyp_heap[1]. We would then decrement
282: * the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4],
283: * and perform a downheap from cyp_heap[1]. The linear scan is required
284: * because the cyclic does not keep a backpointer into the heap. This makes
285: * heap manipulation (e.g. downheaps) faster at the expense of removal
286: * operations.
287: *
288: * Expiry processing
289: *
290: * As alluded to above, cyclic_expire() is called by cyclic_fire() to expire
291: * a cyclic. Cyclic subsystem consumers are guaranteed that for an arbitrary
292: * time t in the future, their cyclic handler will have been called
293: * (t - cyt_when) / cyt_interval times. cyclic_expire() simply needs to call
294: * the handler.
295: *
296: * Resizing
297: *
298: * All of the discussion thus far has assumed a static number of cyclics.
299: * Obviously, static limitations are not practical; we need the capacity
300: * to resize our data structures dynamically.
301: *
302: * We resize our data structures lazily, and only on a per-CPU basis.
303: * The size of the data structures always doubles and never shrinks. We
304: * serialize adds (and thus resizes) on cpu_lock; we never need to deal
305: * with concurrent resizes. Resizes should be rare; they may induce jitter
306: * on the CPU being resized, but should not affect cyclic operation on other
307: * CPUs.
308: *
309: * Three key cyc_cpu data structures need to be resized: the cyclics array,
310: * nad the heap array. Resizing is relatively straightforward:
311: *
312: * 1. The new, larger arrays are allocated in cyclic_expand() (called
313: * from cyclic_add()).
314: * 2. The contents of the old arrays are copied into the new arrays.
315: * 3. The old cyclics array is bzero()'d
316: * 4. The pointers are updated.
317: *
318: * Removals
319: *
320: * Cyclic removals should be rare. To simplify the implementation (and to
321: * allow optimization for the cyclic_fire()/cyclic_expire()
322: * path), we force removals and adds to serialize on cpu_lock.
323: *
324: */
325: #include <sys/cdefs.h>
326: #include <sys/param.h>
327: #include <sys/conf.h>
328: #include <sys/kernel.h>
1.2.6.1 ! yamt 329: #ifdef __FreeBSD___
1.1 darran 330: #include <sys/lock.h>
331: #include <sys/sx.h>
1.2.6.1 ! yamt 332: #endif
1.1 darran 333: #include <sys/cyclic_impl.h>
334: #include <sys/module.h>
335: #include <sys/systm.h>
336: #include <sys/atomic.h>
337: #include <sys/kmem.h>
338: #include <sys/cmn_err.h>
339: #include <sys/dtrace_bsd.h>
1.2.6.1 ! yamt 340: #ifdef __FreeBSD__
1.1 darran 341: #include <machine/cpu.h>
1.2.6.1 ! yamt 342: #endif
! 343:
! 344: #ifdef __NetBSD__
! 345: #include <sys/cpu.h>
! 346: #include <sys/malloc.h>
! 347: #include <sys/xcall.h>
! 348:
! 349: #undef mutex_init
! 350: #define mtx_init(m, d, p, f) mutex_init(m, MUTEX_DEFAULT, IPL_CLOCK)
! 351: #define mtx_lock_spin(x) mutex_spin_enter(x)
! 352: #define mtx_unlock_spin(x) mutex_spin_exit(x)
! 353: #define mtx_destroy(x) mutex_destroy(x)
! 354:
! 355: #define ASSERT(x) KASSERT(x)
! 356: #define SYSINIT(a1, a2, a3, a4, a5)
! 357: #define SYSUNINIT(a1, a2, a3, a4, a5)
! 358: #define CPU_FOREACH(var) \
! 359: CPU_INFO_ITERATOR cii; \
! 360: struct cpu_info *ci; \
! 361: for (CPU_INFO_FOREACH(cii, ci))
! 362: #define MAXCPU MAXCPUS
! 363: #define TRAPF_USERMODE(x) CLKF_USERMODE(x)
! 364: #define TRAPF_PC(x) CLKF_PC(x)
! 365: #endif
1.1 darran 366:
367: static kmem_cache_t *cyclic_id_cache;
368: static cyc_id_t *cyclic_id_head;
369: static cyc_backend_t cyclic_backend;
370:
371: MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem");
372:
373: /*
374: * Returns 1 if the upheap propagated to the root, 0 if it did not. This
375: * allows the caller to reprogram the backend only when the root has been
376: * modified.
377: */
378: static int
379: cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx)
380: {
381: cyclic_t *cyclics;
382: cyc_index_t *heap;
383: cyc_index_t heap_parent, heap_current = ndx;
384: cyc_index_t parent, current;
385:
386: if (heap_current == 0)
387: return (1);
388:
389: heap = cpu->cyp_heap;
390: cyclics = cpu->cyp_cyclics;
391: heap_parent = CYC_HEAP_PARENT(heap_current);
392:
393: for (;;) {
394: current = heap[heap_current];
395: parent = heap[heap_parent];
396:
397: /*
398: * We have an expiration time later than our parent; we're
399: * done.
400: */
401: if (cyclics[current].cy_expire >= cyclics[parent].cy_expire)
402: return (0);
403:
404: /*
405: * We need to swap with our parent, and continue up the heap.
406: */
407: heap[heap_parent] = current;
408: heap[heap_current] = parent;
409:
410: /*
411: * If we just reached the root, we're done.
412: */
413: if (heap_parent == 0)
414: return (1);
415:
416: heap_current = heap_parent;
417: heap_parent = CYC_HEAP_PARENT(heap_current);
418: }
419: }
420:
421: static void
422: cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx)
423: {
424: cyclic_t *cyclics = cpu->cyp_cyclics;
425: cyc_index_t *heap = cpu->cyp_heap;
426:
427: cyc_index_t heap_left, heap_right, heap_me = ndx;
428: cyc_index_t left, right, me;
429: cyc_index_t nelems = cpu->cyp_nelems;
430:
431: for (;;) {
432: /*
433: * If we don't have a left child (i.e., we're a leaf), we're
434: * done.
435: */
436: if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems)
437: return;
438:
439: left = heap[heap_left];
440: me = heap[heap_me];
441:
442: heap_right = CYC_HEAP_RIGHT(heap_me);
443:
444: /*
445: * Even if we don't have a right child, we still need to compare
446: * our expiration time against that of our left child.
447: */
448: if (heap_right >= nelems)
449: goto comp_left;
450:
451: right = heap[heap_right];
452:
453: /*
454: * We have both a left and a right child. We need to compare
455: * the expiration times of the children to determine which
456: * expires earlier.
457: */
458: if (cyclics[right].cy_expire < cyclics[left].cy_expire) {
459: /*
460: * Our right child is the earlier of our children.
461: * We'll now compare our expiration time to its; if
462: * ours is the earlier, we're done.
463: */
464: if (cyclics[me].cy_expire <= cyclics[right].cy_expire)
465: return;
466:
467: /*
468: * Our right child expires earlier than we do; swap
469: * with our right child, and descend right.
470: */
471: heap[heap_right] = me;
472: heap[heap_me] = right;
473: heap_me = heap_right;
474: continue;
475: }
476:
477: comp_left:
478: /*
479: * Our left child is the earlier of our children (or we have
480: * no right child). We'll now compare our expiration time
481: * to its; if ours is the earlier, we're done.
482: */
483: if (cyclics[me].cy_expire <= cyclics[left].cy_expire)
484: return;
485:
486: /*
487: * Our left child expires earlier than we do; swap with our
488: * left child, and descend left.
489: */
490: heap[heap_left] = me;
491: heap[heap_me] = left;
492: heap_me = heap_left;
493: }
494: }
495:
496: static void
497: cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic)
498: {
499: cyc_func_t handler = cyclic->cy_handler;
500: void *arg = cyclic->cy_arg;
501:
502: (*handler)(arg);
503: }
504:
505: /*
506: * cyclic_fire(cpu_t *)
507: *
508: * Overview
509: *
510: * cyclic_fire() is the cyclic subsystem's interrupt handler.
511: * Called by the cyclic backend.
512: *
513: * Arguments and notes
514: *
515: * The only argument is the CPU on which the interrupt is executing;
516: * backends must call into cyclic_fire() on the specified CPU.
517: *
518: * cyclic_fire() may be called spuriously without ill effect. Optimal
519: * backends will call into cyclic_fire() at or shortly after the time
520: * requested via cyb_reprogram(). However, calling cyclic_fire()
521: * arbitrarily late will only manifest latency bubbles; the correctness
522: * of the cyclic subsystem does not rely on the timeliness of the backend.
523: *
524: * cyclic_fire() is wait-free; it will not block or spin.
525: *
526: * Return values
527: *
528: * None.
529: *
530: */
531: static void
532: cyclic_fire(cpu_t *c)
533: {
534: cyc_cpu_t *cpu = c->cpu_cyclic;
1.2.6.1 ! yamt 535: cyc_backend_t *be = cpu->cyp_backend;
1.1 darran 536: cyc_index_t *heap = cpu->cyp_heap;
537: cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
1.2.6.1 ! yamt 538: void *arg = be->cyb_arg;
1.1 darran 539: hrtime_t now = gethrtime();
540: hrtime_t exp;
541:
542: if (cpu->cyp_nelems == 0) {
543: /* This is a spurious fire. */
544: return;
545: }
546:
547: for (;;) {
548: cyc_index_t ndx = heap[0];
549:
550: cyclic = &cyclics[ndx];
551:
552: ASSERT(!(cyclic->cy_flags & CYF_FREE));
553:
554: if ((exp = cyclic->cy_expire) > now)
555: break;
556:
557: cyclic_expire(cpu, ndx, cyclic);
558:
559: /*
560: * If this cyclic will be set to next expire in the distant
561: * past, we have one of two situations:
562: *
563: * a) This is the first firing of a cyclic which had
564: * cy_expire set to 0.
565: *
566: * b) We are tragically late for a cyclic -- most likely
567: * due to being in the debugger.
568: *
569: * In either case, we set the new expiration time to be the
570: * the next interval boundary. This assures that the
571: * expiration time modulo the interval is invariant.
572: *
573: * We arbitrarily define "distant" to be one second (one second
574: * is chosen because it's shorter than any foray to the
575: * debugger while still being longer than any legitimate
576: * stretch).
577: */
578: exp += cyclic->cy_interval;
579:
580: if (now - exp > NANOSEC) {
581: hrtime_t interval = cyclic->cy_interval;
582:
583: exp += ((now - exp) / interval + 1) * interval;
584: }
585:
586: cyclic->cy_expire = exp;
587: cyclic_downheap(cpu, 0);
588: }
589:
590: /*
591: * Now we have a cyclic in the root slot which isn't in the past;
592: * reprogram the interrupt source.
593: */
1.2.6.1 ! yamt 594: be->cyb_reprogram(arg, exp);
! 595: }
1.1 darran 596:
1.2.6.1 ! yamt 597: static void
! 598: cyclic_expand_xcall(cyc_xcallarg_t *arg)
! 599: {
! 600: cyc_cpu_t *cpu = arg->cyx_cpu;
! 601: cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i;
! 602: cyc_index_t *new_heap = arg->cyx_heap;
! 603: cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics;
! 604:
! 605: /* Disable preemption and interrupts. */
! 606: mtx_lock_spin(&cpu->cyp_mtx);
! 607:
! 608: /*
! 609: * Assert that the new size is a power of 2.
! 610: */
! 611: ASSERT((new_size & (new_size - 1)) == 0);
! 612: ASSERT(new_size == (size << 1));
! 613: ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL);
! 614:
! 615: bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size);
! 616: bcopy(cyclics, new_cyclics, sizeof (cyclic_t) * size);
! 617:
! 618: /*
! 619: * Set up the free list, and set all of the new cyclics to be CYF_FREE.
! 620: */
! 621: for (i = size; i < new_size; i++) {
! 622: new_heap[i] = i;
! 623: new_cyclics[i].cy_flags = CYF_FREE;
! 624: }
! 625:
! 626: /*
! 627: * We can go ahead and plow the value of cyp_heap and cyp_cyclics;
! 628: * cyclic_expand() has kept a copy.
! 629: */
! 630: cpu->cyp_heap = new_heap;
! 631: cpu->cyp_cyclics = new_cyclics;
! 632: cpu->cyp_size = new_size;
1.1 darran 633: mtx_unlock_spin(&cpu->cyp_mtx);
634: }
635:
636: /*
637: * cyclic_expand() will cross call onto the CPU to perform the actual
638: * expand operation.
639: */
640: static void
641: cyclic_expand(cyc_cpu_t *cpu)
642: {
1.2.6.1 ! yamt 643: cyc_index_t new_size, old_size;
1.1 darran 644: cyc_index_t *new_heap, *old_heap;
645: cyclic_t *new_cyclics, *old_cyclics;
1.2.6.1 ! yamt 646: cyc_xcallarg_t arg;
! 647: cyc_backend_t *be = cpu->cyp_backend;
1.1 darran 648:
649: ASSERT(MUTEX_HELD(&cpu_lock));
650:
1.2.6.1 ! yamt 651: old_heap = cpu->cyp_heap;
! 652: old_cyclics = cpu->cyp_cyclics;
! 653:
! 654: if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) {
1.1 darran 655: new_size = CY_DEFAULT_PERCPU;
1.2.6.1 ! yamt 656: ASSERT(old_heap == NULL && old_cyclics == NULL);
! 657: }
1.1 darran 658:
659: /*
660: * Check that the new_size is a power of 2.
661: */
662: ASSERT(((new_size - 1) & new_size) == 0);
663:
664: new_heap = malloc(sizeof(cyc_index_t) * new_size, M_CYCLIC, M_WAITOK);
665: new_cyclics = malloc(sizeof(cyclic_t) * new_size, M_CYCLIC, M_ZERO | M_WAITOK);
666:
1.2.6.1 ! yamt 667: arg.cyx_cpu = cpu;
! 668: arg.cyx_heap = new_heap;
! 669: arg.cyx_cyclics = new_cyclics;
! 670: arg.cyx_size = new_size;
1.1 darran 671:
1.2.6.1 ! yamt 672: be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
! 673: (cyc_func_t)cyclic_expand_xcall, &arg);
1.1 darran 674:
675: if (old_cyclics != NULL) {
676: ASSERT(old_heap != NULL);
677: ASSERT(old_size != 0);
678: free(old_cyclics, M_CYCLIC);
679: free(old_heap, M_CYCLIC);
680: }
681: }
682:
1.2.6.1 ! yamt 683: static void
! 684: cyclic_add_xcall(cyc_xcallarg_t *arg)
1.1 darran 685: {
1.2.6.1 ! yamt 686: cyc_cpu_t *cpu = arg->cyx_cpu;
! 687: cyc_handler_t *hdlr = arg->cyx_hdlr;
! 688: cyc_time_t *when = arg->cyx_when;
! 689: cyc_backend_t *be = cpu->cyp_backend;
1.1 darran 690: cyc_index_t ndx, nelems;
1.2.6.1 ! yamt 691: cyb_arg_t bar = be->cyb_arg;
1.1 darran 692: cyclic_t *cyclic;
693:
694: ASSERT(cpu->cyp_nelems < cpu->cyp_size);
695:
1.2.6.1 ! yamt 696: /* Disable preemption and interrupts. */
! 697: mtx_lock_spin(&cpu->cyp_mtx);
1.1 darran 698: nelems = cpu->cyp_nelems++;
699:
1.2.6.1 ! yamt 700: if (nelems == 0) {
1.1 darran 701: /*
702: * If this is the first element, we need to enable the
703: * backend on this CPU.
704: */
1.2.6.1 ! yamt 705: be->cyb_enable(bar);
! 706: }
1.1 darran 707:
708: ndx = cpu->cyp_heap[nelems];
709: cyclic = &cpu->cyp_cyclics[ndx];
710:
711: ASSERT(cyclic->cy_flags == CYF_FREE);
712: cyclic->cy_interval = when->cyt_interval;
713:
1.2.6.1 ! yamt 714: if (when->cyt_when == 0) {
! 715: /*
! 716: * If a start time hasn't been explicitly specified, we'll
! 717: * start on the next interval boundary.
! 718: */
! 719: cyclic->cy_expire = (gethrtime() / cyclic->cy_interval + 1) *
! 720: cyclic->cy_interval;
! 721: } else {
1.1 darran 722: cyclic->cy_expire = when->cyt_when;
1.2.6.1 ! yamt 723: }
1.1 darran 724:
725: cyclic->cy_handler = hdlr->cyh_func;
726: cyclic->cy_arg = hdlr->cyh_arg;
1.2.6.1 ! yamt 727: cyclic->cy_flags = arg->cyx_flags;
1.1 darran 728:
729: if (cyclic_upheap(cpu, nelems)) {
730: hrtime_t exp = cyclic->cy_expire;
731:
732: /*
733: * If our upheap propagated to the root, we need to
734: * reprogram the interrupt source.
735: */
1.2.6.1 ! yamt 736: be->cyb_reprogram(bar, exp);
1.1 darran 737: }
738: mtx_unlock_spin(&cpu->cyp_mtx);
739:
1.2.6.1 ! yamt 740: arg->cyx_ndx = ndx;
1.1 darran 741: }
742:
1.2.6.1 ! yamt 743: static cyc_index_t
! 744: cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr,
! 745: cyc_time_t *when, uint16_t flags)
1.1 darran 746: {
1.2.6.1 ! yamt 747: cyc_backend_t *be = cpu->cyp_backend;
! 748: cyb_arg_t bar = be->cyb_arg;
! 749: cyc_xcallarg_t arg;
1.1 darran 750:
751: ASSERT(MUTEX_HELD(&cpu_lock));
1.2.6.1 ! yamt 752: ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE));
! 753: ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
1.1 darran 754:
1.2.6.1 ! yamt 755: if (cpu->cyp_nelems == cpu->cyp_size) {
! 756: /*
! 757: * This is expensive; it will cross call onto the other
! 758: * CPU to perform the expansion.
! 759: */
! 760: cyclic_expand(cpu);
! 761: ASSERT(cpu->cyp_nelems < cpu->cyp_size);
! 762: }
1.1 darran 763:
1.2.6.1 ! yamt 764: /*
! 765: * By now, we know that we're going to be able to successfully
! 766: * perform the add. Now cross call over to the CPU of interest to
! 767: * actually add our cyclic.
! 768: */
! 769: arg.cyx_cpu = cpu;
! 770: arg.cyx_hdlr = hdlr;
! 771: arg.cyx_when = when;
! 772: arg.cyx_flags = flags;
1.1 darran 773:
1.2.6.1 ! yamt 774: be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg);
! 775:
! 776: return (arg.cyx_ndx);
! 777: }
1.1 darran 778:
1.2.6.1 ! yamt 779: static void
! 780: cyclic_remove_xcall(cyc_xcallarg_t *arg)
! 781: {
! 782: cyc_cpu_t *cpu = arg->cyx_cpu;
! 783: cyc_backend_t *be = cpu->cyp_backend;
! 784: cyb_arg_t bar = be->cyb_arg;
! 785: cyc_index_t ndx = arg->cyx_ndx, nelems = cpu->cyp_nelems, i;
! 786: cyc_index_t *heap = cpu->cyp_heap, last;
! 787: cyclic_t *cyclic;
! 788:
! 789: ASSERT(nelems > 0);
! 790:
! 791: /* Disable preemption and interrupts. */
! 792: mtx_lock_spin(&cpu->cyp_mtx);
1.1 darran 793: cyclic = &cpu->cyp_cyclics[ndx];
794:
795: /*
796: * Grab the current expiration time. If this cyclic is being
797: * removed as part of a juggling operation, the expiration time
798: * will be used when the cyclic is added to the new CPU.
799: */
1.2.6.1 ! yamt 800: if (arg->cyx_when != NULL) {
! 801: arg->cyx_when->cyt_when = cyclic->cy_expire;
! 802: arg->cyx_when->cyt_interval = cyclic->cy_interval;
1.1 darran 803: }
804:
1.2.6.1 ! yamt 805: /*
! 806: * Now set the flags to CYF_FREE. We don't need a membar_enter()
! 807: * between zeroing pend and setting the flags because we're at
! 808: * CY_HIGH_LEVEL (that is, the zeroing of pend and the setting
! 809: * of cy_flags appear atomic to softints).
! 810: */
1.1 darran 811: cyclic->cy_flags = CYF_FREE;
812:
813: for (i = 0; i < nelems; i++) {
814: if (heap[i] == ndx)
815: break;
816: }
817:
818: if (i == nelems)
819: panic("attempt to remove non-existent cyclic");
820:
821: cpu->cyp_nelems = --nelems;
822:
1.2.6.1 ! yamt 823: if (nelems == 0) {
1.1 darran 824: /*
825: * If we just removed the last element, then we need to
826: * disable the backend on this CPU.
827: */
1.2.6.1 ! yamt 828: be->cyb_disable(bar);
! 829: }
1.1 darran 830:
1.2.6.1 ! yamt 831: if (i == nelems) {
1.1 darran 832: /*
833: * If we just removed the last element of the heap, then
834: * we don't have to downheap.
835: */
1.2.6.1 ! yamt 836: goto out;
! 837: }
1.1 darran 838:
839: /*
840: * Swap the last element of the heap with the one we want to
841: * remove, and downheap (this has the implicit effect of putting
842: * the newly freed element on the free list).
843: */
844: heap[i] = (last = heap[nelems]);
845: heap[nelems] = ndx;
846:
1.2.6.1 ! yamt 847: if (i == 0) {
1.1 darran 848: cyclic_downheap(cpu, 0);
1.2.6.1 ! yamt 849: } else {
1.1 darran 850: if (cyclic_upheap(cpu, i) == 0) {
851: /*
852: * The upheap didn't propagate to the root; if it
853: * didn't propagate at all, we need to downheap.
854: */
1.2.6.1 ! yamt 855: if (heap[i] == last) {
1.1 darran 856: cyclic_downheap(cpu, i);
1.2.6.1 ! yamt 857: }
! 858: goto out;
1.1 darran 859: }
860: }
861:
862: /*
863: * We're here because we changed the root; we need to reprogram
864: * the clock source.
865: */
866: cyclic = &cpu->cyp_cyclics[heap[0]];
867:
868: ASSERT(nelems != 0);
1.2.6.1 ! yamt 869: be->cyb_reprogram(bar, cyclic->cy_expire);
! 870: out:
1.1 darran 871: mtx_unlock_spin(&cpu->cyp_mtx);
1.2.6.1 ! yamt 872: }
! 873:
! 874: static int
! 875: cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait)
! 876: {
! 877: cyc_backend_t *be = cpu->cyp_backend;
! 878: cyc_xcallarg_t arg;
! 879:
! 880: ASSERT(MUTEX_HELD(&cpu_lock));
! 881: ASSERT(wait == CY_WAIT || wait == CY_NOWAIT);
! 882:
! 883: arg.cyx_ndx = ndx;
! 884: arg.cyx_cpu = cpu;
! 885: arg.cyx_when = when;
! 886: arg.cyx_wait = wait;
! 887:
! 888: be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
! 889: (cyc_func_t)cyclic_remove_xcall, &arg);
1.1 darran 890:
891: return (1);
892: }
893:
894: static void
895: cyclic_configure(cpu_t *c)
896: {
897: cyc_cpu_t *cpu = malloc(sizeof(cyc_cpu_t), M_CYCLIC, M_ZERO | M_WAITOK);
898: cyc_backend_t *nbe = malloc(sizeof(cyc_backend_t), M_CYCLIC, M_ZERO | M_WAITOK);
899:
900: ASSERT(MUTEX_HELD(&cpu_lock));
901:
902: if (cyclic_id_cache == NULL)
1.2.6.1 ! yamt 903: cyclic_id_cache = kmem_cache_create(__UNCONST("cyclic_id_cache"),
1.1 darran 904: sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
905:
906: cpu->cyp_cpu = c;
907:
908: cpu->cyp_size = 1;
909: cpu->cyp_heap = malloc(sizeof(cyc_index_t), M_CYCLIC, M_ZERO | M_WAITOK);
910: cpu->cyp_cyclics = malloc(sizeof(cyclic_t), M_CYCLIC, M_ZERO | M_WAITOK);
911: cpu->cyp_cyclics->cy_flags = CYF_FREE;
912:
913: mtx_init(&cpu->cyp_mtx, "cyclic cpu", NULL, MTX_SPIN);
914:
915: /*
916: * Setup the backend for this CPU.
917: */
918: bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t));
919: if (nbe->cyb_configure != NULL)
920: nbe->cyb_arg = nbe->cyb_configure(c);
921: cpu->cyp_backend = nbe;
922:
923: /*
924: * On platforms where stray interrupts may be taken during startup,
925: * the CPU's cpu_cyclic pointer serves as an indicator that the
926: * cyclic subsystem for this CPU is prepared to field interrupts.
927: */
928: membar_producer();
929:
930: c->cpu_cyclic = cpu;
931: }
932:
933: static void
934: cyclic_unconfigure(cpu_t *c)
935: {
936: cyc_cpu_t *cpu = c->cpu_cyclic;
937: cyc_backend_t *be = cpu->cyp_backend;
938: cyb_arg_t bar = be->cyb_arg;
939:
940: ASSERT(MUTEX_HELD(&cpu_lock));
941:
942: c->cpu_cyclic = NULL;
943:
944: /*
945: * Let the backend know that the CPU is being yanked, and free up
946: * the backend structure.
947: */
948: if (be->cyb_unconfigure != NULL)
949: be->cyb_unconfigure(bar);
950: free(be, M_CYCLIC);
951: cpu->cyp_backend = NULL;
952:
953: mtx_destroy(&cpu->cyp_mtx);
954:
955: /* Finally, clean up our remaining dynamic structures. */
956: free(cpu->cyp_cyclics, M_CYCLIC);
957: free(cpu->cyp_heap, M_CYCLIC);
958: free(cpu, M_CYCLIC);
959: }
960:
961: static void
962: cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu)
963: {
964: cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
965: cyc_omni_cpu_t *ocpu = malloc(sizeof(cyc_omni_cpu_t), M_CYCLIC , M_WAITOK);
966: cyc_handler_t hdlr;
967: cyc_time_t when;
968:
969: ASSERT(MUTEX_HELD(&cpu_lock));
970: ASSERT(idp->cyi_cpu == NULL);
971:
972: hdlr.cyh_func = NULL;
973: hdlr.cyh_arg = NULL;
974:
975: when.cyt_when = 0;
976: when.cyt_interval = 0;
977:
978: omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when);
979:
980: ASSERT(hdlr.cyh_func != NULL);
981: ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0);
982:
983: ocpu->cyo_cpu = cpu;
984: ocpu->cyo_arg = hdlr.cyh_arg;
985: ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0);
986: ocpu->cyo_next = idp->cyi_omni_list;
987: idp->cyi_omni_list = ocpu;
988: }
989:
990: static void
991: cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu)
992: {
993: cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
994: cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL;
995:
996: ASSERT(MUTEX_HELD(&cpu_lock));
997: ASSERT(idp->cyi_cpu == NULL);
998: ASSERT(ocpu != NULL);
999:
1000: while (ocpu != NULL && ocpu->cyo_cpu != cpu) {
1001: prev = ocpu;
1002: ocpu = ocpu->cyo_next;
1003: }
1004:
1005: /*
1006: * We _must_ have found an cyc_omni_cpu which corresponds to this
1007: * CPU -- the definition of an omnipresent cyclic is that it runs
1008: * on all online CPUs.
1009: */
1010: ASSERT(ocpu != NULL);
1011:
1012: if (prev == NULL) {
1013: idp->cyi_omni_list = ocpu->cyo_next;
1014: } else {
1015: prev->cyo_next = ocpu->cyo_next;
1016: }
1017:
1018: (void) cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL, CY_WAIT);
1019:
1020: /*
1021: * The cyclic has been removed from this CPU; time to call the
1022: * omnipresent offline handler.
1023: */
1024: if (omni->cyo_offline != NULL)
1025: omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg);
1026:
1027: free(ocpu, M_CYCLIC);
1028: }
1029:
1030: static cyc_id_t *
1031: cyclic_new_id(void)
1032: {
1033: cyc_id_t *idp;
1034:
1035: ASSERT(MUTEX_HELD(&cpu_lock));
1036:
1037: idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP);
1038:
1039: /*
1040: * The cyi_cpu field of the cyc_id_t structure tracks the CPU
1041: * associated with the cyclic. If and only if this field is NULL, the
1042: * cyc_id_t is an omnipresent cyclic. Note that cyi_omni_list may be
1043: * NULL for an omnipresent cyclic while the cyclic is being created
1044: * or destroyed.
1045: */
1046: idp->cyi_cpu = NULL;
1047: idp->cyi_ndx = 0;
1048:
1049: idp->cyi_next = cyclic_id_head;
1050: idp->cyi_prev = NULL;
1051: idp->cyi_omni_list = NULL;
1052:
1053: if (cyclic_id_head != NULL) {
1054: ASSERT(cyclic_id_head->cyi_prev == NULL);
1055: cyclic_id_head->cyi_prev = idp;
1056: }
1057:
1058: cyclic_id_head = idp;
1059:
1060: return (idp);
1061: }
1062:
1063: /*
1064: * cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *)
1065: *
1066: * Overview
1067: *
1068: * cyclic_add() will create an unbound cyclic with the specified handler and
1069: * interval. The cyclic will run on a CPU which both has interrupts enabled
1070: * and is in the system CPU partition.
1071: *
1072: * Arguments and notes
1073: *
1074: * As its first argument, cyclic_add() takes a cyc_handler, which has the
1075: * following members:
1076: *
1077: * cyc_func_t cyh_func <-- Cyclic handler
1078: * void *cyh_arg <-- Argument to cyclic handler
1079: *
1080: * In addition to a cyc_handler, cyclic_add() takes a cyc_time, which
1081: * has the following members:
1082: *
1083: * hrtime_t cyt_when <-- Absolute time, in nanoseconds since boot, at
1084: * which to start firing
1085: * hrtime_t cyt_interval <-- Length of interval, in nanoseconds
1086: *
1087: * gethrtime() is the time source for nanoseconds since boot. If cyt_when
1088: * is set to 0, the cyclic will start to fire when cyt_interval next
1089: * divides the number of nanoseconds since boot.
1090: *
1091: * The cyt_interval field _must_ be filled in by the caller; one-shots are
1092: * _not_ explicitly supported by the cyclic subsystem (cyclic_add() will
1093: * assert that cyt_interval is non-zero). The maximum value for either
1094: * field is INT64_MAX; the caller is responsible for assuring that
1095: * cyt_when + cyt_interval <= INT64_MAX. Neither field may be negative.
1096: *
1097: * For an arbitrary time t in the future, the cyclic handler is guaranteed
1098: * to have been called (t - cyt_when) / cyt_interval times. This will
1099: * be true even if interrupts have been disabled for periods greater than
1100: * cyt_interval nanoseconds. In order to compensate for such periods,
1101: * the cyclic handler may be called a finite number of times with an
1102: * arbitrarily small interval.
1103: *
1104: * The cyclic subsystem will not enforce any lower bound on the interval;
1105: * if the interval is less than the time required to process an interrupt,
1106: * the CPU will wedge. It's the responsibility of the caller to assure that
1107: * either the value of the interval is sane, or that its caller has
1108: * sufficient privilege to deny service (i.e. its caller is root).
1109: *
1110: * Return value
1111: *
1112: * cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value
1113: * other than CYCLIC_NONE. cyclic_add() cannot fail.
1114: *
1115: * Caller's context
1116: *
1117: * cpu_lock must be held by the caller, and the caller must not be in
1118: * interrupt context. cyclic_add() will perform a KM_SLEEP kernel
1119: * memory allocation, so the usual rules (e.g. p_lock cannot be held)
1120: * apply. A cyclic may be added even in the presence of CPUs that have
1121: * not been configured with respect to the cyclic subsystem, but only
1122: * configured CPUs will be eligible to run the new cyclic.
1123: *
1124: * Cyclic handler's context
1125: *
1126: * Cyclic handlers will be executed in the interrupt context corresponding
1127: * to the specified level (i.e. either high, lock or low level). The
1128: * usual context rules apply.
1129: *
1130: * A cyclic handler may not grab ANY locks held by the caller of any of
1131: * cyclic_add() or cyclic_remove(); the implementation of these functions
1132: * may require blocking on cyclic handler completion.
1133: * Moreover, cyclic handlers may not make any call back into the cyclic
1134: * subsystem.
1135: */
1136: cyclic_id_t
1137: cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when)
1138: {
1139: cyc_id_t *idp = cyclic_new_id();
1.2.6.1 ! yamt 1140: solaris_cpu_t *c = &solaris_cpu[cpu_number()];
1.1 darran 1141:
1142: ASSERT(MUTEX_HELD(&cpu_lock));
1143: ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
1144:
1145: idp->cyi_cpu = c->cpu_cyclic;
1146: idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0);
1147:
1148: return ((uintptr_t)idp);
1149: }
1150:
1151: /*
1152: * cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *)
1153: *
1154: * Overview
1155: *
1156: * cyclic_add_omni() will create an omnipresent cyclic with the specified
1157: * online and offline handlers. Omnipresent cyclics run on all online
1158: * CPUs, including CPUs which have unbound interrupts disabled.
1159: *
1160: * Arguments
1161: *
1162: * As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which
1163: * has the following members:
1164: *
1165: * void (*cyo_online)() <-- Online handler
1166: * void (*cyo_offline)() <-- Offline handler
1167: * void *cyo_arg <-- Argument to be passed to on/offline handlers
1168: *
1169: * Online handler
1170: *
1171: * The cyo_online member is a pointer to a function which has the following
1172: * four arguments:
1173: *
1174: * void * <-- Argument (cyo_arg)
1175: * cpu_t * <-- Pointer to CPU about to be onlined
1176: * cyc_handler_t * <-- Pointer to cyc_handler_t; must be filled in
1177: * by omni online handler
1178: * cyc_time_t * <-- Pointer to cyc_time_t; must be filled in by
1179: * omni online handler
1180: *
1181: * The omni cyclic online handler is always called _before_ the omni
1182: * cyclic begins to fire on the specified CPU. As the above argument
1183: * description implies, the online handler must fill in the two structures
1184: * passed to it: the cyc_handler_t and the cyc_time_t. These are the
1185: * same two structures passed to cyclic_add(), outlined above. This
1186: * allows the omni cyclic to have maximum flexibility; different CPUs may
1187: * optionally
1188: *
1189: * (a) have different intervals
1190: * (b) be explicitly in or out of phase with one another
1191: * (c) have different handlers
1192: * (d) have different handler arguments
1193: * (e) fire at different levels
1194: *
1195: * Of these, (e) seems somewhat dubious, but is nonetheless allowed.
1196: *
1197: * The omni online handler is called in the same context as cyclic_add(),
1198: * and has the same liberties: omni online handlers may perform KM_SLEEP
1199: * kernel memory allocations, and may grab locks which are also acquired
1200: * by cyclic handlers. However, omni cyclic online handlers may _not_
1201: * call back into the cyclic subsystem, and should be generally careful
1202: * about calling into arbitrary kernel subsystems.
1203: *
1204: * Offline handler
1205: *
1206: * The cyo_offline member is a pointer to a function which has the following
1207: * three arguments:
1208: *
1209: * void * <-- Argument (cyo_arg)
1210: * cpu_t * <-- Pointer to CPU about to be offlined
1211: * void * <-- CPU's cyclic argument (that is, value
1212: * to which cyh_arg member of the cyc_handler_t
1213: * was set in the omni online handler)
1214: *
1215: * The omni cyclic offline handler is always called _after_ the omni
1216: * cyclic has ceased firing on the specified CPU. Its purpose is to
1217: * allow cleanup of any resources dynamically allocated in the omni cyclic
1218: * online handler. The context of the offline handler is identical to
1219: * that of the online handler; the same constraints and liberties apply.
1220: *
1221: * The offline handler is optional; it may be NULL.
1222: *
1223: * Return value
1224: *
1225: * cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a
1226: * value other than CYCLIC_NONE. cyclic_add_omni() cannot fail.
1227: *
1228: * Caller's context
1229: *
1230: * The caller's context is identical to that of cyclic_add(), specified
1231: * above.
1232: */
1233: cyclic_id_t
1234: cyclic_add_omni(cyc_omni_handler_t *omni)
1235: {
1236: cyc_id_t *idp = cyclic_new_id();
1237: cyc_cpu_t *cpu;
1238: cpu_t *c;
1239: int i;
1240:
1241: ASSERT(MUTEX_HELD(&cpu_lock));
1242: ASSERT(omni != NULL && omni->cyo_online != NULL);
1243:
1244: idp->cyi_omni_hdlr = *omni;
1245:
1.2.6.1 ! yamt 1246: CPU_FOREACH(i) {
! 1247: i = cpu_index(ci);
1.1 darran 1248: c = &solaris_cpu[i];
1249: if ((cpu = c->cpu_cyclic) == NULL)
1250: continue;
1251: cyclic_omni_start(idp, cpu);
1252: }
1253:
1254: /*
1255: * We must have found at least one online CPU on which to run
1256: * this cyclic.
1257: */
1258: ASSERT(idp->cyi_omni_list != NULL);
1259: ASSERT(idp->cyi_cpu == NULL);
1260:
1261: return ((uintptr_t)idp);
1262: }
1263:
1264: /*
1265: * void cyclic_remove(cyclic_id_t)
1266: *
1267: * Overview
1268: *
1269: * cyclic_remove() will remove the specified cyclic from the system.
1270: *
1271: * Arguments and notes
1272: *
1273: * The only argument is a cyclic_id returned from either cyclic_add() or
1274: * cyclic_add_omni().
1275: *
1276: * By the time cyclic_remove() returns, the caller is guaranteed that the
1277: * removed cyclic handler has completed execution (this is the same
1278: * semantic that untimeout() provides). As a result, cyclic_remove() may
1279: * need to block, waiting for the removed cyclic to complete execution.
1280: * This leads to an important constraint on the caller: no lock may be
1281: * held across cyclic_remove() that also may be acquired by a cyclic
1282: * handler.
1283: *
1284: * Return value
1285: *
1286: * None; cyclic_remove() always succeeds.
1287: *
1288: * Caller's context
1289: *
1290: * cpu_lock must be held by the caller, and the caller must not be in
1291: * interrupt context. The caller may not hold any locks which are also
1292: * grabbed by any cyclic handler. See "Arguments and notes", above.
1293: */
1294: void
1295: cyclic_remove(cyclic_id_t id)
1296: {
1297: cyc_id_t *idp = (cyc_id_t *)id;
1298: cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next;
1299: cyc_cpu_t *cpu = idp->cyi_cpu;
1300:
1301: ASSERT(MUTEX_HELD(&cpu_lock));
1302:
1303: if (cpu != NULL) {
1304: (void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT);
1305: } else {
1306: ASSERT(idp->cyi_omni_list != NULL);
1307: while (idp->cyi_omni_list != NULL)
1308: cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu);
1309: }
1310:
1311: if (prev != NULL) {
1312: ASSERT(cyclic_id_head != idp);
1313: prev->cyi_next = next;
1314: } else {
1315: ASSERT(cyclic_id_head == idp);
1316: cyclic_id_head = next;
1317: }
1318:
1319: if (next != NULL)
1320: next->cyi_prev = prev;
1321:
1322: kmem_cache_free(cyclic_id_cache, idp);
1323: }
1324:
1325: static void
1326: cyclic_init(cyc_backend_t *be)
1327: {
1328: ASSERT(MUTEX_HELD(&cpu_lock));
1329:
1330: /*
1331: * Copy the passed cyc_backend into the backend template. This must
1332: * be done before the CPU can be configured.
1333: */
1334: bcopy(be, &cyclic_backend, sizeof (cyc_backend_t));
1335:
1.2.6.1 ! yamt 1336: cyclic_configure(&solaris_cpu[cpu_number()]);
1.1 darran 1337: }
1338:
1339: /*
1340: * It is assumed that cyclic_mp_init() is called some time after cyclic
1341: * init (and therefore, after cpu0 has been initialized). We grab cpu_lock,
1342: * find the already initialized CPU, and initialize every other CPU with the
1343: * same backend.
1344: */
1345: static void
1346: cyclic_mp_init(void)
1347: {
1348: cpu_t *c;
1349: int i;
1350:
1.2.6.1 ! yamt 1351: #ifndef __NetBSD__
1.1 darran 1352: mutex_enter(&cpu_lock);
1.2.6.1 ! yamt 1353: #endif
1.1 darran 1354:
1.2.6.1 ! yamt 1355: CPU_FOREACH(i) {
! 1356: i = cpu_index(ci);
1.1 darran 1357: c = &solaris_cpu[i];
1358: if (c->cpu_cyclic == NULL)
1359: cyclic_configure(c);
1360: }
1361:
1.2.6.1 ! yamt 1362: #ifndef __NetBSD__
1.1 darran 1363: mutex_exit(&cpu_lock);
1.2.6.1 ! yamt 1364: #endif
1.1 darran 1365: }
1366:
1367: static void
1368: cyclic_uninit(void)
1369: {
1370: cpu_t *c;
1371: int id;
1372:
1.2.6.1 ! yamt 1373: CPU_FOREACH(id) {
! 1374: id = cpu_index(ci);
1.1 darran 1375: c = &solaris_cpu[id];
1376: if (c->cpu_cyclic == NULL)
1377: continue;
1378: cyclic_unconfigure(c);
1379: }
1380:
1381: if (cyclic_id_cache != NULL)
1382: kmem_cache_destroy(cyclic_id_cache);
1383: }
1384:
1385: #include "cyclic_machdep.c"
1386:
1387: /*
1388: * Cyclic subsystem initialisation.
1389: */
1390: static void
1391: cyclic_load(void *dummy)
1392: {
1393: mutex_enter(&cpu_lock);
1394:
1395: /* Initialise the machine-dependent backend. */
1396: cyclic_machdep_init();
1397:
1398: mutex_exit(&cpu_lock);
1399: }
1400:
1401: SYSINIT(cyclic_register, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_load, NULL);
1402:
1403: static void
1404: cyclic_unload(void)
1405: {
1406: mutex_enter(&cpu_lock);
1407:
1408: /* Uninitialise the machine-dependent backend. */
1409: cyclic_machdep_uninit();
1410:
1411: mutex_exit(&cpu_lock);
1412: }
1413:
1414: SYSUNINIT(cyclic_unregister, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_unload, NULL);
1415:
1.2.6.1 ! yamt 1416: #ifdef __FreeBSD__
1.1 darran 1417: /* ARGSUSED */
1418: static int
1419: cyclic_modevent(module_t mod __unused, int type, void *data __unused)
1420: {
1421: int error = 0;
1422:
1423: switch (type) {
1424: case MOD_LOAD:
1425: break;
1426:
1427: case MOD_UNLOAD:
1428: break;
1429:
1430: case MOD_SHUTDOWN:
1431: break;
1432:
1433: default:
1434: error = EOPNOTSUPP;
1435: break;
1436:
1437: }
1438: return (error);
1439: }
1440:
1441: DEV_MODULE(cyclic, cyclic_modevent, NULL);
1442: MODULE_VERSION(cyclic, 1);
1443: MODULE_DEPEND(cyclic, opensolaris, 1, 1, 1);
1.2.6.1 ! yamt 1444: #endif
! 1445:
! 1446: #ifdef __NetBSD__
! 1447: static int
! 1448: cyclic_modcmd(modcmd_t cmd, void *data)
! 1449: {
! 1450: switch (cmd) {
! 1451: case MODULE_CMD_INIT:
! 1452: cyclic_load(NULL);
! 1453: return 0;
! 1454:
! 1455: case MODULE_CMD_FINI:
! 1456: cyclic_unload();
! 1457: return 0;
! 1458: default:
! 1459: return ENOTTY;
! 1460: }
! 1461: }
! 1462:
! 1463: MODULE(MODULE_CLASS_MISC, cyclic, "dtrace");
! 1464: #endif
CVSweb <webmaster@jp.NetBSD.org>