Annotation of src/sys/kern/kern_clock.c, Revision 1.39
1.39 ! cgd 1: /* $NetBSD: kern_clock.c,v 1.38 1997/01/15 04:27:35 cgd Exp $ */
1.19 cgd 2:
3: /*-
4: * Copyright (c) 1982, 1986, 1991, 1993
5: * The Regents of the University of California. All rights reserved.
6: * (c) UNIX System Laboratories, Inc.
7: * All or some portions of this file are derived from material licensed
8: * to the University of California by American Telephone and Telegraph
9: * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10: * the permission of UNIX System Laboratories, Inc.
11: *
12: * Redistribution and use in source and binary forms, with or without
13: * modification, are permitted provided that the following conditions
14: * are met:
15: * 1. Redistributions of source code must retain the above copyright
16: * notice, this list of conditions and the following disclaimer.
17: * 2. Redistributions in binary form must reproduce the above copyright
18: * notice, this list of conditions and the following disclaimer in the
19: * documentation and/or other materials provided with the distribution.
20: * 3. All advertising materials mentioning features or use of this software
21: * must display the following acknowledgement:
22: * This product includes software developed by the University of
23: * California, Berkeley and its contributors.
24: * 4. Neither the name of the University nor the names of its contributors
25: * may be used to endorse or promote products derived from this software
26: * without specific prior written permission.
27: *
28: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38: * SUCH DAMAGE.
39: *
40: * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
41: */
42:
43: #include <sys/param.h>
44: #include <sys/systm.h>
45: #include <sys/dkstat.h>
46: #include <sys/callout.h>
47: #include <sys/kernel.h>
48: #include <sys/proc.h>
49: #include <sys/resourcevar.h>
1.25 christos 50: #include <sys/signalvar.h>
1.26 christos 51: #include <vm/vm.h>
52: #include <sys/sysctl.h>
1.27 jonathan 53: #include <sys/timex.h>
1.19 cgd 54:
55: #include <machine/cpu.h>
1.25 christos 56:
1.19 cgd 57: #ifdef GPROF
58: #include <sys/gmon.h>
59: #endif
60:
61: /*
62: * Clock handling routines.
63: *
64: * This code is written to operate with two timers that run independently of
65: * each other. The main clock, running hz times per second, is used to keep
66: * track of real time. The second timer handles kernel and user profiling,
67: * and does resource use estimation. If the second timer is programmable,
68: * it is randomized to avoid aliasing between the two clocks. For example,
69: * the randomization prevents an adversary from always giving up the cpu
70: * just before its quantum expires. Otherwise, it would never accumulate
71: * cpu ticks. The mean frequency of the second timer is stathz.
72: *
73: * If no second timer exists, stathz will be zero; in this case we drive
74: * profiling and statistics off the main clock. This WILL NOT be accurate;
75: * do not do it unless absolutely necessary.
76: *
77: * The statistics clock may (or may not) be run at a higher rate while
78: * profiling. This profile clock runs at profhz. We require that profhz
79: * be an integral multiple of stathz.
80: *
81: * If the statistics clock is running fast, it must be divided by the ratio
82: * profhz/stathz for statistics. (For profiling, every tick counts.)
83: */
84:
85: /*
86: * TODO:
87: * allocate more timeout table slots when table overflows.
88: */
89:
1.27 jonathan 90:
#ifdef NTP	/* NTP phase-locked loop in kernel */
/*
 * Phase/frequency-lock loop (PLL/FLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL/FLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound
 * growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int	time_state = TIME_OK;		/* clock state */
int	time_status = STA_UNSYNC;	/* clock status bits */
long	time_offset = 0;		/* time offset (us) */
long	time_constant = 0;		/* pll time constant */
long	time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long	time_precision = 1;		/* clock precision (us) */
long	time_maxerror = MAXPHASE;	/* maximum error (us) */
long	time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL/FLL and the
 * residual time and frequency offset of the local clock. The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started. Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed from time_phase and time_freq at each
 * seconds rollover.
 *
 * time_reftime is the second's portion of the system time at the last
 * call to ntp_adjtime(). It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long	time_phase = 0;			/* phase offset (scaled us) */
long	time_freq = 0;			/* frequency offset (scaled ppm) */
long	time_adj = 0;			/* tick adjust (scaled 1 / hz) */
long	time_reftime = 0;		/* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC). The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime(). pps_count counts the seconds of the calibration
 * interval, the duration of which is nominally pps_shift in powers of
 * two.
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 * this filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 * by this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time. Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update. It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset. It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm. It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long	pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long	pps_offset = 0;		/* pps time offset (us) */
long	pps_jitter = MAXTIME;	/* time dispersion (jitter) (us) */
long	pps_ff[] = {0, 0, 0};	/* pps frequency offset median filter */
long	pps_freq = 0;		/* frequency offset (scaled ppm) */
long	pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long	pps_usec = 0;		/* microsec counter at last interval */
long	pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int	pps_glitch = 0;		/* pps signal glitch counter */
int	pps_count = 0;		/* calibration interval counter (s) */
int	pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int	pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 */
long	pps_jitcnt = 0;		/* jitter limit exceeded */
long	pps_calcnt = 0;		/* calibration intervals */
long	pps_errcnt = 0;		/* calibration errors */
long	pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */

#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int	clock_count = 0;	/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface. The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters. The clock_cpu variable contains
 * the offset between the system clock and the HIGHBALL clock for use in
 * disciplining the kernel time variable.
 */
extern struct timeval clock_offset;	/* Highball clock offset */
long	clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */
#endif /* NTP */
262:
263:
/*
 * Bump a timeval by a small number of usec's.
 *
 * NOTE: only one carry into tv_sec is performed, so the increment
 * must be less than 1000000 us for the result to stay normalized;
 * negative increments are not handled.  Both call sites in
 * hardclock() pass a per-tick delta, which satisfies this.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}

int	stathz;				/* statistics clock rate (0 if none) */
int	profhz;				/* profiling clock rate */
int	profprocs;			/* number of processes being profiled */
int	ticks;				/* hardclock ticks since boot */
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */
int	tickfix, tickfixinterval;	/* used if tick not really integral */
#ifndef NTP
static int tickfixcnt;			/* accumulated fractional error */
#else
int	fixtick;			/* used by NTP for same */
int	shifthz;			/* scale shift matching hz (set in initclocks) */
#endif

volatile struct timeval time;		/* current time of day */
volatile struct timeval mono_time;	/* monotonically increasing time */
294:
/*
 * Initialize clock frequencies and start both clocks running.
 * Called once at boot, after the machine-dependent clock hardware
 * (cpu_initclocks) has set hz, stathz and profhz.
 */
void
initclocks()
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 * If there is no separate statistics clock (stathz == 0),
	 * profiling runs off the main clock at hz.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;

#ifdef NTP
	/*
	 * Pick the scale-factor shift used by the NTP arithmetic in
	 * hardclock() to convert between scaled us/ppm and per-tick
	 * increments.  Only the listed hz values are supported; any
	 * other clock frequency is a configuration error.
	 */
	switch (hz) {
	case 60:
	case 64:
		shifthz = SHIFT_SCALE - 6;
		break;
	case 96:
	case 100:
	case 128:
		shifthz = SHIFT_SCALE - 7;
		break;
	case 256:
		shifthz = SHIFT_SCALE - 8;
		break;
	case 1024:
		shifthz = SHIFT_SCALE - 10;
		break;
	default:
		panic("weird hz");
	}
#endif
}
340:
/*
 * The real-time timer, interrupting hz times per second.
 * Runs at the (high) clock interrupt priority.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int delta, needsoft;
	extern int tickdelta;
	extern long timedelta;
#ifdef NTP
	register int time_update;
	register int ltemp;
#endif

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 * ITIMER_VIRTUAL only counts down while in user mode;
		 * ITIMER_PROF counts down in both user and kernel mode.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day.  The increment is normally just
	 * ``tick''.  If the machine is one which has a clock frequency
	 * such that ``hz'' would not divide the second evenly into
	 * milliseconds, a periodic adjustment must be applied.  Finally,
	 * if we are still adjusting the time (see adjtime()),
	 * ``tickdelta'' may also be added in.
	 */
	ticks++;
	delta = tick;

#ifndef NTP
	/* Accumulate the fractional tick error; pay it back one us at a time. */
	if (tickfix) {
		tickfixcnt += tickfix;
		if (tickfixcnt >= tickfixinterval) {
			delta++;
			tickfixcnt -= tickfixinterval;
		}
	}
#endif /* !NTP */
	/* Imprecise 4bsd adjtime() handling */
	if (timedelta != 0) {
		delta += tickdelta;
		timedelta -= tickdelta;
	}

#ifdef notyet
	microset();
#endif

#ifndef NTP
	BUMPTIME(&time, delta);		/* XXX Now done using NTP code below */
#endif
	BUMPTIME(&mono_time, delta);

#ifdef NTP
	time_update = delta;

	/*
	 * Compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, bump the high-order bits
	 * (time_update).
	 */
	time_phase += time_adj;
	if (time_phase <= -FINEUSEC) {
		ltemp = -time_phase >> SHIFT_SCALE;
		time_phase += ltemp << SHIFT_SCALE;
		time_update -= ltemp;
	} else if (time_phase >= FINEUSEC) {
		ltemp = time_phase >> SHIFT_SCALE;
		time_phase -= ltemp << SHIFT_SCALE;
		time_update += ltemp;
	}

#ifdef HIGHBALL
	/*
	 * If the HIGHBALL board is installed, we need to adjust the
	 * external clock offset in order to close the hardware feedback
	 * loop. This will adjust the external clock phase and frequency
	 * in small amounts. The additional phase noise and frequency
	 * wander this causes should be minimal. We also need to
	 * discipline the kernel time variable, since the PLL is used to
	 * discipline the external clock. If the Highball board is not
	 * present, we discipline kernel time with the PLL as usual. We
	 * assume that the external clock phase adjustment (time_update)
	 * and kernel phase adjustment (clock_cpu) are less than the
	 * value of tick.
	 */
	clock_offset.tv_usec += time_update;
	if (clock_offset.tv_usec >= 1000000) {
		clock_offset.tv_sec++;
		clock_offset.tv_usec -= 1000000;
	}
	if (clock_offset.tv_usec < 0) {
		clock_offset.tv_sec--;
		clock_offset.tv_usec += 1000000;
	}
	time.tv_usec += clock_cpu;
	clock_cpu = 0;
#else
	time.tv_usec += time_update;
#endif /* HIGHBALL */

	/*
	 * On rollover of the second the phase adjustment to be used for
	 * the next second is calculated. Also, the maximum error is
	 * increased by the tolerance. If the PPS frequency discipline
	 * code is present, the phase is increased to compensate for the
	 * CPU clock oscillator frequency error.
	 *
	 * On a 32-bit machine and given parameters in the timex.h
	 * header file, the maximum phase adjustment is +-512 ms and
	 * maximum frequency offset is a tad less than) +-512 ppm. On a
	 * 64-bit machine, you shouldn't need to ask.
	 */
	if (time.tv_usec >= 1000000) {
		time.tv_usec -= 1000000;
		time.tv_sec++;
		time_maxerror += time_tolerance >> SHIFT_USEC;

		/*
		 * Leap second processing. If in leap-insert state at
		 * the end of the day, the system clock is set back one
		 * second; if in leap-delete state, the system clock is
		 * set ahead one second. The microtime() routine or
		 * external clock driver will insure that reported time
		 * is always monotonic. The ugly divides should be
		 * replaced.
		 */
		switch (time_state) {
		case TIME_OK:
			if (time_status & STA_INS)
				time_state = TIME_INS;
			else if (time_status & STA_DEL)
				time_state = TIME_DEL;
			break;

		case TIME_INS:
			if (time.tv_sec % 86400 == 0) {
				time.tv_sec--;
				time_state = TIME_OOP;
			}
			break;

		case TIME_DEL:
			if ((time.tv_sec + 1) % 86400 == 0) {
				time.tv_sec++;
				time_state = TIME_WAIT;
			}
			break;

		case TIME_OOP:
			time_state = TIME_WAIT;
			break;

		case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
		}

		/*
		 * Compute the phase adjustment for the next second. In
		 * PLL mode, the offset is reduced by a fixed factor
		 * times the time constant. In FLL mode the offset is
		 * used directly. In either mode, the maximum phase
		 * adjustment for each second is clamped so as to spread
		 * the adjustment over not more than the number of
		 * seconds between updates.
		 */
		if (time_offset < 0) {
			ltemp = -time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset += ltemp;
			time_adj = -ltemp << (shifthz - SHIFT_UPDATE);
		} else if (time_offset > 0) {
			ltemp = time_offset;
			if (!(time_status & STA_FLL))
				ltemp >>= SHIFT_KG + time_constant;
			if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
				ltemp = (MAXPHASE / MINSEC) <<
				    SHIFT_UPDATE;
			time_offset -= ltemp;
			time_adj = ltemp << (shifthz - SHIFT_UPDATE);
		} else
			time_adj = 0;

		/*
		 * Compute the frequency estimate and additional phase
		 * adjustment due to frequency error for the next
		 * second. When the PPS signal is engaged, gnaw on the
		 * watchdog counter and update the frequency computed by
		 * the pll and the PPS signal.
		 */
#ifdef PPS_SYNC
		pps_valid++;
		if (pps_valid == PPS_VALID) {
			/* No PPS edge seen for too long: declare signal lost. */
			pps_jitter = MAXTIME;
			pps_stabil = MAXFREQ;
			time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
			    STA_PPSWANDER | STA_PPSERROR);
		}
		ltemp = time_freq + pps_freq;
#else
		ltemp = time_freq;
#endif /* PPS_SYNC */

		if (ltemp < 0)
			time_adj -= -ltemp >> (SHIFT_USEC - shifthz);
		else
			time_adj += ltemp >> (SHIFT_USEC - shifthz);
		time_adj += (long)fixtick << shifthz;

		/*
		 * When the CPU clock oscillator frequency is not a
		 * power of 2 in Hz, shifthz is only an approximate
		 * scale factor.
		 */
		switch (hz) {
		case 96:
		case 100:
			/*
			 * In the following code the overall gain is increased
			 * by a factor of 1.25, which results in a residual
			 * error less than 3 percent.
			 */
			if (time_adj < 0)
				time_adj -= -time_adj >> 2;
			else
				time_adj += time_adj >> 2;
			break;
		case 60:
			/*
			 * 60 Hz m68k and vaxes have a PLL gain factor of of
			 * 60/64 (15/16) of what it should be. In the following code
			 * the overall gain is increased by a factor of 1.0625,
			 * (17/16) which results in a residual error of just less
			 * than 0.4 percent.
			 */
			if (time_adj < 0)
				time_adj -= -time_adj >> 4;
			else
				time_adj += time_adj >> 4;
			break;
		}

#ifdef EXT_CLOCK
		/*
		 * If an external clock is present, it is necessary to
		 * discipline the kernel time variable anyway, since not
		 * all system components use the microtime() interface.
		 * Here, the time offset between the external clock and
		 * kernel time variable is computed every so often.
		 *
		 * NOTE(review): this branch uses ``delta'' as a struct
		 * timeval (delta.tv_sec/.tv_usec) although it is declared
		 * ``int'' above, and references ``clock_ext'' which is not
		 * declared in this file as shown -- this code likely does
		 * not compile with EXT_CLOCK defined; verify before use.
		 */
		clock_count++;
		if (clock_count > CLOCK_INTERVAL) {
			clock_count = 0;
			microtime(&clock_ext);
			delta.tv_sec = clock_ext.tv_sec - time.tv_sec;
			delta.tv_usec = clock_ext.tv_usec -
			    time.tv_usec;
			if (delta.tv_usec < 0)
				delta.tv_sec--;
			if (delta.tv_usec >= 500000) {
				delta.tv_usec -= 1000000;
				delta.tv_sec++;
			}
			if (delta.tv_usec < -500000) {
				delta.tv_usec += 1000000;
				delta.tv_sec--;
			}
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_usec > MAXPHASE) ||
			    delta.tv_sec < -1 || (delta.tv_sec == -1 &&
			    delta.tv_usec < -MAXPHASE)) {
				time = clock_ext;
				delta.tv_sec = 0;
				delta.tv_usec = 0;
			}
#ifdef HIGHBALL
			clock_cpu = delta.tv_usec;
#else /* HIGHBALL */
			hardupdate(delta.tv_usec);
#endif /* HIGHBALL */
		}
#endif /* EXT_CLOCK */
	}

#endif /* NTP */

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}
690:
691: /*
692: * Software (low priority) clock interrupt.
693: * Run periodic events from timeout queue.
694: */
695: /*ARGSUSED*/
696: void
697: softclock()
698: {
699: register struct callout *c;
700: register void *arg;
701: register void (*func) __P((void *));
702: register int s;
703:
704: s = splhigh();
705: while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
706: func = c->c_func;
707: arg = c->c_arg;
708: calltodo.c_next = c->c_next;
709: c->c_next = callfree;
710: callfree = c;
711: splx(s);
712: (*func)(arg);
713: (void) splhigh();
714: }
715: splx(s);
716: }
717:
/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that no identification
 *	value is returned from timeout, rather, the original arguments
 *	to timeout are used to identify entries for untimeout.
 */
void
timeout(ftn, arg, ticks)
	void (*ftn) __P((void *));
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	/* A non-positive delay means "as soon as possible". */
	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events, and must not be subtracted from ticks.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;	/* successor's delta shrinks by ours */

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}
774:
775: void
776: untimeout(ftn, arg)
777: void (*ftn) __P((void *));
778: void *arg;
779: {
780: register struct callout *p, *t;
781: register int s;
782:
783: s = splhigh();
784: for (p = &calltodo; (t = p->c_next) != NULL; p = t)
785: if (t->c_func == ftn && t->c_arg == arg) {
786: /* Increment next entry's tick count. */
787: if (t->c_next && t->c_time > 0)
788: t->c_next->c_time += t->c_time;
789:
790: /* Move entry from callout queue to callfree queue. */
791: p->c_next = t->c_next;
792: t->c_next = callfree;
793: callfree = t;
794: break;
795: }
796: splx(s);
797: }
798:
799: /*
800: * Compute number of hz until specified time. Used to
801: * compute third argument to timeout() from an absolute time.
802: */
803: int
804: hzto(tv)
805: struct timeval *tv;
806: {
807: register long ticks, sec;
808: int s;
809:
810: /*
1.22 cgd 811: * If number of microseconds will fit in 32 bit arithmetic,
812: * then compute number of microseconds to time and scale to
1.19 cgd 813: * ticks. Otherwise just compute number of hz in time, rounding
1.22 cgd 814: * times greater than representible to maximum value. (We must
815: * compute in microseconds, because hz can be greater than 1000,
816: * and thus tick can be less than one millisecond).
1.19 cgd 817: *
1.22 cgd 818: * Delta times less than 14 hours can be computed ``exactly''.
819: * (Note that if hz would yeild a non-integral number of us per
820: * tick, i.e. tickfix is nonzero, timouts can be a tick longer
821: * than they should be.) Maximum value for any timeout in 10ms
822: * ticks is 250 days.
1.19 cgd 823: */
824: s = splhigh();
825: sec = tv->tv_sec - time.tv_sec;
1.22 cgd 826: if (sec <= 0x7fffffff / 1000000 - 1)
827: ticks = ((tv->tv_sec - time.tv_sec) * 1000000 +
828: (tv->tv_usec - time.tv_usec)) / tick;
1.19 cgd 829: else if (sec <= 0x7fffffff / hz)
830: ticks = sec * hz;
831: else
832: ticks = 0x7fffffff;
833: splx(s);
834: return (ticks);
835: }
836:
837: /*
838: * Start profiling on a process.
839: *
840: * Kernel profiling passes proc0 which never exits and hence
841: * keeps the profile clock running constantly.
842: */
843: void
844: startprofclock(p)
845: register struct proc *p;
846: {
847: int s;
848:
849: if ((p->p_flag & P_PROFIL) == 0) {
850: p->p_flag |= P_PROFIL;
851: if (++profprocs == 1 && stathz != 0) {
852: s = splstatclock();
853: psdiv = pscnt = psratio;
854: setstatclockrate(profhz);
855: splx(s);
856: }
857: }
858: }
859:
860: /*
861: * Stop profiling on a process.
862: */
863: void
864: stopprofclock(p)
865: register struct proc *p;
866: {
867: int s;
868:
869: if (p->p_flag & P_PROFIL) {
870: p->p_flag &= ~P_PROFIL;
871: if (--profprocs == 0 && stathz != 0) {
872: s = splstatclock();
873: psdiv = pscnt = 1;
874: setstatclockrate(stathz);
875: splx(s);
876: }
877: }
878: }
879:
/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.  While profiling, the clock runs
 * at profhz and pscnt divides it back down to the stathz statistics
 * rate; profile samples are taken on every interrupt, statistics only
 * when the divider expires.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
	register int i;
#endif
	register struct proc *p;

	if (CLKF_USERMODE(frame)) {
		/* An interrupt from user mode implies curproc != NULL. */
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 * Bucket the sampled PC into the kernel-text histogram.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	/* Reload the profile=>statistics divider. */
	pscnt = psdiv;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
	if (p != NULL) {
		p->p_cpticks++;
		/* Saturating increment: never let p_estcpu wrap to 0. */
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}
	}
}
1.27 jonathan 974:
975:
976: #ifdef NTP /* NTP phase-locked loop in kernel */
977:
978: /*
979: * hardupdate() - local clock update
980: *
981: * This routine is called by ntp_adjtime() to update the local clock
982: * phase and frequency. The implementation is of an adaptive-parameter,
983: * hybrid phase/frequency-lock loop (PLL/FLL). The routine computes new
984: * time and frequency offset estimates for each call. If the kernel PPS
985: * discipline code is configured (PPS_SYNC), the PPS signal itself
986: * determines the new time offset, instead of the calling argument.
987: * Presumably, calls to ntp_adjtime() occur only when the caller
988: * believes the local clock is valid within some bound (+-128 ms with
989: * NTP). If the caller's time is far different than the PPS time, an
990: * argument will ensue, and it's not clear who will lose.
991: *
 992:  * For uncompensated quartz crystal oscillators and nominal update
993: * intervals less than 1024 s, operation should be in phase-lock mode
994: * (STA_FLL = 0), where the loop is disciplined to phase. For update
 995:  * intervals greater than this, operation should be in frequency-lock
996: * mode (STA_FLL = 1), where the loop is disciplined to frequency.
997: *
998: * Note: splclock() is in effect.
999: */
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	/* Nothing to do unless the PLL or PPS-time discipline is enabled. */
	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	/* A valid PPS signal overrides the caller-supplied offset. */
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */

	/*
	 * Scale the phase adjustment and clamp to the operating range.
	 */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;

	/*
	 * Select whether the frequency is to be controlled and in which
	 * mode (PLL or FLL). Clamp to the operating range. Ugly
	 * multiply/divide should be replaced someday.
	 *
	 * mtemp is the interval, in seconds, since the last update;
	 * time_reftime records the time of this update for the next call.
	 */
	if (time_status & STA_FREQHOLD || time_reftime == 0)
		time_reftime = time.tv_sec;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (time_status & STA_FLL) {
		/* Frequency-lock mode: only adjust after MINSEC seconds. */
		if (mtemp >= MINSEC) {
			ltemp = ((time_offset / mtemp) << (SHIFT_USEC -
			    SHIFT_UPDATE));
			/*
			 * Shift the magnitude, not the (possibly negative)
			 * value: right shift of a negative long is
			 * implementation-defined in C.
			 */
			if (ltemp < 0)
				time_freq -= -ltemp >> SHIFT_KH;
			else
				time_freq += ltemp >> SHIFT_KH;
		}
	} else {
		/* Phase-lock mode: ignore updates older than MAXSEC. */
		if (mtemp < MAXSEC) {
			ltemp *= mtemp;
			if (ltemp < 0)
				time_freq -= -ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
			else
				time_freq += ltemp >> (time_constant +
				    time_constant + SHIFT_KF -
				    SHIFT_USEC);
		}
	}
	/* Clamp the frequency estimate to the oscillator tolerance. */
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}
1060:
1061: #ifdef PPS_SYNC
1062: /*
1063: * hardpps() - discipline CPU clock oscillator to external PPS signal
1064: *
1065: * This routine is called at each PPS interrupt in order to discipline
1066: * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1067: * and leaves it in a handy spot for the hardclock() routine. It
1068: * integrates successive PPS phase differences and calculates the
1069: * frequency offset. This is used in hardclock() to discipline the CPU
1070: * clock oscillator so that intrinsic frequency error is cancelled out.
1071: * The code requires the caller to capture the time and hardware counter
1072: * value at the on-time PPS signal transition.
1073: *
1074: * Note that, on some Unix systems, this routine runs at an interrupt
1075: * priority level higher than the timer interrupt routine hardclock().
1076: * Therefore, the variables used are distinct from the hardclock()
1077: * variables, except for certain exceptions: The PPS frequency pps_freq
1078: * and phase pps_offset variables are determined by this routine and
1079: * updated atomically. The time_tolerance variable can be considered a
1080: * constant, since it is infrequently changed, and then only when the
1081: * PPS signal is disabled. The watchdog counter pps_valid is updated
1082: * once per second by hardclock() and is atomically cleared in this
1083: * routine.
1084: */
void
hardpps(tvp, usec)
	struct timeval *tvp;		/* time at PPS */
	long usec;			/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * An occasional glitch can be produced when the PPS interrupt
	 * occurs in the hardclock() routine before the time variable is
	 * updated. Here the offset is discarded when the difference
	 * between it and the last one is greater than tick/2, but not
	 * if the interval since the first discard exceeds 30 s.
	 */
	time_status |= STA_PPSSIGNAL;
	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
	pps_valid = 0;		/* reset the once-per-second watchdog */
	/* Fold the microsecond phase into the range (-500000, 500000]. */
	u_usec = -tvp->tv_usec;
	if (u_usec < -500000)
		u_usec += 1000000;
	v_usec = pps_offset - u_usec;
	if (v_usec < 0)
		v_usec = -v_usec;
	if (v_usec > (tick >> 1)) {
		if (pps_glitch > MAXGLITCH) {
			/* Too many glitches: accept the sample, reseed filter. */
			pps_glitch = 0;
			pps_tf[2] = u_usec;
			pps_tf[1] = u_usec;
		} else {
			/* Discard the outlier; reuse the previous offset. */
			pps_glitch++;
			u_usec = pps_offset;
		}
	} else
		pps_glitch = 0;

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * time. The median sample becomes the time offset estimate; the
	 * difference between the other two samples becomes the time
	 * dispersion (jitter) estimate.
	 */
	pps_tf[2] = pps_tf[1];
	pps_tf[1] = pps_tf[0];
	pps_tf[0] = u_usec;
	if (pps_tf[0] > pps_tf[1]) {
		if (pps_tf[1] > pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 0 1 2 */
			v_usec = pps_tf[0] - pps_tf[2];
		} else if (pps_tf[2] > pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 2 0 1 */
			v_usec = pps_tf[2] - pps_tf[1];
		} else {
			pps_offset = pps_tf[2];		/* 0 2 1 */
			v_usec = pps_tf[0] - pps_tf[1];
		}
	} else {
		if (pps_tf[1] < pps_tf[2]) {
			pps_offset = pps_tf[1];		/* 2 1 0 */
			v_usec = pps_tf[2] - pps_tf[0];
		} else if (pps_tf[2] < pps_tf[0]) {
			pps_offset = pps_tf[0];		/* 1 0 2 */
			v_usec = pps_tf[1] - pps_tf[2];
		} else {
			pps_offset = pps_tf[2];		/* 1 2 0 */
			v_usec = pps_tf[1] - pps_tf[0];
		}
	}
	if (v_usec > MAXTIME)
		pps_jitcnt++;
	/* Exponential average of the jitter (shift magnitudes only). */
	v_usec = (v_usec << PPS_AVG) - pps_jitter;
	if (v_usec < 0)
		pps_jitter -= -v_usec >> PPS_AVG;
	else
		pps_jitter += v_usec >> PPS_AVG;
	if (pps_jitter > (MAXTIME >> 1))
		time_status |= STA_PPSJITTER;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows. At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval. This code
	 * is deliciously complicated by the fact valid differences may
	 * exceed the value of tick when using long calibration
	 * intervals and small ticks. Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= pps_freq;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	/* Keep accumulating until 2^pps_shift seconds have elapsed. */
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	pps_calcnt++;
	u_usec = usec << SHIFT_USEC;
	/* Counter difference over the interval, folded into +-bigtick/2. */
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> pps_shift);
	else
		v_usec = v_usec >> pps_shift;
	pps_usec = u_usec;
	/* Duration of the calibration interval (normalized sec/usec). */
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error. The number of timer ticks during
	 * the interval may vary +-1 tick. Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation. If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > time_tolerance || v_usec < -time_tolerance) {
		pps_errcnt++;
		pps_shift = PPS_SHIFT;
		pps_intcnt = 0;
		time_status |= STA_PPSERROR;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * frequency. The median sample becomes the frequency offset
	 * estimate; the difference between the other two samples
	 * becomes the frequency dispersion (stability) estimate.
	 */
	pps_ff[2] = pps_ff[1];
	pps_ff[1] = pps_ff[0];
	pps_ff[0] = v_usec;
	if (pps_ff[0] > pps_ff[1]) {
		if (pps_ff[1] > pps_ff[2]) {
			u_usec = pps_ff[1];		/* 0 1 2 */
			v_usec = pps_ff[0] - pps_ff[2];
		} else if (pps_ff[2] > pps_ff[0]) {
			u_usec = pps_ff[0];		/* 2 0 1 */
			v_usec = pps_ff[2] - pps_ff[1];
		} else {
			u_usec = pps_ff[2];		/* 0 2 1 */
			v_usec = pps_ff[0] - pps_ff[1];
		}
	} else {
		if (pps_ff[1] < pps_ff[2]) {
			u_usec = pps_ff[1];		/* 2 1 0 */
			v_usec = pps_ff[2] - pps_ff[0];
		} else if (pps_ff[2] < pps_ff[0]) {
			u_usec = pps_ff[0];		/* 1 0 2 */
			v_usec = pps_ff[1] - pps_ff[2];
		} else {
			u_usec = pps_ff[2];		/* 1 2 0 */
			v_usec = pps_ff[1] - pps_ff[0];
		}
	}

	/*
	 * Here the frequency dispersion (stability) is updated. If it
	 * is less than one-fourth the maximum (MAXFREQ), the frequency
	 * offset is updated as well, but clamped to the tolerance. It
	 * will be processed later by the hardclock() routine.
	 */
	v_usec = (v_usec >> 1) - pps_stabil;
	if (v_usec < 0)
		pps_stabil -= -v_usec >> PPS_AVG;
	else
		pps_stabil += v_usec >> PPS_AVG;
	if (pps_stabil > MAXFREQ >> 2) {
		/* Oscillator wandering too much; skip the frequency update. */
		pps_stbcnt++;
		time_status |= STA_PPSWANDER;
		return;
	}
	if (time_status & STA_PPSFREQ) {
		if (u_usec < 0) {
			pps_freq -= -u_usec >> PPS_AVG;
			if (pps_freq < -time_tolerance)
				pps_freq = -time_tolerance;
			/* Leave |u_usec| for the interval adjustment below. */
			u_usec = -u_usec;
		} else {
			pps_freq += u_usec >> PPS_AVG;
			if (pps_freq > time_tolerance)
				pps_freq = time_tolerance;
		}
	}

	/*
	 * Here the calibration interval is adjusted. If the maximum
	 * time difference is greater than tick / 4, reduce the interval
	 * by half. If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << pps_shift > bigtick >> 2) {
		pps_intcnt = 0;
		if (pps_shift > PPS_SHIFT)
			pps_shift--;
	} else if (pps_intcnt >= 4) {
		pps_intcnt = 0;
		if (pps_shift < PPS_SHIFTMAX)
			pps_shift++;
	} else
		pps_intcnt++;
}
1305: #endif /* PPS_SYNC */
1306: #endif /* NTP */
1307:
1.19 cgd 1308:
1309: /*
1310: * Return information about system clocks.
1311: */
1.25 christos 1312: int
1.19 cgd 1313: sysctl_clockrate(where, sizep)
1314: register char *where;
1315: size_t *sizep;
1316: {
1317: struct clockinfo clkinfo;
1318:
1319: /*
1320: * Construct clockinfo structure.
1321: */
1.20 mycroft 1322: clkinfo.tick = tick;
1323: clkinfo.tickadj = tickadj;
1.19 cgd 1324: clkinfo.hz = hz;
1325: clkinfo.profhz = profhz;
1326: clkinfo.stathz = stathz ? stathz : hz;
1327: return (sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)));
1328: }
1329:
1330: #ifdef DDB
1.21 mycroft 1331: #include <machine/db_machdep.h>
1332:
1.25 christos 1333: #include <ddb/db_interface.h>
1.19 cgd 1334: #include <ddb/db_access.h>
1335: #include <ddb/db_sym.h>
1.25 christos 1336: #include <ddb/db_output.h>
1.19 cgd 1337:
1.25 christos 1338: void db_show_callout(addr, haddr, count, modif)
1339: db_expr_t addr;
1340: int haddr;
1341: db_expr_t count;
1342: char *modif;
1.19 cgd 1343: {
1344: register struct callout *p1;
1345: register int cum;
1346: register int s;
1347: db_expr_t offset;
1348: char *name;
1349:
1.37 cgd 1350: db_printf(" cum ticks arg func\n");
1.19 cgd 1351: s = splhigh();
1352: for (cum = 0, p1 = calltodo.c_next; p1; p1 = p1->c_next) {
1353: register int t = p1->c_time;
1354:
1355: if (t > 0)
1356: cum += t;
1357:
1.21 mycroft 1358: db_find_sym_and_offset((db_addr_t)p1->c_func, &name, &offset);
1.19 cgd 1359: if (name == NULL)
1360: name = "?";
1361:
1.37 cgd 1362: db_printf("%9d %9d %p %s (%p)\n",
1.19 cgd 1363: cum, t, p1->c_arg, name, p1->c_func);
1364: }
1365: splx(s);
1366: }
1367: #endif
CVSweb <webmaster@jp.NetBSD.org>