Annotation of src/sys/kern/sched_4bsd.c, Revision 1.36
1.36 ! ad 1: /* $NetBSD: sched_4bsd.c,v 1.35 2018/09/03 16:29:35 riastradh Exp $ */
1.2 yamt 2:
1.31 maxv 3: /*
1.36 ! ad 4: * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2019
! 5: * The NetBSD Foundation, Inc.
1.2 yamt 6: * All rights reserved.
7: *
8: * This code is derived from software contributed to The NetBSD Foundation
9: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10: * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran, and
11: * Daniel Sieger.
12: *
13: * Redistribution and use in source and binary forms, with or without
14: * modification, are permitted provided that the following conditions
15: * are met:
16: * 1. Redistributions of source code must retain the above copyright
17: * notice, this list of conditions and the following disclaimer.
18: * 2. Redistributions in binary form must reproduce the above copyright
19: * notice, this list of conditions and the following disclaimer in the
20: * documentation and/or other materials provided with the distribution.
21: *
22: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32: * POSSIBILITY OF SUCH DAMAGE.
33: */
34:
1.31 maxv 35: /*
1.2 yamt 36: * Copyright (c) 1982, 1986, 1990, 1991, 1993
37: * The Regents of the University of California. All rights reserved.
38: * (c) UNIX System Laboratories, Inc.
39: * All or some portions of this file are derived from material licensed
40: * to the University of California by American Telephone and Telegraph
41: * Co. or Unix System Laboratories, Inc. and are reproduced herein with
42: * the permission of UNIX System Laboratories, Inc.
43: *
44: * Redistribution and use in source and binary forms, with or without
45: * modification, are permitted provided that the following conditions
46: * are met:
47: * 1. Redistributions of source code must retain the above copyright
48: * notice, this list of conditions and the following disclaimer.
49: * 2. Redistributions in binary form must reproduce the above copyright
50: * notice, this list of conditions and the following disclaimer in the
51: * documentation and/or other materials provided with the distribution.
52: * 3. Neither the name of the University nor the names of its contributors
53: * may be used to endorse or promote products derived from this software
54: * without specific prior written permission.
55: *
56: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66: * SUCH DAMAGE.
67: *
68: * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
69: */
70:
71: #include <sys/cdefs.h>
1.36 ! ad 72: __KERNEL_RCSID(0, "$NetBSD: sched_4bsd.c,v 1.35 2018/09/03 16:29:35 riastradh Exp $");
1.2 yamt 73:
74: #include "opt_ddb.h"
75: #include "opt_lockdebug.h"
76:
77: #include <sys/param.h>
78: #include <sys/systm.h>
79: #include <sys/callout.h>
80: #include <sys/cpu.h>
81: #include <sys/proc.h>
82: #include <sys/kernel.h>
83: #include <sys/resourcevar.h>
84: #include <sys/sched.h>
85: #include <sys/sysctl.h>
86: #include <sys/lockdebug.h>
1.5 ad 87: #include <sys/intr.h>
1.2 yamt 88:
89: static void updatepri(struct lwp *);
90: static void resetpriority(struct lwp *);
91:
92: extern unsigned int sched_pstats_ticks; /* defined in kern_synch.c */
93:
94: /* Number of hardclock ticks per sched_tick() */
1.31 maxv 95: static int rrticks __read_mostly;
1.2 yamt 96:
97: /*
98: * Force switch among equal priority processes every 100ms.
99: * Called from hardclock every hz/10 == rrticks hardclock ticks.
100: */
101: /* ARGSUSED */
102: void
103: sched_tick(struct cpu_info *ci)
104: {
105: struct schedstate_percpu *spc = &ci->ci_schedstate;
1.23 ad 106: lwp_t *l;
1.2 yamt 107:
108: spc->spc_ticks = rrticks;
109:
1.15 ad 110: if (CURCPU_IDLE_P()) {
1.36 ! ad 111: atomic_or_uint(&ci->ci_want_resched,
! 112: RESCHED_IDLE | RESCHED_UPREEMPT);
1.7 rmind 113: return;
1.15 ad 114: }
1.23 ad 115: l = ci->ci_data.cpu_onproc;
116: if (l == NULL) {
1.19 yamt 117: return;
118: }
1.36 ! ad 119: /*
! 120: * Can only be spc_lwplock or a turnstile lock at this point
! 121: * (if we interrupted a priority-inheritance trylock dance).
! 122: */
! 123: KASSERT(l->l_mutex != spc->spc_mutex);
1.23 ad 124: switch (l->l_class) {
125: case SCHED_FIFO:
126: /* No timeslicing for FIFO jobs. */
127: break;
128: case SCHED_RR:
129: /* Force it into mi_switch() to look for other jobs to run. */
1.36 ! ad 130: atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
! 131: cpu_need_resched(ci, l, RESCHED_KPREEMPT);
1.23 ad 132: break;
133: default:
134: if (spc->spc_flags & SPCF_SHOULDYIELD) {
135: /*
136: * Process is stuck in kernel somewhere, probably
1.31 maxv 137: * due to buggy or inefficient code. Force a
1.23 ad 138: * kernel preemption.
139: */
1.36 ! ad 140: atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
! 141: cpu_need_resched(ci, l, RESCHED_KPREEMPT);
1.23 ad 142: } else if (spc->spc_flags & SPCF_SEENRR) {
143: /*
144: * The process has already been through a roundrobin
145: * without switching and may be hogging the CPU.
146: * Indicate that the process should yield.
147: */
148: spc->spc_flags |= SPCF_SHOULDYIELD;
1.36 ! ad 149: cpu_need_resched(ci, l, RESCHED_UPREEMPT);
1.23 ad 150: } else {
151: spc->spc_flags |= SPCF_SEENRR;
152: }
153: break;
154: }
1.2 yamt 155: }
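
/*
 * Illustrative timeline for a SCHED_OTHER LWP that never blocks, assuming
 * the common hz = 100 (so rrticks = 10 and sched_tick() fires every ~100ms):
 *
 *	tick 1: SPCF_SEENRR is set; the LWP keeps running.
 *	tick 2: SPCF_SEENRR was already set, so SPCF_SHOULDYIELD is set and
 *	        a user preemption (RESCHED_UPREEMPT) is requested.
 *	tick 3: SPCF_SHOULDYIELD is still set, so a kernel preemption
 *	        (RESCHED_KPREEMPT) is forced, even if the LWP is stuck in
 *	        the kernel.
 *
 * Both flags are cleared once the CPU actually passes through mi_switch(),
 * so a well-behaved LWP that yields within ~200ms never reaches step 3.
 */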
156:
1.8 ad 157: /*
158: * Why PRIO_MAX - 2? From setpriority(2):
159: *
160: * prio is a value in the range -20 to 20. The default priority is
161: * 0; lower priorities cause more favorable scheduling. A value of
162: * 19 or 20 will schedule a process only when nothing at priority <=
163: * 0 is runnable.
164: *
165: * This gives estcpu influence over 18 priority levels, and leaves nice
166: * with 40 levels. One way to think about it is that nice has 20 levels
167: * either side of estcpu's 18.
168: */
1.2 yamt 169: #define ESTCPU_SHIFT 11
1.8 ad 170: #define ESTCPU_MAX ((PRIO_MAX - 2) << ESTCPU_SHIFT)
171: #define ESTCPU_ACCUM (1 << (ESTCPU_SHIFT - 1))
1.35 riastrad 172: #define ESTCPULIM(e) uimin((e), ESTCPU_MAX)
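
/*
 * Worked example of the ranges above, using PRIO_MAX = 20 and the
 * ESTCPU_SHIFT of 11 defined here:
 *
 *	ESTCPU_MAX   = (20 - 2) << 11 = 18 << 11 = 36864
 *	ESTCPU_ACCUM = 1 << 10        = 1024 (half of one priority step)
 *
 * so (l_estcpu >> ESTCPU_SHIFT) in resetpriority() ranges over 0..18,
 * which is the "18 priority levels" of estcpu influence mentioned above.
 */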
1.2 yamt 173:
174: /*
1.31 maxv 175: * The main parameter used by this algorithm is 'l_estcpu'. It is an estimate
176: * of the recent CPU utilization of the thread.
177: *
178: * l_estcpu is:
179: * - increased each time the hardclock ticks and the thread is found to
180: * be executing, in sched_schedclock() called from hardclock()
181: * - decreased (filtered) on each sched tick, in sched_pstats_hook()
182: * If the lwp is sleeping for more than a second, we don't touch l_estcpu: it
183: * will be updated in sched_setrunnable() when the lwp wakes up, in burst mode
184: * (i.e., we decay it n times).
1.2 yamt 185: *
1.8 ad 186: * Note that hardclock updates l_estcpu and l_cpticks independently.
1.2 yamt 187: *
1.31 maxv 188: * -----------------------------------------------------------------------------
189: *
190: * Here we describe how l_estcpu is decreased.
191: *
192: * Constants for digital decay (filter):
193: * 90% of l_estcpu usage in (5 * loadavg) seconds
194: *
195: * We wish to decay away 90% of l_estcpu in (5 * loadavg) seconds. That is, we
196: * want to compute a value of decay such that the following loop:
197: * for (i = 0; i < (5 * loadavg); i++)
198: * l_estcpu *= decay;
199: * will result in
200: * l_estcpu *= 0.1;
201: * for all values of loadavg.
1.2 yamt 202: *
203: * Mathematically this loop can be expressed by saying:
1.31 maxv 204: * decay ** (5 * loadavg) ~= .1
205: *
206: * And finally, the corresponding value of decay we're using is:
207: * decay = (2 * loadavg) / (2 * loadavg + 1)
1.2 yamt 208: *
1.31 maxv 209: * -----------------------------------------------------------------------------
1.2 yamt 210: *
1.31 maxv 211: * Now, let's prove that the value of decay stated above will always fulfill
212: * the equation:
213: * decay ** (5 * loadavg) ~= .1
1.2 yamt 214: *
215: * If we compute b as:
1.31 maxv 216: * b = 2 * loadavg
1.2 yamt 217: * then
1.31 maxv 218: * decay = b / (b + 1)
1.2 yamt 219: *
220: * We now need to prove two things:
1.31 maxv 221: * 1) Given [factor ** (5 * loadavg) =~ .1], prove [factor == b/(b+1)].
222: * 2) Given [b/(b+1) ** power =~ .1], prove [power == (5 * loadavg)].
1.2 yamt 223: *
224: * Facts:
1.31 maxv 225: * * For x real: exp(x) = x**0/0! + x**1/1! + x**2/2! + ...
226: * Therefore, for x close to zero, exp(x) =~ 1 + x.
227: * In turn, for b large enough, exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
228: *
229: * * For b large enough, (b-1)/b =~ b/(b+1).
230: *
231: * * For x belonging to [-1;1[, ln(1-x) = - x - x**2/2 - x**3/3 - ...
232: * Therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
233: *
234: * * ln(0.1) =~ -2.30
1.2 yamt 235: *
236: * Proof of (1):
1.31 maxv 237: * factor ** (5 * loadavg) =~ 0.1
238: * => ln(factor) =~ -2.30 / (5 * loadavg)
239: * => factor =~ exp(-1 / ((5 / 2.30) * loadavg))
240: * =~ exp(-1 / (2 * loadavg))
241: * =~ exp(-1 / b)
242: * =~ (b - 1) / b
243: * =~ b / (b + 1)
244: * =~ (2 * loadavg) / ((2 * loadavg) + 1)
1.2 yamt 245: *
246: * Proof of (2):
1.31 maxv 247: * (b / (b + 1)) ** power =~ .1
248: * => power * ln(b / (b + 1)) =~ -2.30
249: * => power * (-1 / (b + 1)) =~ -2.30
250: * => power =~ 2.30 * (b + 1)
251: * => power =~ 4.60 * loadavg + 2.30
252: * => power =~ 5 * loadavg
253: *
254: * Conclusion: decay = (2 * loadavg) / (2 * loadavg + 1)
1.2 yamt 255: */
256:
1.31 maxv 257: /* See calculations above */
1.32 maxv 258: #define loadfactor(loadavg) (2 * (loadavg))
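
/*
 * A concrete instance of the decay filter, assuming the usual FSHIFT of 11
 * (FSCALE = 2048) and a load average of 1.0:
 *
 *	loadfac = loadfactor(1.0 * FSCALE) = 2 * 2048 = 4096
 *	decay   = loadfac / (loadfac + FSCALE) = 4096 / 6144 = 2/3
 *
 * Applying decay_cpu() once to l_estcpu = ESTCPU_MAX:
 *
 *	36864 * 4096 / 6144 = 24576	(exactly 2/3 of the input)
 *
 * and (2/3)^5 =~ 0.13, so after 5 * loadavg = 5 once-per-second decays
 * roughly 90% of the estimate has been forgotten, as the derivation above
 * predicts.
 */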
1.2 yamt 259:
1.17 yamt 260: static fixpt_t
1.2 yamt 261: decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
262: {
263:
264: if (estcpu == 0) {
265: return 0;
266: }
267:
268: #if !defined(_LP64)
269: /* avoid 64-bit arithmetic. */
270: #define FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
271: if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
272: return estcpu * loadfac / (loadfac + FSCALE);
273: }
1.31 maxv 274: #endif
1.2 yamt 275:
276: return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
277: }
278:
279: static fixpt_t
280: decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
281: {
282:
1.31 maxv 283: /*
284: * For all load averages >= 1 and max l_estcpu of (255 << ESTCPU_SHIFT),
285: * if we slept for at least seven times the loadfactor, we will decay
286: * l_estcpu to less than (1 << ESTCPU_SHIFT), and therefore we can
287: * return zero directly.
288: *
289: * Note that our ESTCPU_MAX is actually much smaller than
290: * (255 << ESTCPU_SHIFT).
291: */
1.2 yamt 292: if ((n << FSHIFT) >= 7 * loadfac) {
293: return 0;
294: }
295:
296: while (estcpu != 0 && n > 1) {
297: estcpu = decay_cpu(loadfac, estcpu);
298: n--;
299: }
300:
301: return estcpu;
302: }
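
/*
 * The early-exit test above, spelled out for a load average of 1.0 with
 * FSHIFT = 11:
 *
 *	(n << FSHIFT) >= 7 * loadfac
 *	n * 2048      >= 7 * 4096
 *	n             >= 14
 *
 * so an LWP that has slept for 14 or more seconds (n is l_slptime, counted
 * in seconds by sched_pstats()) has its estimate zeroed outright instead of
 * being decayed step by step, e.g.:
 *
 *	decay_cpu_batch(4096, ESTCPU_MAX, 20) == 0
 *	decay_cpu_batch(4096, ESTCPU_MAX, 5)  =~ 36864 * (2/3)^4 =~ 7281
 */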
303:
304: /*
305: * sched_pstats_hook:
306: *
307: * Periodically called from sched_pstats(); used to recalculate priorities.
308: */
309: void
1.22 rmind 310: sched_pstats_hook(struct lwp *l, int batch)
1.2 yamt 311: {
1.25 yamt 312: fixpt_t loadfac;
1.2 yamt 313:
1.8 ad 314: /*
315: * If the LWP has slept an entire second, stop recalculating
316: * its priority until it wakes up.
317: */
1.24 rmind 318: KASSERT(lwp_locked(l, NULL));
1.25 yamt 319: if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
320: l->l_stat == LSSUSPENDED) {
321: if (l->l_slptime > 1) {
322: return;
323: }
1.8 ad 324: }
1.33 maxv 325:
326: loadfac = loadfactor(averunnable.ldavg[0]);
1.25 yamt 327: l->l_estcpu = decay_cpu(loadfac, l->l_estcpu);
328: resetpriority(l);
1.2 yamt 329: }
330:
331: /*
1.31 maxv 332: * Recalculate the priority of an LWP after it has slept for a while.
1.2 yamt 333: */
334: static void
335: updatepri(struct lwp *l)
336: {
337: fixpt_t loadfac;
338:
1.3 ad 339: KASSERT(lwp_locked(l, NULL));
1.2 yamt 340: KASSERT(l->l_slptime > 1);
341:
342: loadfac = loadfactor(averunnable.ldavg[0]);
343:
344: l->l_slptime--; /* the first time was done in sched_pstats */
1.8 ad 345: l->l_estcpu = decay_cpu_batch(loadfac, l->l_estcpu, l->l_slptime);
1.2 yamt 346: resetpriority(l);
347: }
348:
349: void
1.14 matt 350: sched_rqinit(void)
1.2 yamt 351: {
352:
353: }
354:
355: void
356: sched_setrunnable(struct lwp *l)
357: {
358:
359: if (l->l_slptime > 1)
360: updatepri(l);
361: }
362:
363: void
1.8 ad 364: sched_nice(struct proc *p, int n)
1.2 yamt 365: {
1.8 ad 366: struct lwp *l;
367:
1.20 ad 368: KASSERT(mutex_owned(p->p_lock));
1.2 yamt 369:
1.8 ad 370: p->p_nice = n;
371: LIST_FOREACH(l, &p->p_lwps, l_sibling) {
372: lwp_lock(l);
373: resetpriority(l);
374: lwp_unlock(l);
375: }
1.2 yamt 376: }
377:
378: /*
1.8 ad 379: * Recompute the priority of an LWP. Arrange to reschedule if
380: * the resulting priority is better than that of the current LWP.
1.2 yamt 381: */
382: static void
383: resetpriority(struct lwp *l)
384: {
1.8 ad 385: pri_t pri;
1.2 yamt 386: struct proc *p = l->l_proc;
387:
1.8 ad 388: KASSERT(lwp_locked(l, NULL));
1.2 yamt 389:
1.8 ad 390: if (l->l_class != SCHED_OTHER)
1.2 yamt 391: return;
392:
1.8 ad 393: /* See comments above ESTCPU_SHIFT definition. */
394: pri = (PRI_KERNEL - 1) - (l->l_estcpu >> ESTCPU_SHIFT) - p->p_nice;
395: pri = imax(pri, 0);
396: if (pri != l->l_priority)
397: lwp_changepri(l, pri);
1.2 yamt 398: }
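
/*
 * Worked example of the formula above, assuming the usual NetBSD layout in
 * which PRI_KERNEL is 64 (so SCHED_OTHER priorities span 0..63) and p_nice
 * stores the nice value offset by NZERO (20):
 *
 *	fresh LWP, nice 0:	pri = 63 - 0  - 20 = 43
 *	CPU hog at ESTCPU_MAX:	pri = 63 - 18 - 20 = 25
 *	CPU hog, nice +20:	pri = 63 - 18 - 40 = 5
 *	fresh LWP, nice -20:	pri = 63 - 0  - 0  = 63
 *
 * With these constants the result never goes negative, so the imax() clamp
 * is purely defensive.
 */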
399:
400: /*
1.28 yamt 401: * We adjust the priority of the current LWP. The priority of an LWP
1.8 ad 402: * gets worse as it accumulates CPU time. The CPU usage estimator (l_estcpu)
1.28 yamt 403: * is increased here. The formula for computing priorities will compute a
404: * different value each time l_estcpu increases. This can cause a switch,
405: * but unless the priority crosses a PPQ boundary the actual queue will not
406: * change. The CPU usage estimator ramps up quite quickly when the process
407: * is running (linearly), and decays away exponentially, at a rate which is
408: * proportionally slower when the system is busy. The basic principle is
409: * that the system will 90% forget that the process used a lot of CPU time
1.31 maxv 410: * in (5 * loadavg) seconds. This causes the system to favor processes which
1.28 yamt 411: * haven't run much recently, and to round-robin among other processes.
1.2 yamt 412: */
413: void
414: sched_schedclock(struct lwp *l)
415: {
1.8 ad 416:
417: if (l->l_class != SCHED_OTHER)
418: return;
1.2 yamt 419:
420: KASSERT(!CURCPU_IDLE_P());
1.8 ad 421: l->l_estcpu = ESTCPULIM(l->l_estcpu + ESTCPU_ACCUM);
1.2 yamt 422: lwp_lock(l);
423: resetpriority(l);
424: lwp_unlock(l);
425: }
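
/*
 * Since ESTCPU_ACCUM is half of one priority step (1024 versus the 2048
 * that ESTCPU_SHIFT hides), the ramp-up can be pictured as:
 *
 *	l_estcpu	grows by 1024 per sched_schedclock() call while the
 *			LWP is running
 *	priority	worsens by one level every second call, until
 *			ESTCPULIM() caps l_estcpu at ESTCPU_MAX (18 levels)
 *
 * The decay in sched_pstats_hook() works against this, so only LWPs that
 * keep the CPU busy actually approach the cap.
 */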
426:
427: /*
428: * sched_proc_fork:
429: *
430: * Inherit the parent's scheduler history.
431: */
432: void
433: sched_proc_fork(struct proc *parent, struct proc *child)
434: {
1.8 ad 435: lwp_t *pl;
1.2 yamt 436:
1.20 ad 437: KASSERT(mutex_owned(parent->p_lock));
1.2 yamt 438:
1.8 ad 439: pl = LIST_FIRST(&parent->p_lwps);
440: child->p_estcpu_inherited = pl->l_estcpu;
1.2 yamt 441: child->p_forktime = sched_pstats_ticks;
442: }
443:
444: /*
445: * sched_proc_exit:
446: *
447: * Chargeback parents for the sins of their children.
448: */
449: void
450: sched_proc_exit(struct proc *parent, struct proc *child)
451: {
452: fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
453: fixpt_t estcpu;
1.8 ad 454: lwp_t *pl, *cl;
1.2 yamt 455:
456: /* XXX Only if parent != init?? */
457:
1.20 ad 458: mutex_enter(parent->p_lock);
1.8 ad 459: pl = LIST_FIRST(&parent->p_lwps);
460: cl = LIST_FIRST(&child->p_lwps);
1.2 yamt 461: estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
462: sched_pstats_ticks - child->p_forktime);
1.8 ad 463: if (cl->l_estcpu > estcpu) {
464: lwp_lock(pl);
465: pl->l_estcpu = ESTCPULIM(pl->l_estcpu + cl->l_estcpu - estcpu);
466: lwp_unlock(pl);
467: }
1.20 ad 468: mutex_exit(parent->p_lock);
1.2 yamt 469: }
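
/*
 * A hypothetical chargeback, with loadavg 1.0 and the constants used above:
 *
 *	at fork:	parent's l_estcpu = 8 << ESTCPU_SHIFT, saved in
 *			p_estcpu_inherited
 *	at exit:	decay_cpu_batch() says the inherited estimate would
 *			by now have decayed to, say, 2 << ESTCPU_SHIFT, but
 *			the child's LWP exits with 5 << ESTCPU_SHIFT
 *	result:		the parent's LWP is charged the 3 << ESTCPU_SHIFT
 *			excess, subject to the ESTCPULIM() cap
 *
 * If the child used no more CPU than the decayed inheritance accounts for,
 * the parent is not charged at all.
 */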
470:
471: void
1.6 rmind 472: sched_wakeup(struct lwp *l)
473: {
474:
475: }
476:
477: void
478: sched_slept(struct lwp *l)
479: {
480:
481: }
482:
1.2 yamt 483: void
1.8 ad 484: sched_lwp_fork(struct lwp *l1, struct lwp *l2)
1.2 yamt 485: {
486:
1.8 ad 487: l2->l_estcpu = l1->l_estcpu;
1.2 yamt 488: }
489:
490: void
1.8 ad 491: sched_lwp_collect(struct lwp *t)
492: {
493: lwp_t *l;
494:
495: /* Absorb estcpu value of collected LWP. */
496: l = curlwp;
497: lwp_lock(l);
498: l->l_estcpu += t->l_estcpu;
499: lwp_unlock(l);
500: }
501:
1.16 ad 502: void
503: sched_oncpu(lwp_t *l)
504: {
505:
506: }
507:
508: void
509: sched_newts(lwp_t *l)
510: {
511:
512: }
513:
1.5 ad 514: /*
1.12 rmind 515: * Sysctl nodes and initialization.
1.5 ad 516: */
1.12 rmind 517:
518: static int
519: sysctl_sched_rtts(SYSCTLFN_ARGS)
520: {
521: struct sysctlnode node;
522: int rttsms = hztoms(rrticks);
523:
524: node = *rnode;
525: node.sysctl_data = &rttsms;
526: return sysctl_lookup(SYSCTLFN_CALL(&node));
527: }
528:
1.16 ad 529: SYSCTL_SETUP(sysctl_sched_4bsd_setup, "sysctl sched setup")
1.2 yamt 530: {
531: const struct sysctlnode *node = NULL;
532:
533: sysctl_createv(clog, 0, NULL, &node,
534: CTLFLAG_PERMANENT,
535: CTLTYPE_NODE, "sched",
536: SYSCTL_DESCR("Scheduler options"),
537: NULL, 0, NULL, 0,
538: CTL_KERN, CTL_CREATE, CTL_EOL);
539:
1.16 ad 540: if (node == NULL)
541: return;
1.5 ad 542:
1.16 ad 543: rrticks = hz / 10;
544:
545: sysctl_createv(NULL, 0, &node, NULL,
1.5 ad 546: CTLFLAG_PERMANENT,
547: CTLTYPE_STRING, "name", NULL,
548: NULL, 0, __UNCONST("4.4BSD"), 0,
549: CTL_CREATE, CTL_EOL);
1.16 ad 550: sysctl_createv(NULL, 0, &node, NULL,
1.12 rmind 551: CTLFLAG_PERMANENT,
552: CTLTYPE_INT, "rtts",
1.30 maxv 553: SYSCTL_DESCR("Round-robin time quantum (in milliseconds)"),
1.12 rmind 554: sysctl_sched_rtts, 0, NULL, 0,
555: CTL_CREATE, CTL_EOL);
1.2 yamt 556: }
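
/*
 * The nodes created above appear under kern.sched; on a kernel running this
 * scheduler one would expect something like:
 *
 *	$ sysctl kern.sched.name
 *	kern.sched.name = 4.4BSD
 *	$ sysctl kern.sched.rtts
 *	kern.sched.rtts = 100
 *
 * since rrticks = hz / 10 always converts back to roughly 100 milliseconds.
 */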