Annotation of src/sys/arch/i386/i386/machdep.c, Revision 1.493
1.493 ! mycroft 1: /* $NetBSD: machdep.c,v 1.492 2002/10/20 10:35:41 kanaoka Exp $ */
1.231 thorpej 2:
3: /*-
1.401 thorpej 4: * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
1.231 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.316 mycroft 8: * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.231 thorpej 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.125 cgd 39:
1.1 cgd 40: /*-
41: * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
42: * All rights reserved.
43: *
44: * This code is derived from software contributed to Berkeley by
45: * William Jolitz.
46: *
47: * Redistribution and use in source and binary forms, with or without
48: * modification, are permitted provided that the following conditions
49: * are met:
50: * 1. Redistributions of source code must retain the above copyright
51: * notice, this list of conditions and the following disclaimer.
52: * 2. Redistributions in binary form must reproduce the above copyright
53: * notice, this list of conditions and the following disclaimer in the
54: * documentation and/or other materials provided with the distribution.
55: * 3. All advertising materials mentioning features or use of this software
56: * must display the following acknowledgement:
57: * This product includes software developed by the University of
58: * California, Berkeley and its contributors.
59: * 4. Neither the name of the University nor the names of its contributors
60: * may be used to endorse or promote products derived from this software
61: * without specific prior written permission.
62: *
63: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73: * SUCH DAMAGE.
74: *
1.125 cgd 75: * @(#)machdep.c 7.4 (Berkeley) 6/3/91
1.1 cgd 76: */
1.460 lukem 77:
78: #include <sys/cdefs.h>
1.493 ! mycroft 79: __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.492 2002/10/20 10:35:41 kanaoka Exp $");
1.271 thorpej 80:
81: #include "opt_cputype.h"
1.309 jonathan 82: #include "opt_ddb.h"
1.377 ws 83: #include "opt_ipkdb.h"
1.443 lukem 84: #include "opt_kgdb.h"
1.272 thorpej 85: #include "opt_vm86.h"
1.274 thorpej 86: #include "opt_user_ldt.h"
1.310 jonathan 87: #include "opt_compat_netbsd.h"
1.327 bouyer 88: #include "opt_cpureset_delay.h"
1.333 christos 89: #include "opt_compat_svr4.h"
1.429 chs 90: #include "opt_realmem.h"
1.447 christos 91: #include "opt_compat_mach.h" /* need to get the right segment def */
1.455 fvdl 92: #include "opt_mtrr.h"
1.1 cgd 93:
1.59 mycroft 94: #include <sys/param.h>
95: #include <sys/systm.h>
96: #include <sys/signalvar.h>
97: #include <sys/kernel.h>
98: #include <sys/proc.h>
99: #include <sys/user.h>
100: #include <sys/exec.h>
101: #include <sys/buf.h>
102: #include <sys/reboot.h>
103: #include <sys/conf.h>
104: #include <sys/file.h>
105: #include <sys/malloc.h>
106: #include <sys/mbuf.h>
107: #include <sys/msgbuf.h>
108: #include <sys/mount.h>
109: #include <sys/vnode.h>
1.204 thorpej 110: #include <sys/extent.h>
1.123 cgd 111: #include <sys/syscallargs.h>
1.291 thorpej 112: #include <sys/core.h>
113: #include <sys/kcore.h>
114: #include <machine/kcore.h>
1.57 cgd 115:
1.377 ws 116: #ifdef IPKDB
117: #include <ipkdb/ipkdb.h>
118: #endif
119:
1.235 thorpej 120: #ifdef KGDB
121: #include <sys/kgdb.h>
122: #endif
123:
1.104 cgd 124: #include <dev/cons.h>
1.390 mrg 125:
126: #include <uvm/uvm_extern.h>
1.393 fvdl 127: #include <uvm/uvm_page.h>
1.284 mrg 128:
1.200 christos 129: #include <sys/sysctl.h>
130:
1.59 mycroft 131: #include <machine/cpu.h>
132: #include <machine/cpufunc.h>
1.484 fvdl 133: #include <machine/cpuvar.h>
1.178 mycroft 134: #include <machine/gdt.h>
1.149 mycroft 135: #include <machine/pio.h>
1.59 mycroft 136: #include <machine/psl.h>
137: #include <machine/reg.h>
138: #include <machine/specialreg.h>
1.255 drochner 139: #include <machine/bootinfo.h>
1.455 fvdl 140: #include <machine/mtrr.h>
1.43 brezak 141:
1.146 cgd 142: #include <dev/isa/isareg.h>
1.372 drochner 143: #include <machine/isa_machdep.h>
1.164 cgd 144: #include <dev/ic/i8042reg.h>
1.43 brezak 145:
1.200 christos 146: #ifdef DDB
147: #include <machine/db_machdep.h>
148: #include <ddb/db_extern.h>
149: #endif
150:
1.184 mycroft 151: #ifdef VM86
152: #include <machine/vm86.h>
153: #endif
154:
1.473 tshiozak 155: #include "acpi.h"
1.207 jtk 156: #include "apm.h"
1.258 jtk 157: #include "bioscall.h"
1.207 jtk 158:
1.259 jtk 159: #if NBIOSCALL > 0
160: #include <machine/bioscall.h>
161: #endif
162:
1.473 tshiozak 163: #if NACPI > 0
164: #include <dev/acpi/acpivar.h>
165: #define ACPI_MACHDEP_PRIVATE
166: #include <machine/acpi_machdep.h>
167: #endif
168:
1.207 jtk 169: #if NAPM > 0
170: #include <machine/apmvar.h>
1.258 jtk 171: #endif
172:
1.59 mycroft 173: #include "isa.h"
1.231 thorpej 174: #include "isadma.h"
1.59 mycroft 175: #include "npx.h"
1.2 cgd 176:
1.384 jdolecek 177: #include "mca.h"
178: #if NMCA > 0
179: #include <machine/mca_machdep.h> /* for mca_busprobe() */
180: #endif
181:
1.484 fvdl 182: #ifdef MULTIPROCESSOR /* XXX */
183: #include <machine/mpbiosvar.h> /* XXX */
184: #endif /* XXX */
185:
1.104 cgd 186: /* the following is used externally (sysctl_hw) */
187: char machine[] = "i386"; /* cpu "architecture" */
1.232 veego 188: char machine_arch[] = "i386"; /* machine == machine_arch */
1.104 cgd 189:
1.484 fvdl 190: volatile u_int32_t ipending;
1.471 matt 191:
1.484 fvdl 192: int imasks[NIPL];
193: int iunmask[NIPL];
1.471 matt 194:
1.402 explorer 195: u_int cpu_serial[3];
196:
1.255 drochner 197: char bootinfo[BOOTINFO_MAXSIZE];
1.386 thorpej 198:
1.484 fvdl 199: /* Our exported CPU info; we have only one right now. */
1.386 thorpej 200: struct cpu_info cpu_info_store;
1.255 drochner 201:
1.343 fvdl 202: struct bi_devmatch *i386_alldisks = NULL;
203: int i386_ndisks = 0;
1.342 fvdl 204:
1.328 bouyer 205: #ifdef CPURESET_DELAY
206: int cpureset_delay = CPURESET_DELAY;
207: #else
208: int cpureset_delay = 2000; /* default to 2s */
209: #endif
210:
1.455 fvdl 211: #ifdef MTRR
212: struct mtrr_funcs *mtrr_funcs;
213: #endif
214:
1.1 cgd 215:
1.59 mycroft 216: int physmem;
1.163 cgd 217: int dumpmem_low;
218: int dumpmem_high;
1.59 mycroft 219: int cpu_class;
1.428 fvdl 220: int i386_fpu_present;
221: int i386_fpu_exception;
222: int i386_fpu_fdivbug;
1.59 mycroft 223:
1.451 thorpej 224: int i386_use_fxsave;
225: int i386_has_sse;
226: int i386_has_sse2;
1.450 thorpej 227:
1.461 christos 228: int tmx86_has_longrun;
229:
1.457 thorpej 230: #define CPUID2FAMILY(cpuid) (((cpuid) >> 8) & 15)
1.379 jdolecek 231: #define CPUID2MODEL(cpuid) (((cpuid) >> 4) & 15)
1.457 thorpej 232: #define CPUID2STEPPING(cpuid) ((cpuid) & 15)
1.379 jdolecek 233:
1.314 thorpej 234: vaddr_t msgbuf_vaddr;
235: paddr_t msgbuf_paddr;
236:
237: vaddr_t idt_vaddr;
238: paddr_t idt_paddr;
239:
1.264 mycroft 240: #ifdef I586_CPU
1.314 thorpej 241: vaddr_t pentium_idt_vaddr;
1.264 mycroft 242: #endif
1.59 mycroft 243:
1.444 chs 244: struct vm_map *exec_map = NULL;
245: struct vm_map *mb_map = NULL;
246: struct vm_map *phys_map = NULL;
1.48 brezak 247:
1.314 thorpej 248: extern paddr_t avail_start, avail_end;
1.1 cgd 249:
1.484 fvdl 250: void (*delay_func) __P((int)) = i8254_delay;
251: void (*microtime_func) __P((struct timeval *)) = i8254_microtime;
252: void (*initclock_func) __P((void)) = i8254_initclocks;
253:
1.204 thorpej 254: /*
1.291 thorpej 255: * Size of memory segments, before any memory is stolen.
256: */
257: phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
258: int mem_cluster_cnt;
259:
260: int cpu_dump __P((void));
261: int cpu_dumpsize __P((void));
262: u_long cpu_dump_mempagecnt __P((void));
1.200 christos 263: void dumpsys __P((void));
1.314 thorpej 264: void init386 __P((paddr_t));
1.484 fvdl 265: void initgdt __P((union descriptor *));
1.255 drochner 266:
1.433 kanaoka 267: #if !defined(REALBASEMEM) && !defined(REALEXTMEM)
268: void add_mem_cluster __P((u_int64_t, u_int64_t, u_int32_t));
269: #endif /* !defnied(REALBASEMEM) && !defined(REALEXTMEM) */
270:
1.484 fvdl 271: static const struct i386_cache_info
272: intel_cpuid_cache_info[] = {
273: { CAI_ITLB, 0x01, 4, 32, 4 * 1024 },
274: { CAI_ITLB2, 0x02, 0xff, 2, 4 * 1024 * 1024 },
275: { CAI_DTLB, 0x03, 4, 64, 4 * 1024 },
276: { CAI_DTLB2, 0x04, 4, 8, 4 * 1024 * 1024 },
277: { CAI_ITLB, 0x50, 0xff, 64, 4 * 1024, "4K/4M: 64 entries" },
278: { CAI_ITLB, 0x51, 0xff, 64, 4 * 1024, "4K/4M: 128 entries" },
279: { CAI_ITLB, 0x52, 0xff, 64, 4 * 1024, "4K/4M: 256 entries" },
280: { CAI_DTLB, 0x5b, 0xff, 64, 4 * 1024, "4K/4M: 64 entries" },
281: { CAI_DTLB, 0x5c, 0xff, 64, 4 * 1024, "4K/4M: 128 entries" },
282: { CAI_DTLB, 0x5d, 0xff, 64, 4 * 1024, "4K/4M: 256 entries" },
283:
284: { CAI_ICACHE, 0x06, 4, 8 * 1024, 32 },
285: { CAI_ICACHE, 0x08, 4, 16 * 1024, 32 },
286: { CAI_DCACHE, 0x0a, 2, 8 * 1024, 32 },
287: { CAI_DCACHE, 0x0c, 4, 16 * 1024, 32 },
288: { CAI_L2CACHE, 0x40, 0, 0, 0, "not present" },
289: { CAI_L2CACHE, 0x41, 4, 128 * 1024, 32 },
290: { CAI_L2CACHE, 0x42, 4, 256 * 1024, 32 },
291: { CAI_L2CACHE, 0x43, 4, 512 * 1024, 32 },
292: { CAI_L2CACHE, 0x44, 4, 1 * 1024 * 1024, 32 },
293: { CAI_L2CACHE, 0x45, 4, 2 * 1024 * 1024, 32 },
294: { CAI_DCACHE, 0x66, 4, 8 * 1024, 64 },
295: { CAI_DCACHE, 0x67, 4, 16 * 1024, 64 },
296: { CAI_DCACHE, 0x68, 4, 32 * 1024, 64 },
297: { CAI_ICACHE, 0x70, 8, 12 * 1024, 64, "12K uOp cache"},
298: { CAI_ICACHE, 0x71, 8, 16 * 1024, 64, "16K uOp cache"},
299: { CAI_ICACHE, 0x72, 8, 32 * 1024, 64, "32K uOp cache"},
300: { CAI_L2CACHE, 0x79, 8, 128 * 1024, 64 },
301: { CAI_L2CACHE, 0x7a, 8, 256 * 1024, 64 },
302: { CAI_L2CACHE, 0x7b, 8, 512 * 1024, 64 },
303: { CAI_L2CACHE, 0x7c, 8, 1 * 1024 * 1024, 64 },
304: { CAI_L2CACHE, 0x82, 8, 256 * 1024, 32 },
1.491 yamt 305: { CAI_L2CACHE, 0x83, 8, 512 * 1024, 32 },
1.484 fvdl 306: { CAI_L2CACHE, 0x84, 8, 1 * 1024 * 1024, 32 },
307: { CAI_L2CACHE, 0x85, 8, 2 * 1024 * 1024, 32 },
308: { 0, 0, 0, 0, 0 },
309: };
310:
311: static const struct i386_cache_info *
312: cache_info_lookup(const struct i386_cache_info *cai, u_int8_t desc);
313:
/*
 * Map Brand ID from the cpuid instruction to a brand name.
 * Source: Intel Processor Identification and the CPUID Instruction, AP-485
 */
static const char * const i386_intel_brand[] = {
	"",			/* Unsupported */
	"Celeron",		/* Intel (R) Celeron (TM) processor */
	"Pentium III",		/* Intel (R) Pentium (R) III processor */
	"Pentium III Xeon",	/* Intel (R) Pentium (R) III Xeon (TM) processor */
	"", "", "",		/* Reserved */
	"Pentium 4"		/* Intel (R) Pentium (R) 4 processor */
};

/*
 * AMD processors don't have Brand IDs, so we need these names for probe.
 */
static const char * const amd_brand[] = {
	"",
	"Duron",	/* AMD Duron(tm) */
	"MP",		/* AMD Athlon(tm) MP */
	"XP",		/* AMD Athlon(tm) XP */
	"4"		/* AMD Athlon(tm) 4 */
};

/* Scratch buffer for the composed AMD model name. */
static char amd_brand_name[48];
339:
1.200 christos 340: #ifdef COMPAT_NOMID
341: static int exec_nomid __P((struct proc *, struct exec_package *));
342: #endif
1.59 mycroft 343:
1.484 fvdl 344: void cyrix6x86_cpu_setup __P((struct cpu_info *));
345: void winchip_cpu_setup __P((struct cpu_info *));
346: void amd_family5_setup __P((struct cpu_info *));
347: void transmeta_cpu_setup __P((struct cpu_info *));
1.267 bouyer 348:
1.487 junyoung 349: static void amd_family6_probe __P((struct cpu_info *));
350:
1.484 fvdl 351: static void transmeta_cpu_info __P((struct cpu_info *));
352: static void amd_cpuid_cpu_cacheinfo __P((struct cpu_info *));
1.437 thorpej 353:
1.267 bouyer 354: static __inline u_char
355: cyrix_read_reg(u_char reg)
356: {
357: outb(0x22, reg);
358: return inb(0x23);
359: }
360:
361: static __inline void
362: cyrix_write_reg(u_char reg, u_char data)
363: {
364: outb(0x22, reg);
365: outb(0x23, data);
366: }
367:
1.484 fvdl 368: static char *
369: print_cache_config(struct cpu_info *ci, int cache_tag, char *name, char *sep)
370: {
371: char cbuf[7];
372: struct i386_cache_info *cai = &ci->ci_cinfo[cache_tag];
373:
374: if (cai->cai_totalsize == 0)
375: return sep;
376:
377: if (sep == NULL)
378: printf("%s: ", ci->ci_dev->dv_xname);
379: else
380: printf("%s", sep);
381: if (name != NULL)
382: printf("%s ", name);
383:
384: if (cai->cai_string != NULL) {
385: printf("%s ", cai->cai_string);
386: } else {
387: format_bytes(cbuf, sizeof(cbuf), cai->cai_totalsize);
388: printf("%s %db/line ", cbuf, cai->cai_linesize);
389: }
390: switch (cai->cai_associativity) {
391: case 0:
392: printf("disabled");
393: break;
394: case 1:
395: printf("direct-mapped");
396: break;
397: case 0xff:
398: printf("fully associative");
399: break;
400: default:
401: printf("%d-way", cai->cai_associativity);
402: break;
403: }
404: return ", ";
405: }
406:
407: static char *
408: print_tlb_config(struct cpu_info *ci, int cache_tag, char *name, char *sep)
409: {
410: char cbuf[7];
411: struct i386_cache_info *cai = &ci->ci_cinfo[cache_tag];
412:
413: if (cai->cai_totalsize == 0)
414: return sep;
415:
416: if (sep == NULL)
417: printf("%s: ", ci->ci_dev->dv_xname);
418: else
419: printf("%s", sep);
420: if (name != NULL)
421: printf("%s ", name);
422:
423: if (cai->cai_string != NULL) {
424: printf("%s", cai->cai_string);
425: } else {
426: format_bytes(cbuf, sizeof(cbuf), cai->cai_linesize);
427: printf("%d %s entries ", cai->cai_totalsize, cbuf);
428: switch (cai->cai_associativity) {
429: case 0:
430: printf("disabled");
431: break;
432: case 1:
433: printf("direct-mapped");
434: break;
435: case 0xff:
436: printf("fully associative");
437: break;
438: default:
439: printf("%d-way", cai->cai_associativity);
440: break;
441: }
442: }
443: return ", ";
444: }
445:
1.59 mycroft 446: /*
447: * Machine-dependent startup code
448: */
1.32 andrew 449: void
1.1 cgd 450: cpu_startup()
451: {
1.484 fvdl 452: #if 0
1.437 thorpej 453: struct cpu_info *ci = curcpu();
1.484 fvdl 454: #endif
1.59 mycroft 455: caddr_t v;
1.349 thorpej 456: int sz, x;
1.314 thorpej 457: vaddr_t minaddr, maxaddr;
458: vsize_t size;
1.354 lukem 459: char pbuf[9];
1.484 fvdl 460: #if 0
1.440 thorpej 461: int bigcache, cachesize;
1.484 fvdl 462: #endif
1.1 cgd 463:
1.284 mrg 464: /*
465: * Initialize error message buffer (et end of core).
466: */
1.354 lukem 467: msgbuf_vaddr = uvm_km_valloc(kernel_map, i386_round_page(MSGBUFSIZE));
1.385 thorpej 468: if (msgbuf_vaddr == 0)
1.284 mrg 469: panic("failed to valloc msgbuf_vaddr");
1.359 thorpej 470:
1.284 mrg 471: /* msgbuf_paddr was init'd in pmap */
472: for (x = 0; x < btoc(MSGBUFSIZE); x++)
1.414 thorpej 473: pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE,
474: msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
1.456 chris 475: pmap_update(pmap_kernel());
1.359 thorpej 476:
1.284 mrg 477: initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
478:
1.392 sommerfe 479: printf("%s", version);
1.393 fvdl 480:
1.484 fvdl 481: #if 0
1.455 fvdl 482: #ifdef MTRR
483: if (cpu_feature & CPUID_MTRR) {
484: mtrr_funcs = &i686_mtrr_funcs;
485: i686_mtrr_init_first();
486: mtrr_init_cpu(ci);
1.457 thorpej 487: } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
488: /*
489: * Must be a K6-2 Step >= 7 or a K6-III.
490: */
491: if (CPUID2FAMILY(cpu_id) == 5) {
492: if (CPUID2MODEL(cpu_id) > 8 ||
493: (CPUID2MODEL(cpu_id) == 8 &&
494: CPUID2STEPPING(cpu_id) >= 7)) {
495: mtrr_funcs = &k6_mtrr_funcs;
496: k6_mtrr_init_first();
497: mtrr_init_cpu(ci);
498: }
499: }
1.455 fvdl 500: }
501: #endif
1.484 fvdl 502: #endif
503:
504: #ifdef TRAPLOG
505: /*
506: * Enable recording of branch from/to in MSR's
507: */
508: wrmsr(MSR_DEBUGCTLMSR, 0x1);
509: #endif
510:
1.382 mycroft 511: format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
1.354 lukem 512: printf("total memory = %s\n", pbuf);
1.1 cgd 513:
514: /*
1.59 mycroft 515: * Find out how much space we need, allocate it,
516: * and then give everything true virtual addresses.
1.1 cgd 517: */
1.354 lukem 518: sz = (int)allocsys(NULL, NULL);
1.284 mrg 519: if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
520: panic("startup: no room for tables");
1.354 lukem 521: if (allocsys(v, NULL) - v != sz)
1.1 cgd 522: panic("startup: table size inconsistency");
1.50 cgd 523:
1.36 cgd 524: /*
1.284 mrg 525: * Allocate virtual address space for the buffers. The area
526: * is not managed by the VM system.
1.36 cgd 527: */
528: size = MAXBSIZE * nbuf;
1.314 thorpej 529: if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
1.398 thorpej 530: NULL, UVM_UNKNOWN_OFFSET, 0,
1.284 mrg 531: UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
1.430 chs 532: UVM_ADV_NORMAL, 0)) != 0)
1.284 mrg 533: panic("cpu_startup: cannot allocate VM for buffers");
1.314 thorpej 534: minaddr = (vaddr_t)buffers;
1.54 cgd 535: if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
536: /* don't want to alloc more physical mem than needed */
537: bufpages = btoc(MAXBSIZE) * nbuf;
538: }
1.36 cgd 539:
1.268 thorpej 540: /*
541: * XXX We defer allocation of physical pages for buffers until
542: * XXX after autoconfiguration has run. We must do this because
543: * XXX on system with large amounts of memory or with large
544: * XXX user-configured buffer caches, the buffer cache will eat
545: * XXX up all of the lower 16M of RAM. This prevents ISA DMA
546: * XXX maps from allocating bounce pages.
547: *
548: * XXX Note that nothing can use buffer cache buffers until after
549: * XXX autoconfiguration completes!!
550: *
551: * XXX This is a hack, and needs to be replaced with a better
552: * XXX solution! --thorpej@netbsd.org, December 6, 1997
553: */
1.41 cgd 554:
1.1 cgd 555: /*
1.36 cgd 556: * Allocate a submap for exec arguments. This map effectively
557: * limits the number of processes exec'ing at any time.
1.1 cgd 558: */
1.284 mrg 559: exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
1.358 thorpej 560: 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
1.59 mycroft 561:
1.1 cgd 562: /*
563: * Allocate a submap for physio
564: */
1.284 mrg 565: phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
1.358 thorpej 566: VM_PHYS_SIZE, 0, FALSE, NULL);
1.1 cgd 567:
568: /*
1.229 thorpej 569: * Finally, allocate mbuf cluster submap.
1.1 cgd 570: */
1.334 thorpej 571: mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
1.358 thorpej 572: nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);
1.1 cgd 573:
1.284 mrg 574: /*
575: * XXX Buffer cache pages haven't yet been allocated, so
576: * XXX we need to account for those pages when printing
577: * XXX the amount of free memory.
578: */
1.354 lukem 579: format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free - bufpages));
580: printf("avail memory = %s\n", pbuf);
1.414 thorpej 581: format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
1.484 fvdl 582: printf("using %d buffers containing %s of memory\n", nbuf, pbuf);
1.1 cgd 583:
1.375 drochner 584: /* Safe for i/o port / memory space allocation to use malloc now. */
585: i386_bus_space_mallocok();
1.349 thorpej 586: }
587:
588: /*
589: * Set up proc0's TSS and LDT.
590: */
591: void
592: i386_proc0_tss_ldt_init()
593: {
594: struct pcb *pcb;
595: int x;
1.268 thorpej 596:
1.326 thorpej 597: gdt_init();
1.484 fvdl 598:
599: cpu_info_primary.ci_curpcb = pcb = &proc0.p_addr->u_pcb;
600:
1.326 thorpej 601: pcb->pcb_tss.tss_ioopt =
602: ((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss) << 16;
1.484 fvdl 603:
1.326 thorpej 604: for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
605: pcb->pcb_iomap[x] = 0xffffffff;
606:
1.394 thorpej 607: pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
1.326 thorpej 608: pcb->pcb_cr0 = rcr0();
609: pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
610: pcb->pcb_tss.tss_esp0 = (int)proc0.p_addr + USPACE - 16;
1.484 fvdl 611: proc0.p_md.md_regs = (struct trapframe *)pcb->pcb_tss.tss_esp0 - 1;
612: proc0.p_md.md_tss_sel = tss_alloc(pcb);
1.326 thorpej 613:
1.394 thorpej 614: ltr(proc0.p_md.md_tss_sel);
1.326 thorpej 615: lldt(pcb->pcb_ldt_sel);
1.484 fvdl 616: }
617:
618: /*
619: * Set up TSS and LDT for a new PCB.
620: */
621:
622: void
623: i386_init_pcb_tss_ldt(ci)
624: struct cpu_info *ci;
625: {
626: int x;
627: struct pcb *pcb = ci->ci_idle_pcb;
1.326 thorpej 628:
1.484 fvdl 629: pcb->pcb_tss.tss_ioopt =
630: ((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss) << 16;
631: for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
632: pcb->pcb_iomap[x] = 0xffffffff;
633:
634: pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
635: pcb->pcb_cr0 = rcr0();
636:
637: ci->ci_idle_tss_sel = tss_alloc(pcb);
1.326 thorpej 638: }
639:
640: /*
641: * XXX Finish up the deferred buffer cache allocation and initialization.
642: */
643: void
644: i386_bufinit()
645: {
1.484 fvdl 646: int i, base, residual;
1.326 thorpej 647:
1.268 thorpej 648: base = bufpages / nbuf;
649: residual = bufpages % nbuf;
650: for (i = 0; i < nbuf; i++) {
1.314 thorpej 651: vsize_t curbufsize;
652: vaddr_t curbuf;
1.284 mrg 653: struct vm_page *pg;
654:
655: /*
656: * Each buffer has MAXBSIZE bytes of VM space allocated. Of
657: * that MAXBSIZE space, we allocate and map (base+1) pages
658: * for the first "residual" buffers, and then we allocate
659: * "base" pages for the rest.
660: */
1.314 thorpej 661: curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
1.414 thorpej 662: curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
1.284 mrg 663:
664: while (curbufsize) {
1.311 thorpej 665: /*
666: * Attempt to allocate buffers from the first
667: * 16M of RAM to avoid bouncing file system
668: * transfers.
669: */
1.350 chs 670: pg = uvm_pagealloc_strat(NULL, 0, NULL, 0,
1.311 thorpej 671: UVM_PGA_STRAT_FALLBACK, VM_FREELIST_FIRST16);
1.284 mrg 672: if (pg == NULL)
673: panic("cpu_startup: not enough memory for "
674: "buffer cache");
1.434 thorpej 675: pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
676: VM_PROT_READ|VM_PROT_WRITE);
1.284 mrg 677: curbuf += PAGE_SIZE;
678: curbufsize -= PAGE_SIZE;
679: }
1.268 thorpej 680: }
1.456 chris 681: pmap_update(pmap_kernel());
1.268 thorpej 682:
683: /*
684: * Set up buffers, so they can be used to read disk labels.
685: */
686: bufinit();
1.16 cgd 687: }
688:
1.484 fvdl 689: /*
1.104 cgd 690: * Info for CTL_HW
691: */
692: char cpu_model[120];
693:
1.216 fvdl 694: /*
695: * Note: these are just the ones that may not have a cpuid instruction.
696: * We deal with the rest in a different way.
697: */
1.418 jdolecek 698: const struct cpu_nocpuid_nameclass i386_nocpuid_cpus[] = {
1.267 bouyer 699: { CPUVENDOR_INTEL, "Intel", "386SX", CPUCLASS_386,
1.437 thorpej 700: NULL, NULL}, /* CPU_386SX */
1.267 bouyer 701: { CPUVENDOR_INTEL, "Intel", "386DX", CPUCLASS_386,
1.437 thorpej 702: NULL, NULL}, /* CPU_386 */
1.267 bouyer 703: { CPUVENDOR_INTEL, "Intel", "486SX", CPUCLASS_486,
1.437 thorpej 704: NULL, NULL}, /* CPU_486SX */
1.267 bouyer 705: { CPUVENDOR_INTEL, "Intel", "486DX", CPUCLASS_486,
1.437 thorpej 706: NULL, NULL}, /* CPU_486 */
1.267 bouyer 707: { CPUVENDOR_CYRIX, "Cyrix", "486DLC", CPUCLASS_486,
1.437 thorpej 708: NULL, NULL}, /* CPU_486DLC */
709: { CPUVENDOR_CYRIX, "Cyrix", "6x86", CPUCLASS_486,
710: cyrix6x86_cpu_setup, NULL}, /* CPU_6x86 */
1.267 bouyer 711: { CPUVENDOR_NEXGEN,"NexGen","586", CPUCLASS_386,
1.437 thorpej 712: NULL, NULL}, /* CPU_NX586 */
1.216 fvdl 713: };
714:
715: const char *classnames[] = {
716: "386",
717: "486",
718: "586",
719: "686"
720: };
721:
722: const char *modifiers[] = {
723: "",
1.454 enami 724: "OverDrive",
725: "Dual",
1.216 fvdl 726: ""
1.18 cgd 727: };
728:
1.418 jdolecek 729: const struct cpu_cpuid_nameclass i386_cpuid_cpus[] = {
1.216 fvdl 730: {
731: "GenuineIntel",
732: CPUVENDOR_INTEL,
733: "Intel",
734: /* Family 4 */
735: { {
1.484 fvdl 736: CPUCLASS_486,
1.216 fvdl 737: {
1.219 perry 738: "486DX", "486DX", "486SX", "486DX2", "486SL",
1.216 fvdl 739: "486SX2", 0, "486DX2 W/B Enhanced",
740: "486DX4", 0, 0, 0, 0, 0, 0, 0,
741: "486" /* Default */
1.267 bouyer 742: },
1.437 thorpej 743: NULL,
1.484 fvdl 744: NULL,
1.464 christos 745: NULL,
1.216 fvdl 746: },
747: /* Family 5 */
748: {
749: CPUCLASS_586,
750: {
1.361 tron 751: "Pentium (P5 A-step)", "Pentium (P5)",
1.484 fvdl 752: "Pentium (P54C)", "Pentium (P24T)",
1.361 tron 753: "Pentium/MMX", "Pentium", 0,
754: "Pentium (P54C)", "Pentium/MMX (Tillamook)",
755: 0, 0, 0, 0, 0, 0, 0,
1.216 fvdl 756: "Pentium" /* Default */
1.267 bouyer 757: },
1.437 thorpej 758: NULL,
1.484 fvdl 759: NULL,
1.464 christos 760: NULL,
1.216 fvdl 761: },
762: /* Family 6 */
763: {
764: CPUCLASS_686,
765: {
1.361 tron 766: "Pentium Pro (A-step)", "Pentium Pro", 0,
767: "Pentium II (Klamath)", "Pentium Pro",
1.416 jdolecek 768: "Pentium II/Celeron (Deschutes)",
769: "Celeron (Mendocino)",
770: "Pentium III (Katmai)",
771: "Pentium III (Coppermine)",
1.458 jdolecek 772: 0, "Pentium III (Cascades)",
773: "Pentium III (Tualatin)", 0, 0, 0, 0,
1.340 fvdl 774: "Pentium Pro, II or III" /* Default */
1.267 bouyer 775: },
1.437 thorpej 776: NULL,
1.484 fvdl 777: NULL,
1.464 christos 778: NULL,
1.406 fvdl 779: },
780: /* Family > 6 */
781: {
782: CPUCLASS_686,
783: {
1.459 jdolecek 784: 0, 0, 0, 0, 0, 0, 0, 0,
785: 0, 0, 0, 0, 0, 0, 0, 0,
1.406 fvdl 786: "Pentium 4" /* Default */
787: },
1.437 thorpej 788: NULL,
1.484 fvdl 789: NULL,
1.464 christos 790: NULL,
1.216 fvdl 791: } }
792: },
793: {
794: "AuthenticAMD",
795: CPUVENDOR_AMD,
796: "AMD",
797: /* Family 4 */
798: { {
1.484 fvdl 799: CPUCLASS_486,
1.216 fvdl 800: {
801: 0, 0, 0, "Am486DX2 W/T",
802: 0, 0, 0, "Am486DX2 W/B",
803: "Am486DX4 W/T or Am5x86 W/T 150",
804: "Am486DX4 W/B or Am5x86 W/B 150", 0, 0,
805: 0, 0, "Am5x86 W/T 133/160",
806: "Am5x86 W/B 133/160",
807: "Am486 or Am5x86" /* Default */
808: },
1.437 thorpej 809: NULL,
810: NULL,
1.464 christos 811: NULL,
1.216 fvdl 812: },
813: /* Family 5 */
814: {
815: CPUCLASS_586,
816: {
1.241 fvdl 817: "K5", "K5", "K5", "K5", 0, 0, "K6",
1.416 jdolecek 818: "K6", "K6-2", "K6-III", 0, 0, 0,
819: "K6-2+/III+", 0, 0,
1.267 bouyer 820: "K5 or K6" /* Default */
1.216 fvdl 821: },
1.441 thorpej 822: amd_family5_setup,
1.439 thorpej 823: amd_cpuid_cpu_cacheinfo,
1.464 christos 824: NULL,
1.216 fvdl 825: },
1.363 fvdl 826: /* Family 6 */
1.216 fvdl 827: {
828: CPUCLASS_686,
829: {
1.416 jdolecek 830: 0, "Athlon Model 1", "Athlon Model 2",
831: "Duron", "Athlon Model 4 (Thunderbird)",
1.487 junyoung 832: 0, "Athlon", "Duron", "Athlon", 0, 0, 0,
1.465 drochner 833: 0, 0, 0, 0,
1.362 fvdl 834: "K7 (Athlon)" /* Default */
1.216 fvdl 835: },
1.437 thorpej 836: NULL,
1.487 junyoung 837: amd_family6_probe,
1.439 thorpej 838: amd_cpuid_cpu_cacheinfo,
1.406 fvdl 839: },
840: /* Family > 6 */
841: {
842: CPUCLASS_686,
843: {
1.459 jdolecek 844: 0, 0, 0, 0, 0, 0, 0, 0,
845: 0, 0, 0, 0, 0, 0, 0, 0,
1.406 fvdl 846: "Unknown K7 (Athlon)" /* Default */
847: },
1.437 thorpej 848: NULL,
1.439 thorpej 849: amd_cpuid_cpu_cacheinfo,
1.464 christos 850: NULL,
1.216 fvdl 851: } }
852: },
853: {
854: "CyrixInstead",
855: CPUVENDOR_CYRIX,
856: "Cyrix",
857: /* Family 4 */
858: { {
859: CPUCLASS_486,
860: {
1.376 minoura 861: 0, 0, 0,
862: "MediaGX",
1.459 jdolecek 863: 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.216 fvdl 864: "486" /* Default */
865: },
1.484 fvdl 866: cyrix6x86_cpu_setup, /* XXX ?? */
1.437 thorpej 867: NULL,
1.464 christos 868: NULL,
1.216 fvdl 869: },
870: /* Family 5 */
871: {
872: CPUCLASS_586,
873: {
1.376 minoura 874: 0, 0, "6x86", 0,
875: "MMX-enhanced MediaGX (GXm)", /* or Geode? */
876: 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.216 fvdl 877: "6x86" /* Default */
1.267 bouyer 878: },
1.437 thorpej 879: cyrix6x86_cpu_setup,
880: NULL,
1.464 christos 881: NULL,
1.216 fvdl 882: },
1.278 bouyer 883: /* Family 6 */
1.216 fvdl 884: {
885: CPUCLASS_686,
886: {
1.459 jdolecek 887: "6x86MX", 0, 0, 0, 0, 0, 0, 0,
888: 0, 0, 0, 0, 0, 0, 0, 0,
1.278 bouyer 889: "6x86MX" /* Default */
1.329 bad 890: },
1.437 thorpej 891: cyrix6x86_cpu_setup,
892: NULL,
1.464 christos 893: NULL,
1.406 fvdl 894: },
895: /* Family > 6 */
896: {
897: CPUCLASS_686,
898: {
1.459 jdolecek 899: 0, 0, 0, 0, 0, 0, 0, 0,
900: 0, 0, 0, 0, 0, 0, 0, 0,
1.406 fvdl 901: "Unknown 6x86MX" /* Default */
902: },
1.437 thorpej 903: NULL,
904: NULL,
1.464 christos 905: NULL,
1.329 bad 906: } }
907: },
908: {
909: "CentaurHauls",
910: CPUVENDOR_IDT,
911: "IDT",
912: /* Family 4, IDT never had any of these */
913: { {
1.484 fvdl 914: CPUCLASS_486,
1.329 bad 915: {
1.459 jdolecek 916: 0, 0, 0, 0, 0, 0, 0, 0,
917: 0, 0, 0, 0, 0, 0, 0, 0,
1.329 bad 918: "486 compatible" /* Default */
919: },
1.437 thorpej 920: NULL,
921: NULL,
1.464 christos 922: NULL,
1.329 bad 923: },
924: /* Family 5 */
925: {
926: CPUCLASS_586,
927: {
928: 0, 0, 0, 0, "WinChip C6", 0, 0, 0,
1.379 jdolecek 929: "WinChip 2", "WinChip 3", 0, 0, 0, 0, 0, 0,
1.329 bad 930: "WinChip" /* Default */
931: },
1.437 thorpej 932: winchip_cpu_setup,
933: NULL,
1.464 christos 934: NULL,
1.329 bad 935: },
936: /* Family 6, not yet available from IDT */
1.406 fvdl 937: {
938: CPUCLASS_686,
939: {
1.459 jdolecek 940: 0, 0, 0, 0, 0, 0, 0, 0,
941: 0, 0, 0, 0, 0, 0, 0, 0,
1.406 fvdl 942: "Pentium Pro compatible" /* Default */
943: },
1.437 thorpej 944: NULL,
945: NULL,
1.464 christos 946: NULL,
1.406 fvdl 947: },
948: /* Family > 6, not yet available from IDT */
1.329 bad 949: {
950: CPUCLASS_686,
951: {
1.459 jdolecek 952: 0, 0, 0, 0, 0, 0, 0, 0,
953: 0, 0, 0, 0, 0, 0, 0, 0,
1.329 bad 954: "Pentium Pro compatible" /* Default */
1.267 bouyer 955: },
1.437 thorpej 956: NULL,
957: NULL,
1.464 christos 958: NULL,
1.216 fvdl 959: } }
1.461 christos 960: },
961: {
962: "GenuineTMx86",
963: CPUVENDOR_TRANSMETA,
964: "Transmeta",
965: /* Family 4, Transmeta never had any of these */
966: { {
1.484 fvdl 967: CPUCLASS_486,
1.461 christos 968: {
1.462 enami 969: 0, 0, 0, 0, 0, 0, 0, 0,
970: 0, 0, 0, 0, 0, 0, 0, 0,
1.461 christos 971: "486 compatible" /* Default */
972: },
973: NULL,
974: NULL,
1.464 christos 975: NULL,
1.461 christos 976: },
977: /* Family 5 */
978: {
979: CPUCLASS_586,
980: {
1.462 enami 981: 0, 0, 0, 0, 0, 0, 0, 0,
982: 0, 0, 0, 0, 0, 0, 0, 0,
1.461 christos 983: "Crusoe" /* Default */
984: },
985: transmeta_cpu_setup,
986: NULL,
1.464 christos 987: transmeta_cpu_info,
1.461 christos 988: },
989: /* Family 6, not yet available from Transmeta */
990: {
991: CPUCLASS_686,
992: {
1.462 enami 993: 0, 0, 0, 0, 0, 0, 0, 0,
994: 0, 0, 0, 0, 0, 0, 0, 0,
1.461 christos 995: "Pentium Pro compatible" /* Default */
996: },
997: NULL,
998: NULL,
1.464 christos 999: NULL,
1.461 christos 1000: },
1001: /* Family > 6, not yet available from Transmeta */
1002: {
1003: CPUCLASS_686,
1004: {
1.462 enami 1005: 0, 0, 0, 0, 0, 0, 0, 0,
1006: 0, 0, 0, 0, 0, 0, 0, 0,
1.461 christos 1007: "Pentium Pro compatible" /* Default */
1008: },
1009: NULL,
1010: NULL,
1.464 christos 1011: NULL,
1.461 christos 1012: } }
1.216 fvdl 1013: }
1014: };
1015:
1.484 fvdl 1016: void
1017: cyrix6x86_cpu_setup(ci)
1018: struct cpu_info *ci;
1.437 thorpej 1019: {
1.484 fvdl 1020: /*
1021: * i8254 latch check routine:
1022: * National Geode (formerly Cyrix MediaGX) has a serious bug in
1023: * its built-in i8254-compatible clock module.
1024: * Set the variable 'clock_broken_latch' to indicate it.
1025: */
1.437 thorpej 1026:
1.484 fvdl 1027: extern int clock_broken_latch;
1.437 thorpej 1028:
1.484 fvdl 1029: switch (ci->ci_signature) {
1030: case 0x440: /* Cyrix MediaGX */
1031: case 0x540: /* GXm */
1032: clock_broken_latch = 1;
1033: break;
1034: }
1.437 thorpej 1035:
1.267 bouyer 1036: /* set up various cyrix registers */
1037: /* Enable suspend on halt */
1038: cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);
1039: /* enable access to ccr4/ccr5 */
1040: cyrix_write_reg(0xC3, cyrix_read_reg(0xC3) | 0x10);
1041: /* cyrix's workaround for the "coma bug" */
1042: cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
1043: cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
1044: cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
1045: cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
1046: /* disable access to ccr4/ccr5 */
1047: cyrix_write_reg(0xC3, cyrix_read_reg(0xC3) & ~0x10);
1.393 fvdl 1048:
1.484 fvdl 1049: /*
1.393 fvdl 1050: * XXX disable page zero in the idle loop, it seems to
1051: * cause panics on these CPUs.
1052: */
1053: vm_page_zero_enable = FALSE;
1.267 bouyer 1054: }
1055:
/*
 * Per-CPU setup hook for IDT WinChip processors.
 *
 * The WinChip C6 (family 5, model 4) has a broken time stamp counter;
 * mask CPUID_TSC out of cpu_feature so nothing tries to use it.
 *
 * Converted from a K&R-style definition to an ANSI prototype for
 * consistency with the other cpu_setup hooks in this file; an
 * explicit break was added to the switch case.
 */
void
winchip_cpu_setup(struct cpu_info *ci)
{
#if defined(I586_CPU)
	switch (CPUID2MODEL(ci->ci_signature)) {	/* model */
	case 4:	/* WinChip C6 */
		cpu_feature &= ~CPUID_TSC;
		printf("WARNING: WinChip C6: broken TSC disabled\n");
		break;
	}
#endif
}
1068:
1.484 fvdl 1069: #define CPUID(code, eax, ebx, ecx, edx) \
1070: asm ("cpuid" \
1071: : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
1072: : "a" (code));
1073:
/*
 * Probe the basic CPUID information for this CPU: maximum CPUID
 * level, vendor string, signature and feature flags, Intel-style
 * cache descriptors, and (if present) the processor serial number.
 * All results are stored into *ci.  Returns early at each stage if
 * the CPU's reported CPUID level does not reach the next function.
 */
static void
cpu_probe_base_features(struct cpu_info *ci)
{
	const struct i386_cache_info *cai;
	u_int descs[4];
	int iterations, i, j;
	u_int8_t desc;
	u_int32_t dummy1, dummy2, miscbytes;

	/* ci_cpuid_level < 0 means no CPUID instruction at all. */
	if (ci->ci_cpuid_level < 0)
		return;

	/*
	 * CPUID function 0: max supported level in %eax; the 12-byte
	 * vendor string is returned in %ebx, %edx, %ecx order, hence
	 * the 0/2/1 argument indexing below (CPUID's argument order
	 * is eax, ebx, ecx, edx).
	 */
	CPUID(0, ci->ci_cpuid_level,
	    ci->ci_vendor[0],
	    ci->ci_vendor[2],
	    ci->ci_vendor[1]);
	ci->ci_vendor[3] = 0;	/* NUL-terminate the vendor string */

	if (ci->ci_cpuid_level < 1)
		return;

	/*
	 * CPUID function 1: signature (stepping/model/family) in %eax,
	 * miscellaneous info bytes in %ebx, feature flags in %edx.
	 */
	CPUID(1, ci->ci_signature, miscbytes, dummy1, ci->ci_feature_flags);

	/* Brand is low order 8 bits of ebx */
	ci->ci_brand_id = miscbytes & 0xff;

	/* CLFLUSH line size is next 8 bits */
	/* (reported in 8-byte units; << 3 converts to bytes) */
	if (ci->ci_feature_flags & CPUID_CFLUSH)
		ci->ci_cflush_lsize = ((miscbytes >> 8) & 0xff) << 3;

	if (ci->ci_cpuid_level < 2)
		return;

	/*
	 * Parse the cache info from `cpuid', if we have it.
	 * XXX This is kinda ugly, but hey, so is the architecture...
	 */

	CPUID(2, descs[0], descs[1], descs[2], descs[3]);

	/*
	 * Function 2 packs one descriptor per register byte.  The low
	 * byte of %eax on the first execution is the number of times
	 * the instruction must be issued to retrieve all descriptors;
	 * a register with bit 31 set carries no valid descriptors, and
	 * byte (i == 0, j == 0) is the iteration count itself.
	 */
	iterations = descs[0] & 0xff;
	while (iterations-- > 0) {
		for (i = 0; i < 4; i++) {
			if (descs[i] & 0x80000000)
				continue;
			for (j = 0; j < 4; j++) {
				if (i == 0 && j == 0)
					continue;
				desc = (descs[i] >> (j * 8)) & 0xff;
				if (desc == 0)
					continue;
				cai = cache_info_lookup(intel_cpuid_cache_info,
				    desc);
				if (cai != NULL)
					ci->ci_cinfo[cai->cai_index] = *cai;
			}
		}
		CPUID(2, descs[0], descs[1], descs[2], descs[3]);
	}

	if (ci->ci_cpuid_level < 3)
		return;

	/*
	 * If the processor serial number misfeature is present and supported,
	 * extract it here.
	 */
	if ((ci->ci_feature_flags & CPUID_PN) != 0)
	{
		/* Serial = signature word plus %edx:%ecx of function 3. */
		ci->ci_cpu_serial[0] = ci->ci_signature;
		CPUID(3, dummy1, dummy2,
		    ci->ci_cpu_serial[2],
		    ci->ci_cpu_serial[1]);
	}
}
1149:
1150: void
1151: cpu_probe_features(struct cpu_info *ci)
1152: {
1153: const struct cpu_cpuid_nameclass *cpup = NULL;
1154: int i, max, family;
1155:
1156: cpu_probe_base_features(ci);
1157:
1158: if (ci->ci_cpuid_level < 1)
1159: return;
1160:
1161: max = sizeof (i386_cpuid_cpus) / sizeof (i386_cpuid_cpus[0]);
1162: for (i = 0; i < max; i++) {
1163: if (!strncmp((char *)ci->ci_vendor,
1164: i386_cpuid_cpus[i].cpu_id, 12)) {
1165: cpup = &i386_cpuid_cpus[i];
1166: break;
1167: }
1168: }
1169:
1170: if (cpup == NULL)
1171: return;
1172:
1173: family = (ci->ci_signature >> 8) & 0xf;
1174:
1175: if (family > CPU_MAXFAMILY) {
1176: family = CPU_MAXFAMILY;
1177: }
1178: i = family - CPU_MINFAMILY;
1179:
1180: if (cpup->cpu_family[i].cpu_probe == NULL)
1181: return;
1182:
1183: (*cpup->cpu_family[i].cpu_probe)(ci);
1184: }
1185:
1.487 junyoung 1186: void
1187: amd_family6_probe(struct cpu_info *ci)
1188: {
1189: u_int32_t eax;
1190: u_int32_t dummy1, dummy2, dummy3;
1191: u_int32_t brand[12];
1192: char *p;
1193: int i;
1194:
1195: CPUID(0x80000000, eax, dummy1, dummy2, dummy3);
1196: if (eax < 0x80000004)
1197: return;
1198:
1199: CPUID(0x80000002, brand[0], brand[1], brand[2], brand[3]);
1200: CPUID(0x80000003, brand[4], brand[5], brand[6], brand[7]);
1201: CPUID(0x80000004, brand[8], brand[9], brand[10], brand[11]);
1202:
1203: for (i = 1; i < sizeof(amd_brand) / sizeof(amd_brand[0]); i++)
1204: if ((p = strstr((char *)brand, amd_brand[i])) != NULL) {
1205: ci->ci_brand_id = i;
1206: strcpy(amd_brand_name, p);
1207: break;
1208: }
1209: }
1.484 fvdl 1210:
1.441 thorpej 1211: void
1.484 fvdl 1212: amd_family5_setup(struct cpu_info *ci)
1.441 thorpej 1213: {
1214:
1.484 fvdl 1215: switch (CPUID2MODEL(ci->ci_signature)) {
1.441 thorpej 1216: case 0: /* AMD-K5 Model 0 */
1217: /*
1218: * According to the AMD Processor Recognition App Note,
1219: * the AMD-K5 Model 0 uses the wrong bit to indicate
1220: * support for global PTEs, instead using bit 9 (APIC)
1221: * rather than bit 13 (i.e. "0x200" vs. 0x2000". Oops!).
1222: */
1223: if (cpu_feature & CPUID_APIC)
1224: cpu_feature = (cpu_feature & ~CPUID_APIC) | CPUID_PGE;
1225: /*
1226: * XXX But pmap_pg_g is already initialized -- need to kick
1227: * XXX the pmap somehow. How does the MP branch do this?
1228: */
1229: break;
1230: }
1.379 jdolecek 1231: }
1232:
1.461 christos 1233: /*
1234: * Transmeta Crusoe LongRun Support by Tamotsu Hattori.
1235: * Port from FreeBSD-current(August, 2001) to NetBSD by tshiozak.
1236: */
1237:
1.462 enami 1238: #define MSR_TMx86_LONGRUN 0x80868010
1239: #define MSR_TMx86_LONGRUN_FLAGS 0x80868011
1.461 christos 1240:
1.467 christos 1241: #define LONGRUN_MODE_MASK(x) ((x) & 0x0000007f)
1.462 enami 1242: #define LONGRUN_MODE_RESERVED(x) ((x) & 0xffffff80)
1243: #define LONGRUN_MODE_WRITE(x, y) (LONGRUN_MODE_RESERVED(x) | \
1244: LONGRUN_MODE_MASK(y))
1245:
1246: #define LONGRUN_MODE_MINFREQUENCY 0x00
1247: #define LONGRUN_MODE_ECONOMY 0x01
1248: #define LONGRUN_MODE_PERFORMANCE 0x02
1249: #define LONGRUN_MODE_MAXFREQUENCY 0x03
1250: #define LONGRUN_MODE_UNKNOWN 0x04
1251: #define LONGRUN_MODE_MAX 0x04
1.461 christos 1252:
1253: union msrinfo {
1254: u_int64_t msr;
1255: u_int32_t regs[2];
1256: };
1257:
1258: u_int32_t longrun_modes[LONGRUN_MODE_MAX][3] = {
1259: /* MSR low, MSR high, flags bit0 */
1260: { 0, 0, 0}, /* LONGRUN_MODE_MINFREQUENCY */
1261: { 0, 100, 0}, /* LONGRUN_MODE_ECONOMY */
1262: { 0, 100, 1}, /* LONGRUN_MODE_PERFORMANCE */
1263: { 100, 100, 1}, /* LONGRUN_MODE_MAXFREQUENCY */
1264: };
1265:
1.484 fvdl 1266: static u_int
1.461 christos 1267: tmx86_get_longrun_mode(void)
1268: {
1269: u_long eflags;
1270: union msrinfo msrinfo;
1271: u_int low, high, flags, mode;
1272:
1273: eflags = read_eflags();
1274: disable_intr();
1275:
1276: msrinfo.msr = rdmsr(MSR_TMx86_LONGRUN);
1277: low = LONGRUN_MODE_MASK(msrinfo.regs[0]);
1278: high = LONGRUN_MODE_MASK(msrinfo.regs[1]);
1279: flags = rdmsr(MSR_TMx86_LONGRUN_FLAGS) & 0x01;
1280:
1281: for (mode = 0; mode < LONGRUN_MODE_MAX; mode++) {
1282: if (low == longrun_modes[mode][0] &&
1283: high == longrun_modes[mode][1] &&
1284: flags == longrun_modes[mode][2]) {
1285: goto out;
1286: }
1287: }
1288: mode = LONGRUN_MODE_UNKNOWN;
1289: out:
1290: write_eflags(eflags);
1291: return (mode);
1292: }
1293:
1.484 fvdl 1294: static u_int
1.462 enami 1295: tmx86_get_longrun_status(u_int *frequency, u_int *voltage, u_int *percentage)
1.461 christos 1296: {
1297: u_long eflags;
1.484 fvdl 1298: u_int eax, ebx, ecx, edx;
1.461 christos 1299:
1300: eflags = read_eflags();
1301: disable_intr();
1302:
1.484 fvdl 1303: CPUID(0x80860007, eax, ebx, ecx, edx);
1304: *frequency = eax;
1305: *voltage = ebx;
1306: *percentage = ecx;
1.461 christos 1307:
1308: write_eflags(eflags);
1309: return (1);
1310: }
1311:
1.484 fvdl 1312: static u_int
1.461 christos 1313: tmx86_set_longrun_mode(u_int mode)
1314: {
1315: u_long eflags;
1316: union msrinfo msrinfo;
1317:
1318: if (mode >= LONGRUN_MODE_UNKNOWN) {
1319: return (0);
1320: }
1321:
1322: eflags = read_eflags();
1323: disable_intr();
1324:
1325: /* Write LongRun mode values to Model Specific Register. */
1326: msrinfo.msr = rdmsr(MSR_TMx86_LONGRUN);
1327: msrinfo.regs[0] = LONGRUN_MODE_WRITE(msrinfo.regs[0],
1.462 enami 1328: longrun_modes[mode][0]);
1.461 christos 1329: msrinfo.regs[1] = LONGRUN_MODE_WRITE(msrinfo.regs[1],
1.462 enami 1330: longrun_modes[mode][1]);
1.461 christos 1331: wrmsr(MSR_TMx86_LONGRUN, msrinfo.msr);
1332:
1333: /* Write LongRun mode flags to Model Specific Register. */
1334: msrinfo.msr = rdmsr(MSR_TMx86_LONGRUN_FLAGS);
1335: msrinfo.regs[0] = (msrinfo.regs[0] & ~0x01) | longrun_modes[mode][2];
1336: wrmsr(MSR_TMx86_LONGRUN_FLAGS, msrinfo.msr);
1337:
1338: write_eflags(eflags);
1339: return (1);
1340: }
1341:
/*
 * Cached snapshot of the Crusoe LongRun state, refreshed on demand by
 * tmx86_get_longrun_status_all() and read by the CPU_TMLR_* sysctls.
 */
static u_int crusoe_longrun;
static u_int crusoe_frequency;
static u_int crusoe_voltage;
static u_int crusoe_percentage;
1346:
/*
 * Refresh the cached crusoe_frequency/voltage/percentage values from
 * the hardware; convenience wrapper used by the sysctl handlers.
 */
static void
tmx86_get_longrun_status_all(void)
{

	tmx86_get_longrun_status(&crusoe_frequency,
	    &crusoe_voltage, &crusoe_percentage);
}
1354:
1355:
/*
 * ci_info hook for Transmeta CPUs: print the processor revision, Code
 * Morphing Software revision, the 64-byte CPU information string, and
 * (when function 0x80860007 exists) the current LongRun status.
 * Each piece is guarded by the largest Transmeta CPUID function the
 * chip reports via function 0x80860000.
 */
static void
transmeta_cpu_info(struct cpu_info *ci)
{
	u_int eax, ebx, ecx, edx, nreg = 0;

	/* nreg = highest supported Transmeta-specific CPUID function. */
	CPUID(0x80860000, eax, ebx, ecx, edx);
	nreg = eax;
	if (nreg >= 0x80860001) {
		CPUID(0x80860001, eax, ebx, ecx, edx);
		printf("%s: Processor revision %u.%u.%u.%u\n",
		    ci->ci_dev->dv_xname,
		    (ebx >> 24) & 0xff,
		    (ebx >> 16) & 0xff,
		    (ebx >> 8) & 0xff,
		    ebx & 0xff);
	}
	if (nreg >= 0x80860002) {
		CPUID(0x80860002, eax, ebx, ecx, edx);
		printf("%s: Code Morphing Software Rev: %u.%u.%u-%u-%u\n",
		    ci->ci_dev->dv_xname, (ebx >> 24) & 0xff,
		    (ebx >> 16) & 0xff,
		    (ebx >> 8) & 0xff,
		    ebx & 0xff,
		    ecx);
	}
	if (nreg >= 0x80860006) {
		/*
		 * Functions 0x80860003..6 return the information
		 * string 16 bytes at a time; the union lets us read
		 * the register dump back as text.  The 65th byte
		 * holds a terminating NUL.
		 */
		union {
			char text[65];
			struct
			{
				u_int eax;
				u_int ebx;
				u_int ecx;
				u_int edx;
			} regs[4];
		} info;
		int i;

		for (i=0; i<4; i++) {
			CPUID(0x80860003 + i,
			    info.regs[i].eax, info.regs[i].ebx,
			    info.regs[i].ecx, info.regs[i].edx);
		}
		info.text[64] = 0;
		printf("%s: %s\n", ci->ci_dev->dv_xname, info.text);
	}

	if (nreg >= 0x80860007) {
		/* LongRun supported: cache and report the status. */
		crusoe_longrun = tmx86_get_longrun_mode();
		tmx86_get_longrun_status(&crusoe_frequency,
		    &crusoe_voltage, &crusoe_percentage);
		printf("%s: LongRun mode: %d <%dMHz %dmV %d%%>\n",
		    ci->ci_dev->dv_xname,
		    crusoe_longrun, crusoe_frequency, crusoe_voltage,
		    crusoe_percentage);
	}
}
1413:
1414: void
1.484 fvdl 1415: transmeta_cpu_setup(struct cpu_info *ci)
1.461 christos 1416: {
1.493 ! mycroft 1417: u_int regs[4], nreg = 0;
1.462 enami 1418:
1.493 ! mycroft 1419: do_cpuid(0x80860000, regs);
! 1420: nreg = regs[0];
! 1421: if (nreg >= 0x80860007)
! 1422: tmx86_has_longrun = 1;
1.461 christos 1423: }
1424:
1425:
1426: /* ---------------------------------------------------------------------- */
1427:
1.437 thorpej 1428: static const struct i386_cache_info *
1.484 fvdl 1429: cache_info_lookup(const struct i386_cache_info *cai, u_int8_t desc)
1.397 thorpej 1430: {
1.438 thorpej 1431: int i;
1.397 thorpej 1432:
1.484 fvdl 1433: for (i = 0; cai[i].cai_desc != 0; i++) {
1.438 thorpej 1434: if (cai[i].cai_desc == desc)
1435: return (&cai[i]);
1.437 thorpej 1436: }
1437:
1438: return (NULL);
1.397 thorpej 1439: }
1440:
1.438 thorpej 1441: /*
1442: * AMD Cache Info:
1443: *
1444: * Athlon, Duron:
1445: *
1446: * Function 8000.0005 L1 TLB/Cache Information
1447: * EAX -- L1 TLB 2/4MB pages
1448: * EBX -- L1 TLB 4K pages
1449: * ECX -- L1 D-cache
1450: * EDX -- L1 I-cache
1451: *
1452: * Function 8000.0006 L2 TLB/Cache Information
1453: * EAX -- L2 TLB 2/4MB pages
1454: * EBX -- L2 TLB 4K pages
1455: * ECX -- L2 Unified cache
1456: * EDX -- reserved
1457: *
1458: * K5, K6:
1459: *
1460: * Function 8000.0005 L1 TLB/Cache Information
1461: * EAX -- reserved
1462: * EBX -- TLB 4K pages
1463: * ECX -- L1 D-cache
1464: * EDX -- L1 I-cache
1465: *
1466: * K6-III:
1467: *
1468: * Function 8000.0006 L2 Cache Information
1469: * EAX -- reserved
1470: * EBX -- reserved
1471: * ECX -- L2 Unified cache
1472: * EDX -- reserved
1473: */
1474:
/*
 * Field extractors for the AMD extended cache/TLB CPUID leaves
 * (8000.0005 and 8000.0006) described in the comment above.
 * Cache sizes are reported by the CPU in KB; the *_SIZE macros
 * convert to bytes.
 */

/* L1 TLB 2/4MB pages */
#define AMD_L1_EAX_DTLB_ASSOC(x)	(((x) >> 24) & 0xff)
#define AMD_L1_EAX_DTLB_ENTRIES(x)	(((x) >> 16) & 0xff)
#define AMD_L1_EAX_ITLB_ASSOC(x)	(((x) >> 8) & 0xff)
#define AMD_L1_EAX_ITLB_ENTRIES(x)	( (x) & 0xff)

/* L1 TLB 4K pages */
#define AMD_L1_EBX_DTLB_ASSOC(x)	(((x) >> 24) & 0xff)
#define AMD_L1_EBX_DTLB_ENTRIES(x)	(((x) >> 16) & 0xff)
#define AMD_L1_EBX_ITLB_ASSOC(x)	(((x) >> 8) & 0xff)
#define AMD_L1_EBX_ITLB_ENTRIES(x)	( (x) & 0xff)

/* L1 Data Cache */
#define AMD_L1_ECX_DC_SIZE(x)	((((x) >> 24) & 0xff) * 1024)
#define AMD_L1_ECX_DC_ASSOC(x)	 (((x) >> 16) & 0xff)
#define AMD_L1_ECX_DC_LPT(x)	 (((x) >> 8) & 0xff)
#define AMD_L1_ECX_DC_LS(x)	 ( (x) & 0xff)

/* L1 Instruction Cache */
#define AMD_L1_EDX_IC_SIZE(x)	((((x) >> 24) & 0xff) * 1024)
#define AMD_L1_EDX_IC_ASSOC(x)	 (((x) >> 16) & 0xff)
#define AMD_L1_EDX_IC_LPT(x)	 (((x) >> 8) & 0xff)
#define AMD_L1_EDX_IC_LS(x)	 ( (x) & 0xff)

/* Note for L2 TLB -- if the upper 16 bits are 0, it is a unified TLB */

/* L2 TLB 2/4MB pages */
#define AMD_L2_EAX_DTLB_ASSOC(x)	(((x) >> 28) & 0xf)
#define AMD_L2_EAX_DTLB_ENTRIES(x)	(((x) >> 16) & 0xfff)
#define AMD_L2_EAX_IUTLB_ASSOC(x)	(((x) >> 12) & 0xf)
#define AMD_L2_EAX_IUTLB_ENTRIES(x)	( (x) & 0xfff)

/* L2 TLB 4K pages */
#define AMD_L2_EBX_DTLB_ASSOC(x)	(((x) >> 28) & 0xf)
#define AMD_L2_EBX_DTLB_ENTRIES(x)	(((x) >> 16) & 0xfff)
#define AMD_L2_EBX_IUTLB_ASSOC(x)	(((x) >> 12) & 0xf)
#define AMD_L2_EBX_IUTLB_ENTRIES(x)	( (x) & 0xfff)

/* L2 Cache */
#define AMD_L2_ECX_C_SIZE(x)	((((x) >> 16) & 0xffff) * 1024)
#define AMD_L2_ECX_C_ASSOC(x)	(((x) >> 12) & 0xf)
#define AMD_L2_ECX_C_LPT(x)	(((x) >> 8) & 0xf)
#define AMD_L2_ECX_C_LS(x)	( (x) & 0xff)
1518:
/*
 * Translation table from the 4-bit encoded L2 associativity returned
 * by CPUID 8000.0006 to an actual way count, looked up via
 * cache_info_lookup().  The 0x0f encoding maps to 0xff (presumably
 * "fully associative" -- see the consumer in amd_cpuid_cpu_cacheinfo);
 * a zero descriptor terminates the table.
 */
static const struct i386_cache_info amd_cpuid_l2cache_assoc_info[] = {
	{ 0, 0x01,    1 },
	{ 0, 0x02,    2 },
	{ 0, 0x04,    4 },
	{ 0, 0x06,    8 },
	{ 0, 0x08,   16 },
	{ 0, 0x0f, 0xff },
	{ 0, 0x00,    0 },
};
1528:
1529: void
1530: amd_cpuid_cpu_cacheinfo(struct cpu_info *ci)
1531: {
1532: const struct i386_cache_info *cp;
1533: struct i386_cache_info *cai;
1534: int family, model;
1535: u_int descs[4];
1536: u_int lfunc;
1537:
1.484 fvdl 1538: family = (ci->ci_signature >> 8) & 15;
1.438 thorpej 1539: if (family < CPU_MINFAMILY)
1540: panic("amd_cpuid_cpu_cacheinfo: strange family value");
1.484 fvdl 1541: model = CPUID2MODEL(ci->ci_signature);
1.438 thorpej 1542:
1543: /*
1544: * K5 model 0 has none of this info.
1545: */
1546: if (family == 5 && model == 0)
1547: return;
1548:
1549: /*
1550: * Determine the largest extended function value.
1551: */
1.484 fvdl 1552: CPUID(0x80000000, descs[0], descs[1], descs[2], descs[3]);
1.438 thorpej 1553: lfunc = descs[0];
1554:
1555: /*
1556: * Determine L1 cache/TLB info.
1557: */
1558: if (lfunc < 0x80000005) {
1559: /* No L1 cache info available. */
1560: return;
1561: }
1562:
1.484 fvdl 1563: CPUID(0x80000005, descs[0], descs[1], descs[2], descs[3]);
1.438 thorpej 1564:
1565: /*
1566: * K6-III and higher have large page TLBs.
1567: */
1568: if ((family == 5 && model >= 9) || family >= 6) {
1569: cai = &ci->ci_cinfo[CAI_ITLB2];
1570: cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
1571: cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
1572: cai->cai_linesize = (4 * 1024 * 1024);
1573:
1574: cai = &ci->ci_cinfo[CAI_DTLB2];
1575: cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
1576: cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
1577: cai->cai_linesize = (4 * 1024 * 1024);
1578: }
1579:
1580: cai = &ci->ci_cinfo[CAI_ITLB];
1581: cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
1582: cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
1583: cai->cai_linesize = (4 * 1024);
1584:
1585: cai = &ci->ci_cinfo[CAI_DTLB];
1586: cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
1587: cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
1.484 fvdl 1588: cai->cai_linesize = (4 * 1024);
1.438 thorpej 1589:
1590: cai = &ci->ci_cinfo[CAI_DCACHE];
1591: cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
1592: cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
1593: cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[2]);
1594:
1595: cai = &ci->ci_cinfo[CAI_ICACHE];
1596: cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
1597: cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
1598: cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
1599:
1600: /*
1601: * Determine L2 cache/TLB info.
1602: */
1603: if (lfunc < 0x80000006) {
1604: /* No L2 cache info available. */
1605: return;
1606: }
1607:
1.484 fvdl 1608: CPUID(0x80000006, descs[0], descs[1], descs[2], descs[3]);
1.438 thorpej 1609:
1610: cai = &ci->ci_cinfo[CAI_L2CACHE];
1611: cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
1612: cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
1613: cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
1614:
1615: cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
1.484 fvdl 1616: cai->cai_associativity);
1.438 thorpej 1617: if (cp != NULL)
1618: cai->cai_associativity = cp->cai_associativity;
1619: else
1620: cai->cai_associativity = 0; /* XXX Unknown/reserved */
1.402 explorer 1621: }
1622:
1.472 thorpej 1623: static const char n_support[] __attribute__((__unused__)) =
1.467 christos 1624: "NOTICE: this kernel does not support %s CPU class\n";
1.472 thorpej 1625: static const char n_lower[] __attribute__((__unused__)) =
1626: "NOTICE: lowering CPU class to %s\n";
1.467 christos 1627:
1.379 jdolecek 1628: void
1.437 thorpej 1629: identifycpu(struct cpu_info *ci)
1.16 cgd 1630: {
1.417 jdolecek 1631: const char *name, *modifier, *vendorname, *brand = "";
1.216 fvdl 1632: int class = CPUCLASS_386, vendor, i, max;
1.486 simonb 1633: int family, model, modif;
1634: #ifdef CPUDEBUG
1635: int step;
1636: #endif
1.418 jdolecek 1637: const struct cpu_cpuid_nameclass *cpup = NULL;
1.454 enami 1638: const struct cpu_cpuid_family *cpufam;
1.484 fvdl 1639: char *cpuname = ci->ci_dev->dv_xname;
1640: char buf[1024];
1641: char *sep;
1.86 mycroft 1642:
1.484 fvdl 1643: if (ci->ci_cpuid_level == -1) {
1.59 mycroft 1644: #ifdef DIAGNOSTIC
1.216 fvdl 1645: if (cpu < 0 || cpu >=
1.454 enami 1646: sizeof(i386_nocpuid_cpus) / sizeof(i386_nocpuid_cpus[0]))
1.483 provos 1647: panic("unknown cpu type %d", cpu);
1.216 fvdl 1648: #endif
1649: name = i386_nocpuid_cpus[cpu].cpu_name;
1650: vendor = i386_nocpuid_cpus[cpu].cpu_vendor;
1651: vendorname = i386_nocpuid_cpus[cpu].cpu_vendorname;
1652: class = i386_nocpuid_cpus[cpu].cpu_class;
1.484 fvdl 1653: ci->cpu_setup = i386_nocpuid_cpus[cpu].cpu_setup;
1.464 christos 1654: ci->ci_info = i386_nocpuid_cpus[cpu].cpu_info;
1.216 fvdl 1655: modifier = "";
1656: } else {
1657: max = sizeof (i386_cpuid_cpus) / sizeof (i386_cpuid_cpus[0]);
1.484 fvdl 1658: modif = (ci->ci_signature >> 12) & 0x3;
1659: family = (ci->ci_signature >> 8) & 0xf;
1.216 fvdl 1660: if (family < CPU_MINFAMILY)
1661: panic("identifycpu: strange family value");
1.484 fvdl 1662: model = CPUID2MODEL(ci->ci_signature);
1.486 simonb 1663: #ifdef CPUDEBUG
1.484 fvdl 1664: step = ci->ci_signature & 0xf;
1665: printf("%s: family %x model %x step %x\n", cpuname, family,
1666: model, step);
1.216 fvdl 1667: #endif
1668:
1669: for (i = 0; i < max; i++) {
1.484 fvdl 1670: if (!strncmp((char *)ci->ci_vendor,
1.216 fvdl 1671: i386_cpuid_cpus[i].cpu_id, 12)) {
1672: cpup = &i386_cpuid_cpus[i];
1673: break;
1674: }
1675: }
1676:
1677: if (cpup == NULL) {
1678: vendor = CPUVENDOR_UNKNOWN;
1.484 fvdl 1679: if (ci->ci_vendor[0] != '\0')
1680: vendorname = (char *)&ci->ci_vendor[0];
1.216 fvdl 1681: else
1682: vendorname = "Unknown";
1683: if (family > CPU_MAXFAMILY)
1684: family = CPU_MAXFAMILY;
1685: class = family - 3;
1686: modifier = "";
1687: name = "";
1.484 fvdl 1688: ci->cpu_setup = NULL;
1.464 christos 1689: ci->ci_info = NULL;
1.216 fvdl 1690: } else {
1691: vendor = cpup->cpu_vendor;
1692: vendorname = cpup->cpu_vendorname;
1693: modifier = modifiers[modif];
1694: if (family > CPU_MAXFAMILY) {
1695: family = CPU_MAXFAMILY;
1696: model = CPU_DEFMODEL;
1697: } else if (model > CPU_MAXMODEL)
1698: model = CPU_DEFMODEL;
1.454 enami 1699: cpufam = &cpup->cpu_family[family - CPU_MINFAMILY];
1700: name = cpufam->cpu_models[model];
1.216 fvdl 1701: if (name == NULL)
1.484 fvdl 1702: name = cpufam->cpu_models[CPU_DEFMODEL];
1.454 enami 1703: class = cpufam->cpu_class;
1.484 fvdl 1704: ci->cpu_setup = cpufam->cpu_setup;
1.464 christos 1705: ci->ci_info = cpufam->cpu_info;
1.417 jdolecek 1706:
1707: /*
1708: * Intel processors family >= 6, model 8 allow to
1709: * recognize brand by Brand ID value.
1710: */
1.454 enami 1711: if (vendor == CPUVENDOR_INTEL && family >= 6 &&
1.484 fvdl 1712: model >= 8 && ci->ci_brand_id &&
1.485 fvdl 1713: ci->ci_brand_id < 8)
1.484 fvdl 1714: brand = i386_intel_brand[ci->ci_brand_id];
1.487 junyoung 1715:
1716: if (vendor == CPUVENDOR_AMD && family == 6 &&
1717: model >= 6) {
1718: if (ci->ci_brand_id == 1)
1719: /*
1720: * It's Duron. We override the
1721: * name, since it might have been
1722: * misidentified as Athlon.
1723: */
1724: name = amd_brand[ci->ci_brand_id];
1725: else
1726: brand = amd_brand_name;
1727: }
1.216 fvdl 1728: }
1.104 cgd 1729: }
1730:
1.484 fvdl 1731: cpu_class = class;
1732: ci->ci_cpu_class = class;
1733:
1734: #if defined(I586_CPU) || defined(I686_CPU)
1735: /*
1736: * If we have a cycle counter, compute the approximate
1737: * CPU speed in MHz.
1738: * XXX this needs to run on the CPU being probed..
1739: */
1740: if (ci->ci_feature_flags & CPUID_TSC) {
1741: u_int64_t last_tsc;
1742:
1743: last_tsc = rdtsc();
1744: delay(100000);
1745: ci->ci_tsc_freq = (rdtsc() - last_tsc) * 10;
1746: microtime_func = tsc_microtime;
1747: }
1748: /* XXX end XXX */
1749: #endif
1750:
1.454 enami 1751: snprintf(cpu_model, sizeof(cpu_model), "%s%s%s%s%s%s%s (%s-class)",
1752: vendorname,
1753: *modifier ? " " : "", modifier,
1754: *name ? " " : "", name,
1755: *brand ? " " : "", brand,
1756: classnames[class]);
1.484 fvdl 1757: printf("%s: %s", cpuname, cpu_model);
1.216 fvdl 1758:
1.484 fvdl 1759: if (ci->ci_tsc_freq != 0)
1760: printf(", %qd.%02qd MHz", (ci->ci_tsc_freq + 4999) / 1000000,
1761: ((ci->ci_tsc_freq + 4999) / 10000) % 100);
1762: printf("\n");
1763:
1764: if (ci->ci_info)
1765: (*ci->ci_info)(ci);
1766:
1767: if (ci->ci_feature_flags) {
1768: if ((ci->ci_feature_flags & CPUID_MASK1) != 0) {
1769: bitmask_snprintf(ci->ci_feature_flags, CPUID_FLAGS1,
1770: buf, sizeof(buf));
1771: printf("%s: features %s\n", cpuname, buf);
1772: }
1773: if ((ci->ci_feature_flags & CPUID_MASK2) != 0) {
1774: bitmask_snprintf(ci->ci_feature_flags, CPUID_FLAGS2,
1775: buf, sizeof(buf));
1776: printf("%s: features %s\n", cpuname, buf);
1777: }
1778: if ((ci->ci_feature_flags & CPUID_MASK3) != 0) {
1779: bitmask_snprintf(ci->ci_feature_flags, CPUID_FLAGS3,
1780: buf, sizeof(buf));
1781: printf("%s: features %s\n", cpuname, buf);
1782: }
1783: }
1.397 thorpej 1784:
1.402 explorer 1785:
1.484 fvdl 1786: if (ci->ci_cinfo[CAI_ICACHE].cai_totalsize != 0 ||
1787: ci->ci_cinfo[CAI_DCACHE].cai_totalsize != 0) {
1788: sep = print_cache_config(ci, CAI_ICACHE, "I-cache", NULL);
1789: sep = print_cache_config(ci, CAI_DCACHE, "D-cache", sep);
1790: if (sep != NULL)
1791: printf("\n");
1792: }
1793: if (ci->ci_cinfo[CAI_L2CACHE].cai_totalsize != 0) {
1794: sep = print_cache_config(ci, CAI_L2CACHE, "L2 cache", NULL);
1795: if (sep != NULL)
1796: printf("\n");
1797: }
1798: if (ci->ci_cinfo[CAI_ITLB].cai_totalsize != 0) {
1799: sep = print_tlb_config(ci, CAI_ITLB, "ITLB", NULL);
1800: sep = print_tlb_config(ci, CAI_ITLB2, NULL, sep);
1801: if (sep != NULL)
1802: printf("\n");
1803: }
1804: if (ci->ci_cinfo[CAI_DTLB].cai_totalsize != 0) {
1805: sep = print_tlb_config(ci, CAI_DTLB, "DTLB", NULL);
1806: sep = print_tlb_config(ci, CAI_DTLB2, NULL, sep);
1807: if (sep != NULL)
1808: printf("\n");
1809: }
1810:
1811: if (ci->ci_cpuid_level >= 3 && (ci->ci_feature_flags & CPUID_PN)) {
1812: printf("%s: serial number %04X-%04X-%04X-%04X-%04X-%04X\n",
1813: cpuname,
1814: ci->ci_cpu_serial[0] / 65536, ci->ci_cpu_serial[0] % 65536,
1815: ci->ci_cpu_serial[1] / 65536, ci->ci_cpu_serial[1] % 65536,
1816: ci->ci_cpu_serial[2] / 65536, ci->ci_cpu_serial[2] % 65536);
1817: }
1.18 cgd 1818:
1.16 cgd 1819: /*
1820: * Now that we have told the user what they have,
1821: * let them know if that machine type isn't configured.
1822: */
1.24 cgd 1823: switch (cpu_class) {
1.216 fvdl 1824: #if !defined(I386_CPU) && !defined(I486_CPU) && !defined(I586_CPU) && !defined(I686_CPU)
1.100 mycroft 1825: #error No CPU classes configured.
1826: #endif
1.216 fvdl 1827: #ifndef I686_CPU
1828: case CPUCLASS_686:
1.467 christos 1829: printf(n_support, "Pentium Pro");
1.216 fvdl 1830: #ifdef I586_CPU
1.467 christos 1831: printf(n_lower, "i586");
1.216 fvdl 1832: cpu_class = CPUCLASS_586;
1833: break;
1834: #endif
1835: #endif
1.165 mycroft 1836: #ifndef I586_CPU
1.118 mycroft 1837: case CPUCLASS_586:
1.467 christos 1838: printf(n_support, "Pentium");
1.165 mycroft 1839: #ifdef I486_CPU
1.467 christos 1840: printf(n_lower, "i486");
1.118 mycroft 1841: cpu_class = CPUCLASS_486;
1842: break;
1.16 cgd 1843: #endif
1.165 mycroft 1844: #endif
1845: #ifndef I486_CPU
1.18 cgd 1846: case CPUCLASS_486:
1.467 christos 1847: printf(n_support, "i486");
1.165 mycroft 1848: #ifdef I386_CPU
1.467 christos 1849: printf(n_lower, "i386");
1.118 mycroft 1850: cpu_class = CPUCLASS_386;
1851: break;
1852: #endif
1.165 mycroft 1853: #endif
1854: #ifndef I386_CPU
1.118 mycroft 1855: case CPUCLASS_386:
1.467 christos 1856: printf(n_support, "i386");
1.187 mycroft 1857: panic("no appropriate CPU class available");
1.59 mycroft 1858: #endif
1.16 cgd 1859: default:
1.448 thorpej 1860: break;
1861: }
1862:
1863: /*
1864: * Now plug in optimized versions of various routines we
1865: * might have.
1866: */
1867: switch (cpu_class) {
1868: #if defined(I686_CPU)
1869: case CPUCLASS_686:
1870: copyout_func = i486_copyout;
1871: break;
1872: #endif
1873: #if defined(I586_CPU)
1874: case CPUCLASS_586:
1875: copyout_func = i486_copyout;
1876: break;
1877: #endif
1878: #if defined(I486_CPU)
1879: case CPUCLASS_486:
1880: copyout_func = i486_copyout;
1881: break;
1882: #endif
1883: default:
1884: /* We just inherit the default i386 versions. */
1.16 cgd 1885: break;
1.121 mycroft 1886: }
1887:
1888: if (cpu == CPU_486DLC) {
1889: #ifndef CYRIX_CACHE_WORKS
1.210 christos 1890: printf("WARNING: CYRIX 486DLC CACHE UNCHANGED.\n");
1.121 mycroft 1891: #else
1892: #ifndef CYRIX_CACHE_REALLY_WORKS
1.210 christos 1893: printf("WARNING: CYRIX 486DLC CACHE ENABLED IN HOLD-FLUSH MODE.\n");
1.121 mycroft 1894: #else
1.210 christos 1895: printf("WARNING: CYRIX 486DLC CACHE ENABLED.\n");
1.121 mycroft 1896: #endif
1897: #endif
1.16 cgd 1898: }
1.147 mycroft 1899:
1.450 thorpej 1900: #if defined(I686_CPU)
1901: /*
1902: * If we have FXSAVE/FXRESTOR, use them.
1903: */
1904: if (cpu_feature & CPUID_FXSR) {
1.451 thorpej 1905: i386_use_fxsave = 1;
1.484 fvdl 1906:
1.451 thorpej 1907: /*
1908: * If we have SSE/SSE2, enable XMM exceptions, and
1909: * notify userland.
1910: */
1911: if (cpu_feature & (CPUID_SSE|CPUID_SSE2)) {
1912: if (cpu_feature & CPUID_SSE)
1913: i386_has_sse = 1;
1914: if (cpu_feature & CPUID_SSE2)
1915: i386_has_sse2 = 1;
1916: }
1.450 thorpej 1917: } else
1.451 thorpej 1918: i386_use_fxsave = 0;
1.450 thorpej 1919: #endif /* I686_CPU */
1.1 cgd 1920: }
1921:
/*
 * machine dependent system variables.
 *
 * Handles the CPU_* nodes of the machdep sysctl tree.  All nodes at
 * this level are terminal; most are read-only snapshots of boot-time
 * state, CPU_TMLR_MODE is the single read-write node.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	dev_t consdev;
	struct btinfo_bootpath *bibp;
	int error, mode;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		/* Current console device, or NODEV if none attached. */
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));

	case CPU_BIOSBASEMEM:
		/* Base/extended memory sizes as reported by the BIOS (KB). */
		return (sysctl_rdint(oldp, oldlenp, newp, biosbasemem));

	case CPU_BIOSEXTMEM:
		return (sysctl_rdint(oldp, oldlenp, newp, biosextmem));

	case CPU_NKPDE:
		return (sysctl_rdint(oldp, oldlenp, newp, nkpde));

	case CPU_FPU_PRESENT:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_fpu_present));

	case CPU_BOOTED_KERNEL:
		/* Path of the booted kernel, passed in by the boot loader. */
		bibp = lookup_bootinfo(BTINFO_BOOTPATH);
		if(!bibp)
			return(ENOENT); /* ??? */
		return (sysctl_rdstring(oldp, oldlenp, newp, bibp->bootpath));
	case CPU_DISKINFO:
		/*
		 * BIOS disk geometry table; sized by i386_ndisks (the
		 * struct already contains room for one entry).
		 */
		if (i386_alldisks == NULL)
			return (ENOENT);
		return (sysctl_rdstruct(oldp, oldlenp, newp, i386_alldisks,
		    sizeof (struct disklist) +
		    (i386_ndisks - 1) * sizeof (struct nativedisk_info)));
	case CPU_OSFXSR:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_use_fxsave));
	case CPU_SSE:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_has_sse));
	case CPU_SSE2:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_has_sse2));
	case CPU_TMLR_MODE:
		/*
		 * Transmeta Crusoe LongRun mode: read-write.  Re-read the
		 * current mode first so the caller sees fresh state, and
		 * only push a new mode down if the value actually changed.
		 */
		if (!tmx86_has_longrun)
			return (EOPNOTSUPP);
		mode = (int)(crusoe_longrun = tmx86_get_longrun_mode());
		error = sysctl_int(oldp, oldlenp, newp, newlen, &mode);
		if (!error && (u_int)mode != crusoe_longrun) {
			if (tmx86_set_longrun_mode(mode)) {
				crusoe_longrun = (u_int)mode;
			} else {
				error = EINVAL;
			}
		}
		return (error);
	case CPU_TMLR_FREQUENCY:
		/* The three LongRun status nodes refresh all values at once. */
		if (!tmx86_has_longrun)
			return (EOPNOTSUPP);
		tmx86_get_longrun_status_all();
		return (sysctl_rdint(oldp, oldlenp, newp, crusoe_frequency));
	case CPU_TMLR_VOLTAGE:
		if (!tmx86_has_longrun)
			return (EOPNOTSUPP);
		tmx86_get_longrun_status_all();
		return (sysctl_rdint(oldp, oldlenp, newp, crusoe_voltage));
	case CPU_TMLR_PERCENTAGE:
		if (!tmx86_has_longrun)
			return (EOPNOTSUPP);
		tmx86_get_longrun_status_all();
		return (sysctl_rdint(oldp, oldlenp, newp, crusoe_percentage));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
1.151 christos 2014:
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 *
 * Builds a struct sigframe (return address, signal number, code, and
 * saved sigcontext) on the user stack — or the alternate signal stack
 * when requested — then points the trapframe at the handler so the
 * process resumes inside it on return to user mode.
 */
void
sendsig(sig, mask, code)
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct sigacts *ps = p->p_sigacts;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	int onstack;
	sig_t catcher = SIGACTION(p, sig).sa_handler;

	tf = p->p_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/*
	 * Allocate space for the signal handler context.  On the
	 * alternate stack we start from its top; otherwise from the
	 * current user stack pointer.  fp-- reserves one sigframe.
	 */
	if (onstack)
		fp = (struct sigframe *)((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
		    p->p_sigctx.ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->tf_esp;
	fp--;

	/* Build stack frame for signal trampoline. */
	switch (ps->sa_sigdesc[sig].sd_vers) {
#if 1 /* COMPAT_16 */
	case 0:		/* legacy on-stack sigtramp */
		frame.sf_ra = (int)p->p_sigctx.ps_sigcode;
		break;
#endif /* COMPAT_16 */

	case 1:
		/* Trampoline provided by libc; address registered at exec. */
		frame.sf_ra = (int)ps->sa_sigdesc[sig].sd_tramp;
		break;

	default:
		/* Don't know what trampoline version; kill it. */
		sigexit(p, SIGILL);
	}

	frame.sf_signum = sig;
	frame.sf_code = code;
	frame.sf_scp = &fp->sf_sc;

	/* Save register context. */
#ifdef VM86
	if (tf->tf_eflags & PSL_VM) {
		/* vm86 keeps its segment registers in separate trapframe
		 * slots, and eflags need the emulated virtual flags. */
		frame.sf_sc.sc_gs = tf->tf_vm86_gs;
		frame.sf_sc.sc_fs = tf->tf_vm86_fs;
		frame.sf_sc.sc_es = tf->tf_vm86_es;
		frame.sf_sc.sc_ds = tf->tf_vm86_ds;
		frame.sf_sc.sc_eflags = get_vflags(p);
		(*p->p_emul->e_syscall_intern)(p);
	} else
#endif
	{
		frame.sf_sc.sc_gs = tf->tf_gs;
		frame.sf_sc.sc_fs = tf->tf_fs;
		frame.sf_sc.sc_es = tf->tf_es;
		frame.sf_sc.sc_ds = tf->tf_ds;
		frame.sf_sc.sc_eflags = tf->tf_eflags;
	}
	frame.sf_sc.sc_edi = tf->tf_edi;
	frame.sf_sc.sc_esi = tf->tf_esi;
	frame.sf_sc.sc_ebp = tf->tf_ebp;
	frame.sf_sc.sc_ebx = tf->tf_ebx;
	frame.sf_sc.sc_edx = tf->tf_edx;
	frame.sf_sc.sc_ecx = tf->tf_ecx;
	frame.sf_sc.sc_eax = tf->tf_eax;
	frame.sf_sc.sc_eip = tf->tf_eip;
	frame.sf_sc.sc_cs = tf->tf_cs;
	frame.sf_sc.sc_esp = tf->tf_esp;
	frame.sf_sc.sc_ss = tf->tf_ss;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
	frame.sf_sc.sc_err = tf->tf_err;

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &frame.sf_sc.__sc_mask13);
#endif

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.  We invoke the handler
	 * directly, only returning via the trampoline.  Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */
	tf->tf_gs = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_eip = (int)catcher;
	tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
	/* Clear trace, vm86 and alignment-check flags for the handler. */
	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
	tf->tf_esp = (int)fp;
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
}
2151:
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sys___sigreturn14(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, context;
	struct trapframe *tf;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore register context. */
	tf = p->p_md.md_regs;
#ifdef VM86
	if (context.sc_eflags & PSL_VM) {
		/* Returning to vm86 mode: restore vm86 segment slots and
		 * re-route syscalls through the vm86 syscall handler. */
		void syscall_vm86 __P((struct trapframe));

		tf->tf_vm86_gs = context.sc_gs;
		tf->tf_vm86_fs = context.sc_fs;
		tf->tf_vm86_es = context.sc_es;
		tf->tf_vm86_ds = context.sc_ds;
		set_vflags(p, context.sc_eflags);
		p->p_md.md_syscall = syscall_vm86;
	} else
#endif
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 */
		if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
		    !USERMODE(context.sc_cs, context.sc_eflags))
			return (EINVAL);

		tf->tf_gs = context.sc_gs;
		tf->tf_fs = context.sc_fs;
		tf->tf_es = context.sc_es;
		tf->tf_ds = context.sc_ds;
		tf->tf_eflags = context.sc_eflags;
	}
	tf->tf_edi = context.sc_edi;
	tf->tf_esi = context.sc_esi;
	tf->tf_ebp = context.sc_ebp;
	tf->tf_ebx = context.sc_ebx;
	tf->tf_edx = context.sc_edx;
	tf->tf_ecx = context.sc_ecx;
	tf->tf_eax = context.sc_eax;
	tf->tf_eip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_esp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);

	/* EJUSTRETURN: the trapframe already holds the return state. */
	return (EJUSTRETURN);
}
2237:
1.1 cgd 2238: int waittime = -1;
2239: struct pcb dumppcb;
2240:
/*
 * cpu_reboot: machine-dependent reboot/halt/powerdown.
 *
 * Depending on `howto': sync filesystems (unless RB_NOSYNC), dump
 * kernel memory (RB_DUMP), run shutdown hooks, power off
 * (RB_POWERDOWN, via ACPI or APM if configured), halt and wait for a
 * keypress (RB_HALT), or reset the CPU.  Never returns.
 * `bootstr' is currently unused on this port.
 */
void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{

	if (cold) {
		/* Too early for a clean shutdown; just halt. */
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		/* waittime guards against recursive panics re-syncing. */
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

#ifdef MULTIPROCESSOR
	/* Stop the other CPUs before we take the machine down. */
	i386_broadcast_ipi(I386_IPI_HALT);
#endif

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
#if NACPI > 0
		delay(500000);
		acpi_enter_sleep_state(acpi_softc, ACPI_STATE_S5);
		/* Only reached if the S5 transition did not happen. */
		printf("WARNING: powerdown failed!\n");
#endif
#if NAPM > 0 && !defined(APM_NO_POWEROFF)
		/* turn off, if we can.  But try to turn disk off and
		 * wait a bit first--some disk drives are slow to clean up
		 * and users have reported disk corruption.
		 */
		delay(500000);
		apm_set_powstate(APM_DEV_DISK(0xff), APM_SYS_OFF);
		delay(500000);
		apm_set_powstate(APM_DEV_ALLDEVS, APM_SYS_OFF);
		printf("WARNING: powerdown failed!\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
#endif
	}

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);	/* for proper keyboard command handling */
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n");
	if (cpureset_delay > 0)
		delay(cpureset_delay * 1000);
	cpu_reset();
	/* cpu_reset() should not return; spin forever if it does. */
	for(;;) ;
	/*NOTREACHED*/
}
2315:
/*
 * These variables are needed by /sbin/savecore
 */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */
2322:
2323: /*
1.291 thorpej 2324: * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
2325: */
2326: int
2327: cpu_dumpsize()
2328: {
2329: int size;
2330:
2331: size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
2332: ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
2333: if (roundup(size, dbtob(1)) != dbtob(1))
2334: return (-1);
2335:
2336: return (1);
2337: }
2338:
2339: /*
2340: * cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped.
2341: */
2342: u_long
2343: cpu_dump_mempagecnt()
2344: {
2345: u_long i, n;
2346:
2347: n = 0;
2348: for (i = 0; i < mem_cluster_cnt; i++)
2349: n += atop(mem_clusters[i].size);
2350: return (n);
2351: }
2352:
/*
 * cpu_dump: dump the machine-dependent kernel core dump headers.
 *
 * Writes one disk block at `dumplo' containing the kcore segment
 * header, the cpu_kcore_hdr (page directory address and segment
 * count), and one phys_ram_seg descriptor per memory cluster.
 * Returns 0 or an errno from the device dump routine.
 */
int
cpu_dump()
{
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	char buf[dbtob(1)];
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
	const struct bdevsw *bdev;
	int i;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		return (ENXIO);
	dump = bdev->d_dump;

	/* Lay the three headers out back-to-back, ALIGN()ed, in one block. */
	memset(buf, 0, sizeof buf);
	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
	memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
	    ALIGN(sizeof(*cpuhdrp))];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdrp->ptdpaddr = PTDpaddr;
	cpuhdrp->nmemsegs = mem_cluster_cnt;

	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		memsegp[i].start = mem_clusters[i].start;
		memsegp[i].size = mem_clusters[i].size;
	}

	return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
}
2400:
/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first PAGE_SIZE of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 *
 * On any failure (no dump device, no psize routine, partition too
 * small) dumpsize is left at 0, which disables dumping.
 */
void
cpu_dumpconf()
{
	const struct bdevsw *bdev;
	int nblks, dumpblks;	/* size of dump area */

	if (dumpdev == NODEV)
		goto bad;
	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdev->d_psize == NULL)
		goto bad;
	nblks = (*bdev->d_psize)(dumpdev);
	if (nblks <= ctod(1))
		goto bad;

	/* Headers plus all dumpable RAM, in disk blocks. */
	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		goto bad;
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		goto bad;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();
	return;

bad:
	dumpsize = 0;
}
2444:
1.1 cgd 2445: /*
2446: * Doadump comes here after turning off memory management and
2447: * getting on the dump stack, either when called above, or by
2448: * the auto-restart code.
2449: */
1.414 thorpej 2450: #define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize XXX small */
1.314 thorpej 2451: static vaddr_t dumpspace;
1.163 cgd 2452:
1.314 thorpej 2453: vaddr_t
1.163 cgd 2454: reserve_dumppages(p)
1.314 thorpej 2455: vaddr_t p;
1.163 cgd 2456: {
2457:
2458: dumpspace = p;
2459: return (p + BYTES_PER_DUMP);
2460: }
2461:
/*
 * dumpsys: write a crash dump of physical memory to the dump device.
 *
 * Saves our register state, writes the machine-dependent headers via
 * cpu_dump(), then copies each memory cluster to disk in
 * BYTES_PER_DUMP chunks, mapping each chunk through the reserved
 * dumpspace window.  Progress and the final status are reported on
 * the console.
 */
void
dumpsys()
{
	u_long totalbytesleft, bytes, i, n, memseg;
	u_long maddr;
	int psize;
	daddr_t blkno;
	const struct bdevsw *bdev;
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	if (dumpdev == NODEV)
		return;

	bdev = bdevsw_lookup(dumpdev);
	if (bdev == NULL || bdev->d_psize == NULL)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	psize = (*bdev->d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work.  grr. */
	/* toss any characters present prior to dump */
	while (sget() != NULL); /*syscons and pccons differ */
#endif

	/* Headers first; bail out with a status message on failure. */
	if ((error = cpu_dump()) != 0)
		goto err;

	totalbytesleft = ptoa(cpu_dump_mempagecnt());
	blkno = dumplo + cpu_dumpsize();
	dump = bdev->d_dump;
	error = 0;

	for (memseg = 0; memseg < mem_cluster_cnt; memseg++) {
		maddr = mem_clusters[memseg].start;
		bytes = mem_clusters[memseg].size;

		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
			/* Print out how many MBs we have left to go. */
			if ((totalbytesleft % (1024*1024)) == 0)
				printf("%ld ", totalbytesleft / (1024 * 1024));

			/* Limit size for next transfer. */
			n = bytes - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			/* Map the physical chunk read-only and write it out. */
			(void) pmap_map(dumpspace, maddr, maddr + n,
			    VM_PROT_READ);

			error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
			if (error)
				goto err;
			maddr += n;
			blkno += btodb(n);			/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
			/* operator aborting dump? */
			if (sget() != NULL) {
				error = EINTR;
				break;
			}
#endif
		}
	}

err:
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	/* Leave the message on screen before the machine resets. */
	delay(5000000);		/* 5 seconds */
}
2584:
/*
 * Clear registers on exec
 *
 * Resets the process's FPU and LDT state and initializes the
 * trapframe so the new image starts at its entry point with a clean
 * register set: user segment selectors, stack pointer at `stack',
 * and ebx pointing at the ps_strings structure.
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct trapframe *tf;

#if NNPX > 0
	/* If we were using the FPU, forget about it. */
	if (p->p_addr->u_pcb.pcb_fpcpu != NULL)
		npxsave_proc(p, 0);
#endif

#ifdef USER_LDT
	/* Drop any per-process LDT the old image installed. */
	pmap_ldt_cleanup(p);
#endif

	/* Reset FPU control state to the default for the new image. */
	p->p_md.md_flags &= ~MDP_USEDFPU;
	if (i386_use_fxsave) {
		pcb->pcb_savefpu.sv_xmm.sv_env.en_cw = __NetBSD_NPXCW__;
		pcb->pcb_savefpu.sv_xmm.sv_env.en_mxcsr = __INITIAL_MXCSR__;
	} else
		pcb->pcb_savefpu.sv_87.sv_env.en_cw = __NetBSD_NPXCW__;

	tf = p->p_md.md_regs;
	tf->tf_gs = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_fs = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_es = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_ds = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_edi = 0;
	tf->tf_esi = 0;
	tf->tf_ebp = 0;
	tf->tf_ebx = (int)p->p_psstr;
	tf->tf_edx = 0;
	tf->tf_ecx = 0;
	tf->tf_eax = 0;
	tf->tf_eip = pack->ep_entry;
	tf->tf_cs = LSEL(LUCODE_SEL, SEL_UPL);
	tf->tf_eflags = PSL_USERSET;
	tf->tf_esp = stack;
	tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL);
}
2632:
2633: /*
1.55 brezak 2634: * Initialize segments and descriptor tables
1.1 cgd 2635: */
2636:
union descriptor *idt, *gdt, *ldt;	/* system descriptor tables */
#ifdef I586_CPU
union descriptor *pentium_idt;	/* alias mapping of idt used on I586 CPUs */
#endif
extern struct user *proc0paddr;
1.49 brezak 2642:
1.178 mycroft 2643: void
1.489 fvdl 2644: setgate(gd, func, args, type, dpl, sel)
1.178 mycroft 2645: struct gate_descriptor *gd;
2646: void *func;
1.489 fvdl 2647: int args, type, dpl, sel;
1.178 mycroft 2648: {
1.1 cgd 2649:
1.178 mycroft 2650: gd->gd_looffset = (int)func;
1.489 fvdl 2651: gd->gd_selector = sel;
1.178 mycroft 2652: gd->gd_stkcpy = args;
2653: gd->gd_xx = 0;
2654: gd->gd_type = type;
2655: gd->gd_dpl = dpl;
2656: gd->gd_p = 1;
2657: gd->gd_hioffset = (int)func >> 16;
2658: }
2659:
2660: void
1.484 fvdl 2661: unsetgate(gd)
2662: struct gate_descriptor *gd;
2663: {
2664: gd->gd_p = 0;
2665: gd->gd_hioffset = 0;
2666: gd->gd_looffset = 0;
2667: gd->gd_selector = 0;
2668: gd->gd_xx = 0;
2669: gd->gd_stkcpy = 0;
2670: gd->gd_type = 0;
2671: gd->gd_dpl = 0;
2672: }
2673:
2674:
2675: void
1.178 mycroft 2676: setregion(rd, base, limit)
2677: struct region_descriptor *rd;
2678: void *base;
2679: size_t limit;
2680: {
2681:
2682: rd->rd_limit = (int)limit;
2683: rd->rd_base = (int)base;
2684: }
1.1 cgd 2685:
1.174 mycroft 2686: void
2687: setsegment(sd, base, limit, type, dpl, def32, gran)
2688: struct segment_descriptor *sd;
2689: void *base;
2690: size_t limit;
2691: int type, dpl, def32, gran;
2692: {
1.1 cgd 2693:
1.174 mycroft 2694: sd->sd_lolimit = (int)limit;
2695: sd->sd_lobase = (int)base;
2696: sd->sd_type = type;
2697: sd->sd_dpl = dpl;
2698: sd->sd_p = 1;
2699: sd->sd_hilimit = (int)limit >> 16;
2700: sd->sd_xx = 0;
2701: sd->sd_def32 = def32;
2702: sd->sd_gran = gran;
2703: sd->sd_hibase = (int)base >> 24;
2704: }
1.1 cgd 2705:
#define IDTVEC(name) __CONCAT(X, name)
typedef void (vector) __P((void));	/* trap/interrupt entry stub */
extern vector IDTVEC(syscall);		/* native syscall entry */
extern vector IDTVEC(osyscall);		/* old-style (lcall) syscall entry */
extern vector *IDTVEC(exceptions)[];	/* table of CPU exception entries */
#ifdef COMPAT_SVR4
extern vector IDTVEC(svr4_fasttrap);
#endif /* COMPAT_SVR4 */
#ifdef COMPAT_MACH
extern vector IDTVEC(mach_trap);
#endif

#define KBTOB(x) ((size_t)(x) * 1024UL)	/* kilobytes -> bytes */
2719:
/*
 * cpu_init_idt: load the interrupt descriptor table register.
 * On I586 kernels the pentium_idt alias mapping of the IDT is loaded
 * instead of idt itself (presumably the read-only alias used for the
 * Pentium F00F workaround — confirm against the pmap setup).
 */
void cpu_init_idt()
{
	struct region_descriptor region;
#ifdef I586_CPU
	setregion(&region, pentium_idt, NIDT * sizeof(idt[0]) - 1);
#else
	setregion(&region, idt, NIDT * sizeof(idt[0]) - 1);
#endif
	lidt(&region);
}
2730:
#if !defined(REALBASEMEM) && !defined(REALEXTMEM)
/*
 * add_mem_cluster: record one BIOS memory-map segment.
 *
 * Reserves [seg_start, seg_end) in the iomem extent map, and — if the
 * segment is plain RAM (BIM_Memory) — appends it, page-rounded, to
 * mem_clusters[] and accounts it in physmem/avail_end.  Segments above
 * 4GB, empty segments, and duplicates are skipped.
 */
void
add_mem_cluster(seg_start, seg_end, type)
	u_int64_t seg_start, seg_end;
	u_int32_t type;
{
	extern struct extent *iomem_ex;
	int i;

	/* This 32-bit port cannot address memory above 4GB. */
	if (seg_end > 0x100000000ULL) {
		printf("WARNING: skipping large "
		    "memory map entry: "
		    "0x%qx/0x%qx/0x%x\n",
		    seg_start,
		    (seg_end - seg_start),
		    type);
		return;
	}

	/*
	 * XXX Chop the last page off the size so that
	 * XXX it can fit in avail_end.
	 */
	if (seg_end == 0x100000000ULL)
		seg_end -= PAGE_SIZE;

	if (seg_end <= seg_start)
		return;

	/* Ignore segments we have already recorded. */
	for (i = 0; i < mem_cluster_cnt; i++) {
		if ((mem_clusters[i].start == round_page(seg_start))
		    && (mem_clusters[i].size
			== trunc_page(seg_end) - mem_clusters[i].start)) {
#ifdef DEBUG_MEMLOAD
			printf("WARNING: skipping duplicate segment entry\n");
#endif
			return;
		}
	}

	/*
	 * Allocate the physical addresses used by RAM
	 * from the iomem extent map.  This is done before
	 * the addresses are page rounded just to make
	 * sure we get them all.
	 */
	if (extent_alloc_region(iomem_ex, seg_start,
	    seg_end - seg_start, EX_NOWAIT)) {
		/* XXX What should we do? */
		printf("WARNING: CAN'T ALLOCATE "
		    "MEMORY SEGMENT "
		    "(0x%qx/0x%qx/0x%x) FROM "
		    "IOMEM EXTENT MAP!\n",
		    seg_start, seg_end - seg_start, type);
		return;
	}

	/*
	 * If it's not free memory, skip it.
	 */
	if (type != BIM_Memory)
		return;

	/* XXX XXX XXX */
	if (mem_cluster_cnt >= VM_PHYSSEG_MAX)
		panic("init386: too many memory segments");

	seg_start = round_page(seg_start);
	seg_end = trunc_page(seg_end);

	if (seg_start == seg_end)
		return;

	mem_clusters[mem_cluster_cnt].start = seg_start;
	mem_clusters[mem_cluster_cnt].size =
	    seg_end - seg_start;

	if (avail_end < seg_end)
		avail_end = seg_end;
	physmem += atop(mem_clusters[mem_cluster_cnt].size);
	mem_cluster_cnt++;
}
#endif /* !defined(REALBASEMEM) && !defined(REALEXTMEM) */
2814:
/*
 * initgdt: build the boot-time global descriptor table in `tgdt' and
 * load it.  Fills in the kernel and user code/data segments, the
 * optional Mach syscall gate and BIOS-call trampoline segments, and
 * the per-CPU (GCPU_SEL) segment pointing at cpu_info_primary.
 */
void
initgdt(union descriptor *tgdt)
{
	struct region_descriptor region;
	gdt = tgdt;
	memset(gdt, 0, NGDT*sizeof(*gdt));
	/* make gdt gates and memory segments */
	setsegment(&gdt[GCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 1, 1);
	setsegment(&gdt[GDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 1);
	setsegment(&gdt[GUCODE_SEL].sd, 0, i386_btop(VM_MAXUSER_ADDRESS) - 1,
	    SDT_MEMERA, SEL_UPL, 1, 1);
	setsegment(&gdt[GUDATA_SEL].sd, 0, i386_btop(VM_MAXUSER_ADDRESS) - 1,
	    SDT_MEMRWA, SEL_UPL, 1, 1);
#ifdef COMPAT_MACH
	/* Call gate used by Mach binary-compatibility syscalls. */
	setgate(&gdt[GMACHCALLS_SEL].gd, &IDTVEC(mach_trap), 1,
	    SDT_SYS386CGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
#endif
#if NBIOSCALL > 0
	/* bios trampoline GDT entries (16-bit: def32/gran are 0) */
	setsegment(&gdt[GBIOSCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 0,
	    0);
	setsegment(&gdt[GBIOSDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 0,
	    0);
#endif
	setsegment(&gdt[GCPU_SEL].sd, &cpu_info_primary,
	    sizeof(struct cpu_info)-1, SDT_MEMRWA, SEL_KPL, 1, 1);

	setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1);
	lgdt(&region);
}
2845:
2846: void
1.43 brezak 2847: init386(first_avail)
1.476 fvdl 2848: paddr_t first_avail;
1.2 cgd 2849: {
1.484 fvdl 2850: union descriptor *tgdt;
1.148 mycroft 2851: extern void consinit __P((void));
1.375 drochner 2852: extern struct extent *iomem_ex;
1.429 chs 2853: #if !defined(REALBASEMEM) && !defined(REALEXTMEM)
1.401 thorpej 2854: struct btinfo_memmap *bim;
1.429 chs 2855: #endif
1.381 thorpej 2856: struct region_descriptor region;
1.401 thorpej 2857: int x, first16q;
2858: u_int64_t seg_start, seg_end;
2859: u_int64_t seg_start1, seg_end1;
1.473 tshiozak 2860: paddr_t realmode_reserved_start;
2861: psize_t realmode_reserved_size;
2862: int needs_earlier_install_pte0;
1.436 jdolecek 2863: #if NBIOSCALL > 0
2864: extern int biostramp_image_size;
2865: extern u_char biostramp_image[];
2866: #endif
1.1 cgd 2867:
1.484 fvdl 2868: cpu_probe_features(&cpu_info_primary);
2869: cpu_feature = cpu_info_primary.ci_feature_flags;
2870:
1.1 cgd 2871: proc0.p_addr = proc0paddr;
1.484 fvdl 2872: cpu_info_primary.ci_curpcb = &proc0.p_addr->u_pcb;
1.275 mycroft 2873:
1.375 drochner 2874: i386_bus_space_init();
1.84 cgd 2875: consinit(); /* XXX SHOULD NOT BE DONE HERE */
1.401 thorpej 2876: /*
2877: * Initailize PAGE_SIZE-dependent variables.
2878: */
2879: uvm_setpagesize();
2880: /*
2881: * A quick sanity check.
2882: */
2883: if (PAGE_SIZE != NBPG)
2884: panic("init386: PAGE_SIZE != NBPG");
1.450 thorpej 2885:
2886: /*
2887: * Saving SSE registers won't work if the save area isn't
2888: * 16-byte aligned.
2889: */
2890: if (offsetof(struct user, u_pcb.pcb_savefpu) & 0xf)
2891: panic("init386: pcb_savefpu not 16-byte aligned");
1.440 thorpej 2892:
2893: /*
2894: * Start with 2 color bins -- this is just a guess to get us
2895: * started. We'll recolor when we determine the largest cache
2896: * sizes on the system.
2897: */
2898: uvmexp.ncolors = 2;
1.401 thorpej 2899:
1.473 tshiozak 2900: /*
1.484 fvdl 2901: * BIOS leaves data in physical page 0
2902: * Even if it didn't, our VM system doesn't like using zero as a
2903: * physical page number.
2904: * We may also need pages in low memory (one each) for secondary CPU
2905: * startup, for BIOS calls, and for ACPI, plus a page table page to map
2906: * them into the first few pages of the kernel's pmap.
1.473 tshiozak 2907: */
2908: avail_start = PAGE_SIZE;
2909:
2910: /*
2911: * reserve memory for real-mode call
2912: */
2913: needs_earlier_install_pte0 = 0;
2914: realmode_reserved_start = 0;
2915: realmode_reserved_size = 0;
1.414 thorpej 2916: #if NBIOSCALL > 0
1.473 tshiozak 2917: /* save us a page for trampoline code */
2918: realmode_reserved_size += PAGE_SIZE;
2919: needs_earlier_install_pte0 = 1;
2920: #endif
1.484 fvdl 2921: #ifdef MULTIPROCESSOR /* XXX */
2922: KASSERT(avail_start == PAGE_SIZE); /* XXX */
2923: if (realmode_reserved_size < MP_TRAMPOLINE) /* XXX */
2924: realmode_reserved_size = MP_TRAMPOLINE; /* XXX */
2925: needs_earlier_install_pte0 = 1; /* XXX */
2926: #endif /* XXX */
1.473 tshiozak 2927: #if NACPI > 0
2928: /* trampoline code for wake handler */
2929: realmode_reserved_size += ptoa(acpi_md_get_npages_of_wakecode()+1);
2930: needs_earlier_install_pte0 = 1;
2931: #endif
2932: if (needs_earlier_install_pte0) {
2933: /* page table for directory entry 0 */
2934: realmode_reserved_size += PAGE_SIZE;
2935: }
2936: if (realmode_reserved_size>0) {
2937: realmode_reserved_start = avail_start;
2938: avail_start += realmode_reserved_size;
2939: }
1.414 thorpej 2940:
1.492 kanaoka 2941: #ifdef DEBUG_MEMLOAD
2942: printf("mem_cluster_count: %d\n", mem_cluster_cnt);
2943: #endif
2944:
1.401 thorpej 2945: /*
2946: * Call pmap initialization to make new kernel address space.
2947: * We must do this before loading pages into the VM system.
2948: */
1.314 thorpej 2949: pmap_bootstrap((vaddr_t)atdevbase + IOM_SIZE);
1.2 cgd 2950:
1.429 chs 2951: #if !defined(REALBASEMEM) && !defined(REALEXTMEM)
1.401 thorpej 2952: /*
2953: * Check to see if we have a memory map from the BIOS (passed
2954: * to us by the boot program.
2955: */
2956: bim = lookup_bootinfo(BTINFO_MEMMAP);
1.407 enami 2957: if (bim != NULL && bim->num > 0) {
1.463 enami 2958: #ifdef DEBUG_MEMLOAD
1.401 thorpej 2959: printf("BIOS MEMORY MAP (%d ENTRIES):\n", bim->num);
2960: #endif
2961: for (x = 0; x < bim->num; x++) {
1.463 enami 2962: #ifdef DEBUG_MEMLOAD
1.401 thorpej 2963: printf(" addr 0x%qx size 0x%qx type 0x%x\n",
2964: bim->entry[x].addr,
2965: bim->entry[x].size,
2966: bim->entry[x].type);
2967: #endif
2968:
2969: /*
2970: * If the segment is not memory, skip it.
2971: */
2972: switch (bim->entry[x].type) {
2973: case BIM_Memory:
2974: case BIM_ACPI:
2975: case BIM_NVS:
2976: break;
2977: default:
2978: continue;
2979: }
2980:
2981: /*
2982: * Sanity check the entry.
2983: * XXX Need to handle uint64_t in extent code
2984: * XXX and 64-bit physical addresses in i386
2985: * XXX port.
2986: */
2987: seg_start = bim->entry[x].addr;
2988: seg_end = bim->entry[x].addr + bim->entry[x].size;
2989:
1.432 kanaoka 2990: /*
1.433 kanaoka 2991: * Avoid Compatibility Holes.
2992: * XXX Holes within memory space that allow access
2993: * XXX to be directed to the PC-compatible frame buffer
1.484 fvdl 2994: * XXX (0xa0000-0xbffff),to adapter ROM space
1.433 kanaoka 2995: * XXX (0xc0000-0xdffff), and to system BIOS space
2996: * XXX (0xe0000-0xfffff).
		 * XXX Some laptops (for example, the Toshiba Satellite
		 * XXX 2550X) report this area in their memory map, which
		 * XXX caused problems, so we avoid using it.
1.432 kanaoka 3000: */
1.433 kanaoka 3001: if (seg_start < 0x100000 && seg_end > 0xa0000) {
3002: printf("WARNING: memory map entry overlaps "
3003: "with ``Compatibility Holes'': "
3004: "0x%qx/0x%qx/0x%x\n", seg_start,
3005: seg_end - seg_start, bim->entry[x].type);
3006: add_mem_cluster(seg_start, 0xa0000,
3007: bim->entry[x].type);
3008: add_mem_cluster(0x100000, seg_end,
1.401 thorpej 3009: bim->entry[x].type);
1.433 kanaoka 3010: } else
3011: add_mem_cluster(seg_start, seg_end,
1.401 thorpej 3012: bim->entry[x].type);
3013: }
1.421 aymeric 3014: }
1.429 chs 3015: #endif /* ! REALBASEMEM && ! REALEXTMEM */
1.421 aymeric 3016: /*
3017: * If the loop above didn't find any valid segment, fall back to
3018: * former code.
3019: */
3020: if (mem_cluster_cnt == 0) {
1.401 thorpej 3021: /*
3022: * Allocate the physical addresses used by RAM from the iomem
3023: * extent map. This is done before the addresses are
3024: * page rounded just to make sure we get them all.
3025: */
3026: if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem),
3027: EX_NOWAIT)) {
3028: /* XXX What should we do? */
3029: printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM "
3030: "IOMEM EXTENT MAP!\n");
3031: }
3032: mem_clusters[0].start = 0;
3033: mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
3034: physmem += atop(mem_clusters[0].size);
3035: if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
3036: EX_NOWAIT)) {
3037: /* XXX What should we do? */
3038: printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM "
3039: "IOMEM EXTENT MAP!\n");
3040: }
3041: #if NISADMA > 0
3042: /*
3043: * Some motherboards/BIOSes remap the 384K of RAM that would
3044: * normally be covered by the ISA hole to the end of memory
3045: * so that it can be used. However, on a 16M system, this
3046: * would cause bounce buffers to be allocated and used.
3047: * This is not desirable behaviour, as more than 384K of
3048: * bounce buffers might be allocated. As a work-around,
3049: * we round memory down to the nearest 1M boundary if
3050: * we're using any isadma devices and the remapped memory
3051: * is what puts us over 16M.
3052: */
3053: if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
3054: char pbuf[9];
3055:
3056: format_bytes(pbuf, sizeof(pbuf),
3057: biosextmem - (15*1024));
3058: printf("Warning: ignoring %s of remapped memory\n",
3059: pbuf);
3060: biosextmem = (15*1024);
3061: }
3062: #endif
3063: mem_clusters[1].start = IOM_END;
3064: mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
3065: physmem += atop(mem_clusters[1].size);
3066:
3067: mem_cluster_cnt = 2;
3068:
3069: avail_end = IOM_END + trunc_page(KBTOB(biosextmem));
3070: }
3071: /*
3072: * If we have 16M of RAM or less, just put it all on
3073: * the default free list. Otherwise, put the first
3074: * 16M of RAM on a lower priority free list (so that
3075: * all of the ISA DMA'able memory won't be eaten up
3076: * first-off).
3077: */
3078: if (avail_end <= (16 * 1024 * 1024))
3079: first16q = VM_FREELIST_DEFAULT;
3080: else
3081: first16q = VM_FREELIST_FIRST16;
3082:
3083: /* Make sure the end of the space used by the kernel is rounded. */
3084: first_avail = round_page(first_avail);
3085:
3086: /*
3087: * Now, load the memory clusters (which have already been
3088: * rounded and truncated) into the VM system.
3089: *
3090: * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL
3091: * IS LOADED AT IOM_END (1M).
3092: */
3093: for (x = 0; x < mem_cluster_cnt; x++) {
3094: seg_start = mem_clusters[x].start;
3095: seg_end = mem_clusters[x].start + mem_clusters[x].size;
3096: seg_start1 = 0;
3097: seg_end1 = 0;
3098:
3099: /*
3100: * Skip memory before our available starting point.
3101: */
3102: if (seg_end <= avail_start)
3103: continue;
3104:
3105: if (avail_start >= seg_start && avail_start < seg_end) {
3106: if (seg_start != 0)
3107: panic("init386: memory doesn't start at 0");
3108: seg_start = avail_start;
3109: if (seg_start == seg_end)
3110: continue;
3111: }
3112:
3113: /*
3114: * If this segment contains the kernel, split it
3115: * in two, around the kernel.
3116: */
3117: if (seg_start <= IOM_END && first_avail <= seg_end) {
3118: seg_start1 = first_avail;
3119: seg_end1 = seg_end;
3120: seg_end = IOM_END;
3121: }
3122:
3123: /* First hunk */
3124: if (seg_start != seg_end) {
1.480 erh 3125: if (seg_start < (16 * 1024 * 1024) &&
1.401 thorpej 3126: first16q != VM_FREELIST_DEFAULT) {
3127: u_int64_t tmp;
3128:
3129: if (seg_end > (16 * 1024 * 1024))
3130: tmp = (16 * 1024 * 1024);
3131: else
3132: tmp = seg_end;
1.492 kanaoka 3133:
3134: if (tmp != seg_start) {
1.463 enami 3135: #ifdef DEBUG_MEMLOAD
1.492 kanaoka 3136: printf("loading 0x%qx-0x%qx "
3137: "(0x%lx-0x%lx)\n",
3138: seg_start, tmp,
3139: atop(seg_start), atop(tmp));
3140: #endif
3141: uvm_page_physload(atop(seg_start),
3142: atop(tmp), atop(seg_start),
3143: atop(tmp), first16q);
3144: }
1.401 thorpej 3145: seg_start = tmp;
3146: }
1.411 enami 3147:
3148: if (seg_start != seg_end) {
1.463 enami 3149: #ifdef DEBUG_MEMLOAD
1.411 enami 3150: printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
3151: seg_start, seg_end,
3152: atop(seg_start), atop(seg_end));
3153: #endif
3154: uvm_page_physload(atop(seg_start),
3155: atop(seg_end), atop(seg_start),
3156: atop(seg_end), VM_FREELIST_DEFAULT);
3157: }
1.401 thorpej 3158: }
3159:
3160: /* Second hunk */
3161: if (seg_start1 != seg_end1) {
1.480 erh 3162: if (seg_start1 < (16 * 1024 * 1024) &&
1.401 thorpej 3163: first16q != VM_FREELIST_DEFAULT) {
3164: u_int64_t tmp;
3165:
3166: if (seg_end1 > (16 * 1024 * 1024))
3167: tmp = (16 * 1024 * 1024);
3168: else
3169: tmp = seg_end1;
1.492 kanaoka 3170:
3171: if (tmp != seg_start1) {
1.463 enami 3172: #ifdef DEBUG_MEMLOAD
1.492 kanaoka 3173: printf("loading 0x%qx-0x%qx "
3174: "(0x%lx-0x%lx)\n",
3175: seg_start1, tmp,
3176: atop(seg_start1), atop(tmp));
3177: #endif
3178: uvm_page_physload(atop(seg_start1),
3179: atop(tmp), atop(seg_start1),
3180: atop(tmp), first16q);
3181: }
1.401 thorpej 3182: seg_start1 = tmp;
3183: }
1.412 enami 3184:
3185: if (seg_start1 != seg_end1) {
1.463 enami 3186: #ifdef DEBUG_MEMLOAD
1.412 enami 3187: printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
3188: seg_start1, seg_end1,
3189: atop(seg_start1), atop(seg_end1));
3190: #endif
3191: uvm_page_physload(atop(seg_start1),
3192: atop(seg_end1), atop(seg_start1),
3193: atop(seg_end1), VM_FREELIST_DEFAULT);
3194: }
1.401 thorpej 3195: }
3196: }
3197:
3198: /*
3199: * Steal memory for the message buffer (at end of core).
3200: */
3201: {
3202: struct vm_physseg *vps;
3203: psize_t sz = round_page(MSGBUFSIZE);
3204: psize_t reqsz = sz;
3205:
3206: for (x = 0; x < vm_nphysseg; x++) {
3207: vps = &vm_physmem[x];
3208: if (ptoa(vps->avail_end) == avail_end)
3209: break;
3210: }
3211: if (x == vm_nphysseg)
1.410 christos 3212: panic("init386: can't find end of memory");
1.401 thorpej 3213:
3214: /* Shrink so it'll fit in the last segment. */
3215: if ((vps->avail_end - vps->avail_start) < atop(sz))
3216: sz = ptoa(vps->avail_end - vps->avail_start);
3217:
3218: vps->avail_end -= atop(sz);
3219: vps->end -= atop(sz);
3220: msgbuf_paddr = ptoa(vps->avail_end);
3221:
3222: /* Remove the last segment if it now has no pages. */
3223: if (vps->start == vps->end) {
3224: for (vm_nphysseg--; x < vm_nphysseg; x++)
3225: vm_physmem[x] = vm_physmem[x + 1];
3226: }
3227:
3228: /* Now find where the new avail_end is. */
3229: for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
3230: if (vm_physmem[x].avail_end > avail_end)
3231: avail_end = vm_physmem[x].avail_end;
3232: avail_end = ptoa(avail_end);
3233:
3234: /* Warn if the message buffer had to be shrunk. */
3235: if (sz != reqsz)
3236: printf("WARNING: %ld bytes not available for msgbuf "
3237: "in last cluster (%ld used)\n", reqsz, sz);
3238: }
3239:
1.473 tshiozak 3240: /*
3241: * install PT page for the first 4M if needed.
3242: */
3243: if (needs_earlier_install_pte0) {
3244: paddr_t paddr;
3245: #ifdef DIAGNOSTIC
3246: if (realmode_reserved_size < PAGE_SIZE) {
3247: panic("cannot steal memory for first 4M PT page.");
3248: }
3249: #endif
3250: paddr=realmode_reserved_start+realmode_reserved_size-PAGE_SIZE;
3251: pmap_enter(pmap_kernel(), (vaddr_t)vtopte(0), paddr,
3252: VM_PROT_READ|VM_PROT_WRITE,
3253: PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
3254: pmap_update(pmap_kernel());
3255: /* make sure it is clean before using */
3256: memset(vtopte(0), 0, PAGE_SIZE);
3257: realmode_reserved_size -= PAGE_SIZE;
3258: }
3259:
1.295 drochner 3260: #if NBIOSCALL > 0
1.436 jdolecek 3261: /*
3262: * this should be caught at kernel build time, but put it here
3263: * in case someone tries to fake it out...
3264: */
3265: #ifdef DIAGNOSTIC
1.473 tshiozak 3266: if (realmode_reserved_start > BIOSTRAMP_BASE ||
1.474 tron 3267: (realmode_reserved_start+realmode_reserved_size) < (BIOSTRAMP_BASE+
1.473 tshiozak 3268: PAGE_SIZE)) {
3269: panic("cannot steal memory for PT page of bioscall.");
3270: }
1.436 jdolecek 3271: if (biostramp_image_size > PAGE_SIZE)
1.483 provos 3272: panic("biostramp_image_size too big: %x vs. %x",
1.436 jdolecek 3273: biostramp_image_size, PAGE_SIZE);
3274: #endif
3275: pmap_kenter_pa((vaddr_t)BIOSTRAMP_BASE, /* virtual */
3276: (paddr_t)BIOSTRAMP_BASE, /* physical */
3277: VM_PROT_ALL); /* protection */
1.456 chris 3278: pmap_update(pmap_kernel());
1.436 jdolecek 3279: memcpy((caddr_t)BIOSTRAMP_BASE, biostramp_image, biostramp_image_size);
3280: #ifdef DEBUG_BIOSCALL
3281: printf("biostramp installed @ %x\n", BIOSTRAMP_BASE);
3282: #endif
1.473 tshiozak 3283: realmode_reserved_size -= PAGE_SIZE;
3284: realmode_reserved_start += PAGE_SIZE;
3285: #endif
3286:
3287: #if NACPI > 0
3288: /*
3289: * Steal memory for the acpi wake code
3290: */
3291: {
3292: paddr_t paddr, p;
3293: psize_t sz;
3294: int npg;
3295:
3296: paddr = realmode_reserved_start;
3297: npg = acpi_md_get_npages_of_wakecode();
3298: sz = ptoa(npg);
3299: #ifdef DIAGNOSTIC
3300: if (realmode_reserved_size < sz) {
3301: panic("cannot steal memory for ACPI wake code.");
3302: }
3303: #endif
3304:
3305: /* identical mapping */
3306: p = paddr;
3307: for (x=0; x<npg; x++) {
3308: printf("kenter: 0x%08X\n", (unsigned)p);
3309: pmap_kenter_pa((vaddr_t)p, p, VM_PROT_ALL);
3310: p += PAGE_SIZE;
3311: }
3312: pmap_update(pmap_kernel());
3313:
3314: acpi_md_install_wakecode(paddr);
3315:
3316: realmode_reserved_size -= sz;
3317: realmode_reserved_start += sz;
3318: }
1.295 drochner 3319: #endif
1.59 mycroft 3320:
1.346 mycroft 3321: pmap_enter(pmap_kernel(), idt_vaddr, idt_paddr,
1.367 thorpej 3322: VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
1.456 chris 3323: pmap_update(pmap_kernel());
1.484 fvdl 3324: memset((void *)idt_vaddr, 0, PAGE_SIZE);
3325:
1.275 mycroft 3326: idt = (union descriptor *)idt_vaddr;
3327: #ifdef I586_CPU
1.346 mycroft 3328: pmap_enter(pmap_kernel(), pentium_idt_vaddr, idt_paddr,
1.367 thorpej 3329: VM_PROT_READ, PMAP_WIRED|VM_PROT_READ);
1.275 mycroft 3330: pentium_idt = (union descriptor *)pentium_idt_vaddr;
3331: #endif
1.484 fvdl 3332: pmap_update(pmap_kernel());
3333:
3334: tgdt = gdt;
1.275 mycroft 3335: gdt = idt + NIDT;
3336: ldt = gdt + NGDT;
3337:
1.484 fvdl 3338: memcpy(gdt, tgdt, NGDT*sizeof(*gdt));
1.275 mycroft 3339:
3340: setsegment(&gdt[GLDT_SEL].sd, ldt, NLDT * sizeof(ldt[0]) - 1,
3341: SDT_SYSLDT, SEL_KPL, 0, 0);
3342:
3343: /* make ldt gates and memory segments */
3344: setgate(&ldt[LSYS5CALLS_SEL].gd, &IDTVEC(osyscall), 1,
1.489 fvdl 3345: SDT_SYS386CGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1.447 christos 3346:
1.275 mycroft 3347: ldt[LUCODE_SEL] = gdt[GUCODE_SEL];
3348: ldt[LUDATA_SEL] = gdt[GUDATA_SEL];
1.324 christos 3349: ldt[LSOL26CALLS_SEL] = ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
1.275 mycroft 3350:
3351: /* exceptions */
3352: for (x = 0; x < 32; x++)
3353: setgate(&idt[x].gd, IDTVEC(exceptions)[x], 0, SDT_SYS386TGT,
1.489 fvdl 3354: (x == 3 || x == 4) ? SEL_UPL : SEL_KPL,
3355: GSEL(GCODE_SEL, SEL_KPL));
1.257 thorpej 3356:
1.275 mycroft 3357: /* new-style interrupt gate for syscalls */
1.489 fvdl 3358: setgate(&idt[128].gd, &IDTVEC(syscall), 0, SDT_SYS386TGT, SEL_UPL,
3359: GSEL(GCODE_SEL, SEL_KPL));
1.333 christos 3360: #ifdef COMPAT_SVR4
3361: setgate(&idt[0xd2].gd, &IDTVEC(svr4_fasttrap), 0, SDT_SYS386TGT,
1.489 fvdl 3362: SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1.333 christos 3363: #endif /* COMPAT_SVR4 */
1.264 mycroft 3364:
1.275 mycroft 3365: setregion(®ion, gdt, NGDT * sizeof(gdt[0]) - 1);
3366: lgdt(®ion);
1.484 fvdl 3367:
3368: cpu_init_idt();
1.264 mycroft 3369:
1.190 mycroft 3370: #ifdef DDB
1.308 tv 3371: {
3372: extern int end;
3373: extern int *esym;
1.336 christos 3374: struct btinfo_symtab *symtab;
1.308 tv 3375:
1.484 fvdl 3376: db_machine_init();
3377:
1.336 christos 3378: symtab = lookup_bootinfo(BTINFO_SYMTAB);
1.484 fvdl 3379:
1.336 christos 3380: if (symtab) {
3381: symtab->ssym += KERNBASE;
3382: symtab->esym += KERNBASE;
3383: ddb_init(symtab->nsym, (int *)symtab->ssym,
3384: (int *)symtab->esym);
3385: }
3386: else
3387: ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
1.308 tv 3388: }
1.190 mycroft 3389: if (boothowto & RB_KDB)
3390: Debugger();
1.377 ws 3391: #endif
3392: #ifdef IPKDB
3393: ipkdb_init();
3394: if (boothowto & RB_KDB)
3395: ipkdb_connect(0);
1.190 mycroft 3396: #endif
3397: #ifdef KGDB
1.243 drochner 3398: kgdb_port_init();
1.235 thorpej 3399: if (boothowto & RB_KDB) {
3400: kgdb_debug_init = 1;
1.242 drochner 3401: kgdb_connect(1);
1.235 thorpej 3402: }
1.384 jdolecek 3403: #endif
3404:
3405: #if NMCA > 0
3406: /* check for MCA bus, needed to be done before ISA stuff - if
3407: * MCA is detected, ISA needs to use level triggered interrupts
3408: * by default */
3409: mca_busprobe();
1.190 mycroft 3410: #endif
1.275 mycroft 3411:
3412: #if NISA > 0
3413: isa_defaultirq();
3414: #endif
1.431 thorpej 3415:
3416: /* Initialize software interrupts. */
3417: softintr_init();
1.275 mycroft 3418:
1.484 fvdl 3419: splraise(IPL_SERIAL); /* XXX MP clean me */
1.275 mycroft 3420: enable_intr();
3421:
3422: if (physmem < btoc(2 * 1024 * 1024)) {
3423: printf("warning: too little memory available; "
1.383 mycroft 3424: "have %lu bytes, want %lu bytes\n"
1.275 mycroft 3425: "running in degraded mode\n"
3426: "press a key to confirm\n\n",
1.383 mycroft 3427: ptoa(physmem), 2*1024*1024UL);
1.275 mycroft 3428: cngetc();
3429: }
1.1 cgd 3430: }
3431:
1.107 deraadt 3432: #ifdef COMPAT_NOMID
3433: static int
3434: exec_nomid(p, epp)
1.59 mycroft 3435: struct proc *p;
3436: struct exec_package *epp;
1.31 cgd 3437: {
1.59 mycroft 3438: int error;
3439: u_long midmag, magic;
3440: u_short mid;
1.80 cgd 3441: struct exec *execp = epp->ep_hdr;
1.31 cgd 3442:
1.80 cgd 3443: /* check on validity of epp->ep_hdr performed by exec_out_makecmds */
3444:
3445: midmag = ntohl(execp->a_midmag);
1.59 mycroft 3446: mid = (midmag >> 16) & 0xffff;
3447: magic = midmag & 0xffff;
3448:
3449: if (magic == 0) {
1.80 cgd 3450: magic = (execp->a_midmag & 0xffff);
1.59 mycroft 3451: mid = MID_ZERO;
3452: }
3453:
3454: midmag = mid << 16 | magic;
3455:
3456: switch (midmag) {
3457: case (MID_ZERO << 16) | ZMAGIC:
3458: /*
3459: * 386BSD's ZMAGIC format:
3460: */
1.202 christos 3461: error = exec_aout_prep_oldzmagic(p, epp);
1.59 mycroft 3462: break;
3463:
3464: case (MID_ZERO << 16) | QMAGIC:
3465: /*
3466: * BSDI's QMAGIC format:
3467: * same as new ZMAGIC format, but with different magic number
3468: */
3469: error = exec_aout_prep_zmagic(p, epp);
3470: break;
3471:
1.202 christos 3472: case (MID_ZERO << 16) | NMAGIC:
3473: /*
3474: * BSDI's NMAGIC format:
3475: * same as NMAGIC format, but with different magic number
3476: * and with text starting at 0.
3477: */
3478: error = exec_aout_prep_oldnmagic(p, epp);
3479: break;
3480:
3481: case (MID_ZERO << 16) | OMAGIC:
3482: /*
3483: * BSDI's OMAGIC format:
3484: * same as OMAGIC format, but with different magic number
3485: * and with text starting at 0.
3486: */
3487: error = exec_aout_prep_oldomagic(p, epp);
3488: break;
3489:
1.59 mycroft 3490: default:
3491: error = ENOEXEC;
3492: }
3493:
3494: return error;
1.107 deraadt 3495: }
1.31 cgd 3496: #endif
1.107 deraadt 3497:
3498: /*
3499: * cpu_exec_aout_makecmds():
3500: * cpu-dependent a.out format hook for execve().
3501: *
3502: * Determine of the given exec package refers to something which we
3503: * understand and, if so, set up the vmcmds for it.
3504: *
3505: * On the i386, old (386bsd) ZMAGIC binaries and BSDI QMAGIC binaries
3506: * if COMPAT_NOMID is given as a kernel option.
3507: */
3508: int
3509: cpu_exec_aout_makecmds(p, epp)
3510: struct proc *p;
3511: struct exec_package *epp;
3512: {
3513: int error = ENOEXEC;
3514:
3515: #ifdef COMPAT_NOMID
3516: if ((error = exec_nomid(p, epp)) == 0)
3517: return error;
3518: #endif /* ! COMPAT_NOMID */
3519:
3520: return error;
1.31 cgd 3521: }
1.84 cgd 3522:
1.255 drochner 3523: void *
3524: lookup_bootinfo(type)
3525: int type;
3526: {
3527: struct btinfo_common *help;
3528: int n = *(int*)bootinfo;
3529: help = (struct btinfo_common *)(bootinfo + sizeof(int));
3530: while(n--) {
3531: if(help->type == type)
3532: return(help);
3533: help = (struct btinfo_common *)((char*)help + help->len);
3534: }
3535: return(0);
3536: }
1.149 mycroft 3537:
1.484 fvdl 3538: #include <dev/ic/mc146818reg.h> /* for NVRAM POST */
3539: #include <i386/isa/nvram.h> /* for NVRAM POST */
3540:
1.149 mycroft 3541: void
3542: cpu_reset()
3543: {
3544:
1.224 mycroft 3545: disable_intr();
3546:
1.227 mycroft 3547: /*
1.484 fvdl 3548: * Ensure the NVRAM reset byte contains something vaguely sane.
3549: */
3550:
3551: outb(IO_RTC, NVRAM_RESET);
3552: outb(IO_RTC+1, NVRAM_RESET_RST);
3553:
3554: /*
1.227 mycroft 3555: * The keyboard controller has 4 random output pins, one of which is
3556: * connected to the RESET pin on the CPU in many PCs. We tell the
3557: * keyboard controller to pulse this line a couple of times.
3558: */
1.273 drochner 3559: outb(IO_KBD + KBCMDP, KBC_PULSE0);
1.226 mycroft 3560: delay(100000);
1.273 drochner 3561: outb(IO_KBD + KBCMDP, KBC_PULSE0);
1.226 mycroft 3562: delay(100000);
1.149 mycroft 3563:
3564: /*
1.224 mycroft 3565: * Try to cause a triple fault and watchdog reset by making the IDT
3566: * invalid and causing a fault.
1.149 mycroft 3567: */
1.313 perry 3568: memset((caddr_t)idt, 0, NIDT * sizeof(idt[0]));
1.484 fvdl 3569: __asm __volatile("divl %0,%1" : : "q" (0), "a" (0));
1.149 mycroft 3570:
1.224 mycroft 3571: #if 0
1.149 mycroft 3572: /*
3573: * Try to cause a triple fault and watchdog reset by unmapping the
1.224 mycroft 3574: * entire address space and doing a TLB flush.
1.149 mycroft 3575: */
1.414 thorpej 3576: memset((caddr_t)PTD, 0, PAGE_SIZE);
1.484 fvdl 3577: tlbflush();
1.224 mycroft 3578: #endif
1.149 mycroft 3579:
3580: for (;;);
1.45 cgd 3581: }
1.484 fvdl 3582:
3583: void cpu_initclocks()
3584: {
3585: (*initclock_func)();
3586: }
3587:
3588: #ifdef MULTIPROCESSOR
3589: void need_resched(struct cpu_info *ci)
3590: {
3591: ci->ci_want_resched = 1;
3592: aston(ci);
3593: }
3594: #endif
3595:
3596: /*
3597: * Allocate an IDT vector slot within the given range.
3598: * XXX needs locking to avoid MP allocation races.
3599: */
3600:
3601: int
3602: idt_vec_alloc (low, high)
3603: int low;
3604: int high;
3605: {
3606: int vec;
3607:
3608: for (vec=low; vec<=high; vec++)
3609: if (idt[vec].gd.gd_p == 0)
3610: return vec;
3611: return 0;
3612: }
3613:
3614: void idt_vec_set (vec, function)
3615: int vec;
3616: void (*function) __P((void));
3617: {
1.489 fvdl 3618: setgate(&idt[vec].gd, function, 0, SDT_SYS386IGT, SEL_KPL,
3619: GSEL(GCODE_SEL, SEL_KPL));
1.484 fvdl 3620: }
3621:
3622: void
3623: idt_vec_free (vec)
3624: int vec;
3625: {
3626: unsetgate(&idt[vec].gd);
3627: }
3628:
3629: #if 0
3630: extern void xxx_lapic_time(void);
3631:
3632: void
3633: xxx_lapic_time(void)
3634: {
3635: uint64_t before, after;
3636: int i, sum;
3637:
3638: before=rdtsc();
3639: for (i=0; i<1000000; i++) {
3640: sum += lapic_tpr;
3641: }
3642: after=rdtsc();
3643: printf("1000000 lapic_tpr reads: %llx\n", after-before);
3644: }
3645: #endif
CVSweb <webmaster@jp.NetBSD.org>