Annotation of src/sys/arch/i386/i386/machdep.c, Revision 1.441
1.441 ! thorpej 1: /* $NetBSD: machdep.c,v 1.440 2001/05/03 16:55:32 thorpej Exp $ */
1.231 thorpej 2:
3: /*-
1.401 thorpej 4: * Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
1.231 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.316 mycroft 8: * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.231 thorpej 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.125 cgd 39:
1.1 cgd 40: /*-
41: * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
42: * All rights reserved.
43: *
44: * This code is derived from software contributed to Berkeley by
45: * William Jolitz.
46: *
47: * Redistribution and use in source and binary forms, with or without
48: * modification, are permitted provided that the following conditions
49: * are met:
50: * 1. Redistributions of source code must retain the above copyright
51: * notice, this list of conditions and the following disclaimer.
52: * 2. Redistributions in binary form must reproduce the above copyright
53: * notice, this list of conditions and the following disclaimer in the
54: * documentation and/or other materials provided with the distribution.
55: * 3. All advertising materials mentioning features or use of this software
56: * must display the following acknowledgement:
57: * This product includes software developed by the University of
58: * California, Berkeley and its contributors.
59: * 4. Neither the name of the University nor the names of its contributors
60: * may be used to endorse or promote products derived from this software
61: * without specific prior written permission.
62: *
63: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73: * SUCH DAMAGE.
74: *
1.125 cgd 75: * @(#)machdep.c 7.4 (Berkeley) 6/3/91
1.1 cgd 76: */
1.271 thorpej 77:
78: #include "opt_cputype.h"
1.309 jonathan 79: #include "opt_ddb.h"
1.377 ws 80: #include "opt_ipkdb.h"
1.272 thorpej 81: #include "opt_vm86.h"
1.274 thorpej 82: #include "opt_user_ldt.h"
1.310 jonathan 83: #include "opt_compat_netbsd.h"
1.327 bouyer 84: #include "opt_cpureset_delay.h"
1.333 christos 85: #include "opt_compat_svr4.h"
1.429 chs 86: #include "opt_realmem.h"
1.1 cgd 87:
1.59 mycroft 88: #include <sys/param.h>
89: #include <sys/systm.h>
90: #include <sys/signalvar.h>
91: #include <sys/kernel.h>
92: #include <sys/map.h>
93: #include <sys/proc.h>
94: #include <sys/user.h>
95: #include <sys/exec.h>
96: #include <sys/buf.h>
97: #include <sys/reboot.h>
98: #include <sys/conf.h>
99: #include <sys/file.h>
100: #include <sys/malloc.h>
101: #include <sys/mbuf.h>
102: #include <sys/msgbuf.h>
103: #include <sys/mount.h>
104: #include <sys/vnode.h>
1.204 thorpej 105: #include <sys/extent.h>
1.123 cgd 106: #include <sys/syscallargs.h>
1.291 thorpej 107: #include <sys/core.h>
108: #include <sys/kcore.h>
109: #include <machine/kcore.h>
1.57 cgd 110:
1.377 ws 111: #ifdef IPKDB
112: #include <ipkdb/ipkdb.h>
113: #endif
114:
1.235 thorpej 115: #ifdef KGDB
116: #include <sys/kgdb.h>
117: #endif
118:
1.104 cgd 119: #include <dev/cons.h>
1.390 mrg 120:
121: #include <uvm/uvm_extern.h>
1.393 fvdl 122: #include <uvm/uvm_page.h>
1.284 mrg 123:
1.200 christos 124: #include <sys/sysctl.h>
125:
1.59 mycroft 126: #include <machine/cpu.h>
127: #include <machine/cpufunc.h>
1.178 mycroft 128: #include <machine/gdt.h>
1.149 mycroft 129: #include <machine/pio.h>
1.59 mycroft 130: #include <machine/psl.h>
131: #include <machine/reg.h>
132: #include <machine/specialreg.h>
1.255 drochner 133: #include <machine/bootinfo.h>
1.43 brezak 134:
1.146 cgd 135: #include <dev/isa/isareg.h>
1.372 drochner 136: #include <machine/isa_machdep.h>
1.164 cgd 137: #include <dev/ic/i8042reg.h>
1.43 brezak 138:
1.200 christos 139: #ifdef DDB
140: #include <machine/db_machdep.h>
141: #include <ddb/db_extern.h>
142: #endif
143:
1.184 mycroft 144: #ifdef VM86
145: #include <machine/vm86.h>
146: #endif
147:
1.207 jtk 148: #include "apm.h"
1.258 jtk 149: #include "bioscall.h"
1.207 jtk 150:
1.259 jtk 151: #if NBIOSCALL > 0
152: #include <machine/bioscall.h>
153: #endif
154:
1.207 jtk 155: #if NAPM > 0
156: #include <machine/apmvar.h>
1.258 jtk 157: #endif
158:
1.59 mycroft 159: #include "isa.h"
1.231 thorpej 160: #include "isadma.h"
1.59 mycroft 161: #include "npx.h"
1.161 mycroft 162: #if NNPX > 0
163: extern struct proc *npxproc;
164: #endif
1.2 cgd 165:
1.384 jdolecek 166: #include "mca.h"
167: #if NMCA > 0
168: #include <machine/mca_machdep.h> /* for mca_busprobe() */
169: #endif
170:
1.104 cgd 171: /* the following is used externally (sysctl_hw) */
172: char machine[] = "i386"; /* cpu "architecture" */
1.232 veego 173: char machine_arch[] = "i386"; /* machine == machine_arch */
1.104 cgd 174:
1.402 explorer 175: u_int cpu_serial[3];
176:
1.255 drochner 177: char bootinfo[BOOTINFO_MAXSIZE];
1.386 thorpej 178:
179: /* Our exported CPU info; we have only one right now. */
180: struct cpu_info cpu_info_store;
1.255 drochner 181:
1.343 fvdl 182: struct bi_devmatch *i386_alldisks = NULL;
183: int i386_ndisks = 0;
1.342 fvdl 184:
1.328 bouyer 185: #ifdef CPURESET_DELAY
186: int cpureset_delay = CPURESET_DELAY;
187: #else
188: int cpureset_delay = 2000; /* default to 2s */
189: #endif
190:
1.1 cgd 191:
1.59 mycroft 192: int physmem;
1.163 cgd 193: int dumpmem_low;
194: int dumpmem_high;
1.59 mycroft 195: int boothowto;
196: int cpu_class;
1.428 fvdl 197: int i386_fpu_present;
198: int i386_fpu_exception;
199: int i386_fpu_fdivbug;
1.59 mycroft 200:
1.379 jdolecek 201: #define CPUID2MODEL(cpuid) (((cpuid) >> 4) & 15)
202:
1.314 thorpej 203: vaddr_t msgbuf_vaddr;
204: paddr_t msgbuf_paddr;
205:
206: vaddr_t idt_vaddr;
207: paddr_t idt_paddr;
208:
1.264 mycroft 209: #ifdef I586_CPU
1.314 thorpej 210: vaddr_t pentium_idt_vaddr;
1.264 mycroft 211: #endif
1.59 mycroft 212:
1.284 mrg 213: vm_map_t exec_map = NULL;
214: vm_map_t mb_map = NULL;
215: vm_map_t phys_map = NULL;
1.48 brezak 216:
1.314 thorpej 217: extern paddr_t avail_start, avail_end;
1.1 cgd 218:
1.204 thorpej 219: /*
1.291 thorpej 220: * Size of memory segments, before any memory is stolen.
221: */
222: phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
223: int mem_cluster_cnt;
224:
1.408 thorpej 225: /*
226: * The number of CPU cycles in one second.
227: */
228: u_int64_t cpu_tsc_freq;
229:
1.291 thorpej 230: int cpu_dump __P((void));
231: int cpu_dumpsize __P((void));
232: u_long cpu_dump_mempagecnt __P((void));
1.200 christos 233: void dumpsys __P((void));
1.437 thorpej 234: void identifycpu __P((struct cpu_info *));
1.314 thorpej 235: void init386 __P((paddr_t));
1.255 drochner 236:
1.433 kanaoka 237: #if !defined(REALBASEMEM) && !defined(REALEXTMEM)
238: void add_mem_cluster __P((u_int64_t, u_int64_t, u_int32_t));
  239: #endif /* !defined(REALBASEMEM) && !defined(REALEXTMEM) */
240:
1.417 jdolecek 241: /*
242: * Map Brand ID from cpuid instruction to brand name.
243: * Source: Intel Processor Identification and the CPUID Instruction, AP-485
244: */
245: const char * const i386_p3_brand[] = {
1.419 jdolecek 246: "", /* Unsupported */
1.417 jdolecek 247: "Celeron", /* Intel (R) Celeron (TM) processor */
248: "", /* Intel (R) Pentium (R) III processor */
249: "Xeon", /* Intel (R) Pentium (R) III Xeon (TM) processor */
250: };
251:
1.200 christos 252: #ifdef COMPAT_NOMID
253: static int exec_nomid __P((struct proc *, struct exec_package *));
254: #endif
1.59 mycroft 255:
1.267 bouyer 256: void cyrix6x86_cpu_setup __P((void));
1.379 jdolecek 257: void winchip_cpu_setup __P((void));
1.441 ! thorpej 258: void amd_family5_setup __P((void));
1.267 bouyer 259:
1.437 thorpej 260: void intel_cpuid_cpu_cacheinfo __P((struct cpu_info *));
1.438 thorpej 261: void amd_cpuid_cpu_cacheinfo __P((struct cpu_info *));
1.437 thorpej 262:
1.267 bouyer 263: static __inline u_char
264: cyrix_read_reg(u_char reg)
265: {
266: outb(0x22, reg);
267: return inb(0x23);
268: }
269:
270: static __inline void
271: cyrix_write_reg(u_char reg, u_char data)
272: {
273: outb(0x22, reg);
274: outb(0x23, data);
275: }
276:
1.59 mycroft 277: /*
278: * Machine-dependent startup code
279: */
1.32 andrew 280: void
1.1 cgd 281: cpu_startup()
282: {
1.437 thorpej 283: struct cpu_info *ci = curcpu();
1.59 mycroft 284: caddr_t v;
1.349 thorpej 285: int sz, x;
1.314 thorpej 286: vaddr_t minaddr, maxaddr;
287: vsize_t size;
1.424 enami 288: char buf[160]; /* about 2 line */
1.354 lukem 289: char pbuf[9];
1.438 thorpej 290: char cbuf[7];
1.440 thorpej 291: int bigcache, cachesize;
1.1 cgd 292:
1.284 mrg 293: /*
294: * Initialize error message buffer (et end of core).
295: */
1.354 lukem 296: msgbuf_vaddr = uvm_km_valloc(kernel_map, i386_round_page(MSGBUFSIZE));
1.385 thorpej 297: if (msgbuf_vaddr == 0)
1.284 mrg 298: panic("failed to valloc msgbuf_vaddr");
1.359 thorpej 299:
1.284 mrg 300: /* msgbuf_paddr was init'd in pmap */
301: for (x = 0; x < btoc(MSGBUFSIZE); x++)
1.414 thorpej 302: pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE,
303: msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
1.435 thorpej 304: pmap_update();
1.359 thorpej 305:
1.284 mrg 306: initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
307:
1.392 sommerfe 308: printf("%s", version);
1.393 fvdl 309:
1.408 thorpej 310: printf("cpu0: %s", cpu_model);
311: if (cpu_tsc_freq != 0)
312: printf(", %qd.%02qd MHz", (cpu_tsc_freq + 4999) / 1000000,
313: ((cpu_tsc_freq + 4999) / 10000) % 100);
314: printf("\n");
1.440 thorpej 315:
316: bigcache = cachesize = 0;
317:
1.438 thorpej 318: if (ci->ci_cinfo[CAI_ICACHE].cai_totalsize != 0 ||
319: ci->ci_cinfo[CAI_DCACHE].cai_totalsize != 0) {
1.397 thorpej 320: printf("cpu0:");
1.438 thorpej 321: if (ci->ci_cinfo[CAI_ICACHE].cai_totalsize) {
1.440 thorpej 322: cachesize = ci->ci_cinfo[CAI_ICACHE].cai_totalsize;
1.438 thorpej 323: format_bytes(cbuf, sizeof(cbuf),
324: ci->ci_cinfo[CAI_ICACHE].cai_totalsize);
325: printf(" I-cache %s %db/line ", cbuf,
326: ci->ci_cinfo[CAI_ICACHE].cai_linesize);
327: switch (ci->ci_cinfo[CAI_ICACHE].cai_associativity) {
328: case 0:
329: printf("disabled");
1.440 thorpej 330: cachesize = 0;
1.438 thorpej 331: break;
332: case 1:
333: printf("direct-mapped");
334: break;
335: case ~0:
336: printf("fully associative");
1.440 thorpej 337: /* XXX Don't need any color bins? */
1.438 thorpej 338: break;
339: default:
340: printf("%d-way",
341: ci->ci_cinfo[CAI_ICACHE].cai_associativity);
1.440 thorpej 342: cachesize /=
343: ci->ci_cinfo[CAI_ICACHE].cai_associativity;
1.438 thorpej 344: break;
345: }
346: }
1.440 thorpej 347: if (cachesize > bigcache)
348: bigcache = cachesize;
349:
1.438 thorpej 350: if (ci->ci_cinfo[CAI_DCACHE].cai_totalsize) {
1.440 thorpej 351: cachesize = ci->ci_cinfo[CAI_DCACHE].cai_totalsize;
1.438 thorpej 352: format_bytes(cbuf, sizeof(cbuf),
353: ci->ci_cinfo[CAI_DCACHE].cai_totalsize);
354: printf("%sD-cache %s %db/line ",
355: (ci->ci_cinfo[CAI_ICACHE].cai_totalsize != 0) ?
356: ", " : " ",
357: cbuf, ci->ci_cinfo[CAI_DCACHE].cai_linesize);
358: switch (ci->ci_cinfo[CAI_DCACHE].cai_associativity) {
359: case 0:
360: printf("disabled");
1.440 thorpej 361: cachesize = 0;
1.438 thorpej 362: break;
363: case 1:
364: printf("direct-mapped");
365: break;
366: case ~0:
367: printf("fully associative");
1.440 thorpej 368: /* XXX Don't need any color bins? */
1.438 thorpej 369: break;
370: default:
371: printf("%d-way",
372: ci->ci_cinfo[CAI_DCACHE].cai_associativity);
1.440 thorpej 373: cachesize /=
374: ci->ci_cinfo[CAI_DCACHE].cai_associativity;
1.438 thorpej 375: break;
376: }
377: }
1.440 thorpej 378: if (cachesize > bigcache)
379: bigcache = cachesize;
380:
1.438 thorpej 381: printf("\n");
382: }
383: if (ci->ci_cinfo[CAI_L2CACHE].cai_totalsize) {
1.440 thorpej 384: cachesize = ci->ci_cinfo[CAI_L2CACHE].cai_totalsize;
1.438 thorpej 385: format_bytes(cbuf, sizeof(cbuf),
386: ci->ci_cinfo[CAI_L2CACHE].cai_totalsize);
387: printf("cpu0: L2 cache %s %db/line ", cbuf,
388: ci->ci_cinfo[CAI_L2CACHE].cai_linesize);
389: switch (ci->ci_cinfo[CAI_L2CACHE].cai_associativity) {
390: case 0:
391: printf("disabled");
1.440 thorpej 392: cachesize = 0;
1.438 thorpej 393: break;
394: case 1:
395: printf("direct-mapped");
396: break;
397: case ~0:
398: printf("fully associative");
1.440 thorpej 399: /* XXX Don't need any color bins? */
1.438 thorpej 400: break;
401: default:
402: printf("%d-way",
403: ci->ci_cinfo[CAI_L2CACHE].cai_associativity);
1.440 thorpej 404: cachesize /=
405: ci->ci_cinfo[CAI_L2CACHE].cai_associativity;
1.438 thorpej 406: break;
407: }
1.440 thorpej 408: if (cachesize > bigcache)
409: bigcache = cachesize;
410:
1.397 thorpej 411: printf("\n");
412: }
1.440 thorpej 413:
414: /*
415: * Know the size of the largest cache on this CPU, re-color
416: * our pages.
417: */
418: if (bigcache != 0)
419: uvm_page_recolor(atop(bigcache));
420:
1.424 enami 421: if ((cpu_feature & CPUID_MASK1) != 0) {
422: bitmask_snprintf(cpu_feature, CPUID_FLAGS1,
423: buf, sizeof(buf));
424: printf("cpu0: features %s\n", buf);
425: }
426: if ((cpu_feature & CPUID_MASK2) != 0) {
427: bitmask_snprintf(cpu_feature, CPUID_FLAGS2,
428: buf, sizeof(buf));
429: printf("cpu0: features %s\n", buf);
1.395 thorpej 430: }
1.267 bouyer 431:
1.420 jdolecek 432: if (cpuid_level >= 3 && ((cpu_feature & CPUID_PN) != 0)) {
433: printf("cpu0: serial number %04X-%04X-%04X-%04X-%04X-%04X\n",
434: cpu_serial[0] / 65536, cpu_serial[0] % 65536,
435: cpu_serial[1] / 65536, cpu_serial[1] % 65536,
436: cpu_serial[2] / 65536, cpu_serial[2] % 65536);
437: }
1.402 explorer 438:
1.382 mycroft 439: format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
1.354 lukem 440: printf("total memory = %s\n", pbuf);
1.1 cgd 441:
442: /*
1.59 mycroft 443: * Find out how much space we need, allocate it,
444: * and then give everything true virtual addresses.
1.1 cgd 445: */
1.354 lukem 446: sz = (int)allocsys(NULL, NULL);
1.284 mrg 447: if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
448: panic("startup: no room for tables");
1.354 lukem 449: if (allocsys(v, NULL) - v != sz)
1.1 cgd 450: panic("startup: table size inconsistency");
1.50 cgd 451:
1.36 cgd 452: /*
1.284 mrg 453: * Allocate virtual address space for the buffers. The area
454: * is not managed by the VM system.
1.36 cgd 455: */
456: size = MAXBSIZE * nbuf;
1.314 thorpej 457: if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
1.398 thorpej 458: NULL, UVM_UNKNOWN_OFFSET, 0,
1.284 mrg 459: UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
1.430 chs 460: UVM_ADV_NORMAL, 0)) != 0)
1.284 mrg 461: panic("cpu_startup: cannot allocate VM for buffers");
1.314 thorpej 462: minaddr = (vaddr_t)buffers;
1.54 cgd 463: if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
464: /* don't want to alloc more physical mem than needed */
465: bufpages = btoc(MAXBSIZE) * nbuf;
466: }
1.36 cgd 467:
1.268 thorpej 468: /*
469: * XXX We defer allocation of physical pages for buffers until
470: * XXX after autoconfiguration has run. We must do this because
471: * XXX on system with large amounts of memory or with large
472: * XXX user-configured buffer caches, the buffer cache will eat
473: * XXX up all of the lower 16M of RAM. This prevents ISA DMA
474: * XXX maps from allocating bounce pages.
475: *
476: * XXX Note that nothing can use buffer cache buffers until after
477: * XXX autoconfiguration completes!!
478: *
479: * XXX This is a hack, and needs to be replaced with a better
480: * XXX solution! --thorpej@netbsd.org, December 6, 1997
481: */
1.41 cgd 482:
1.1 cgd 483: /*
1.36 cgd 484: * Allocate a submap for exec arguments. This map effectively
485: * limits the number of processes exec'ing at any time.
1.1 cgd 486: */
1.284 mrg 487: exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
1.358 thorpej 488: 16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
1.59 mycroft 489:
1.1 cgd 490: /*
491: * Allocate a submap for physio
492: */
1.284 mrg 493: phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
1.358 thorpej 494: VM_PHYS_SIZE, 0, FALSE, NULL);
1.1 cgd 495:
496: /*
1.229 thorpej 497: * Finally, allocate mbuf cluster submap.
1.1 cgd 498: */
1.334 thorpej 499: mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
1.358 thorpej 500: nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);
1.1 cgd 501:
1.284 mrg 502: /*
503: * XXX Buffer cache pages haven't yet been allocated, so
504: * XXX we need to account for those pages when printing
505: * XXX the amount of free memory.
506: */
1.354 lukem 507: format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free - bufpages));
508: printf("avail memory = %s\n", pbuf);
1.414 thorpej 509: format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
1.354 lukem 510: printf("using %d buffers containing %s of memory\n", nbuf, pbuf);
1.1 cgd 511:
1.375 drochner 512: /* Safe for i/o port / memory space allocation to use malloc now. */
513: i386_bus_space_mallocok();
1.349 thorpej 514: }
515:
516: /*
517: * Set up proc0's TSS and LDT.
518: */
519: void
520: i386_proc0_tss_ldt_init()
521: {
522: struct pcb *pcb;
523: int x;
1.268 thorpej 524:
1.326 thorpej 525: gdt_init();
526: curpcb = pcb = &proc0.p_addr->u_pcb;
527: pcb->pcb_flags = 0;
528: pcb->pcb_tss.tss_ioopt =
529: ((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss) << 16;
530: for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
531: pcb->pcb_iomap[x] = 0xffffffff;
532:
1.394 thorpej 533: pcb->pcb_ldt_sel = pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
1.326 thorpej 534: pcb->pcb_cr0 = rcr0();
535: pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
536: pcb->pcb_tss.tss_esp0 = (int)proc0.p_addr + USPACE - 16;
1.394 thorpej 537: tss_alloc(&proc0);
1.326 thorpej 538:
1.394 thorpej 539: ltr(proc0.p_md.md_tss_sel);
1.326 thorpej 540: lldt(pcb->pcb_ldt_sel);
541:
542: proc0.p_md.md_regs = (struct trapframe *)pcb->pcb_tss.tss_esp0 - 1;
543: }
544:
545: /*
546: * XXX Finish up the deferred buffer cache allocation and initialization.
547: */
548: void
549: i386_bufinit()
550: {
551: int i, base, residual;
552:
1.268 thorpej 553: base = bufpages / nbuf;
554: residual = bufpages % nbuf;
555: for (i = 0; i < nbuf; i++) {
1.314 thorpej 556: vsize_t curbufsize;
557: vaddr_t curbuf;
1.284 mrg 558: struct vm_page *pg;
559:
560: /*
561: * Each buffer has MAXBSIZE bytes of VM space allocated. Of
562: * that MAXBSIZE space, we allocate and map (base+1) pages
563: * for the first "residual" buffers, and then we allocate
564: * "base" pages for the rest.
565: */
1.314 thorpej 566: curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
1.414 thorpej 567: curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
1.284 mrg 568:
569: while (curbufsize) {
1.311 thorpej 570: /*
571: * Attempt to allocate buffers from the first
572: * 16M of RAM to avoid bouncing file system
573: * transfers.
574: */
1.350 chs 575: pg = uvm_pagealloc_strat(NULL, 0, NULL, 0,
1.311 thorpej 576: UVM_PGA_STRAT_FALLBACK, VM_FREELIST_FIRST16);
1.284 mrg 577: if (pg == NULL)
578: panic("cpu_startup: not enough memory for "
579: "buffer cache");
1.434 thorpej 580: pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
581: VM_PROT_READ|VM_PROT_WRITE);
1.284 mrg 582: curbuf += PAGE_SIZE;
583: curbufsize -= PAGE_SIZE;
584: }
1.268 thorpej 585: }
1.435 thorpej 586: pmap_update();
1.268 thorpej 587:
588: /*
589: * Set up buffers, so they can be used to read disk labels.
590: */
591: bufinit();
1.16 cgd 592: }
593:
1.104 cgd 594: /*
595: * Info for CTL_HW
596: */
597: char cpu_model[120];
598:
1.216 fvdl 599: /*
600: * Note: these are just the ones that may not have a cpuid instruction.
601: * We deal with the rest in a different way.
602: */
1.418 jdolecek 603: const struct cpu_nocpuid_nameclass i386_nocpuid_cpus[] = {
1.267 bouyer 604: { CPUVENDOR_INTEL, "Intel", "386SX", CPUCLASS_386,
1.437 thorpej 605: NULL, NULL}, /* CPU_386SX */
1.267 bouyer 606: { CPUVENDOR_INTEL, "Intel", "386DX", CPUCLASS_386,
1.437 thorpej 607: NULL, NULL}, /* CPU_386 */
1.267 bouyer 608: { CPUVENDOR_INTEL, "Intel", "486SX", CPUCLASS_486,
1.437 thorpej 609: NULL, NULL}, /* CPU_486SX */
1.267 bouyer 610: { CPUVENDOR_INTEL, "Intel", "486DX", CPUCLASS_486,
1.437 thorpej 611: NULL, NULL}, /* CPU_486 */
1.267 bouyer 612: { CPUVENDOR_CYRIX, "Cyrix", "486DLC", CPUCLASS_486,
1.437 thorpej 613: NULL, NULL}, /* CPU_486DLC */
614: { CPUVENDOR_CYRIX, "Cyrix", "6x86", CPUCLASS_486,
615: cyrix6x86_cpu_setup, NULL}, /* CPU_6x86 */
1.267 bouyer 616: { CPUVENDOR_NEXGEN,"NexGen","586", CPUCLASS_386,
1.437 thorpej 617: NULL, NULL}, /* CPU_NX586 */
1.216 fvdl 618: };
619:
620: const char *classnames[] = {
621: "386",
622: "486",
623: "586",
624: "686"
625: };
626:
627: const char *modifiers[] = {
628: "",
629: "OverDrive ",
630: "Dual ",
631: ""
1.18 cgd 632: };
633:
1.418 jdolecek 634: const struct cpu_cpuid_nameclass i386_cpuid_cpus[] = {
1.216 fvdl 635: {
636: "GenuineIntel",
637: CPUVENDOR_INTEL,
638: "Intel",
639: /* Family 4 */
640: { {
641: CPUCLASS_486,
642: {
1.219 perry 643: "486DX", "486DX", "486SX", "486DX2", "486SL",
1.216 fvdl 644: "486SX2", 0, "486DX2 W/B Enhanced",
645: "486DX4", 0, 0, 0, 0, 0, 0, 0,
646: "486" /* Default */
1.267 bouyer 647: },
1.437 thorpej 648: NULL,
649: intel_cpuid_cpu_cacheinfo,
1.216 fvdl 650: },
651: /* Family 5 */
652: {
653: CPUCLASS_586,
654: {
1.361 tron 655: "Pentium (P5 A-step)", "Pentium (P5)",
656: "Pentium (P54C)", "Pentium (P24T)",
657: "Pentium/MMX", "Pentium", 0,
658: "Pentium (P54C)", "Pentium/MMX (Tillamook)",
659: 0, 0, 0, 0, 0, 0, 0,
1.216 fvdl 660: "Pentium" /* Default */
1.267 bouyer 661: },
1.437 thorpej 662: NULL,
663: intel_cpuid_cpu_cacheinfo,
1.216 fvdl 664: },
665: /* Family 6 */
666: {
667: CPUCLASS_686,
668: {
1.361 tron 669: "Pentium Pro (A-step)", "Pentium Pro", 0,
670: "Pentium II (Klamath)", "Pentium Pro",
1.416 jdolecek 671: "Pentium II/Celeron (Deschutes)",
672: "Celeron (Mendocino)",
673: "Pentium III (Katmai)",
674: "Pentium III (Coppermine)",
1.419 jdolecek 675: 0, "Pentium III (Cascades)", 0, 0,
1.416 jdolecek 676: 0, 0,
1.340 fvdl 677: "Pentium Pro, II or III" /* Default */
1.267 bouyer 678: },
1.437 thorpej 679: NULL,
680: intel_cpuid_cpu_cacheinfo,
1.406 fvdl 681: },
682: /* Family > 6 */
683: {
684: CPUCLASS_686,
685: {
686: 0, 0, 0, 0, 0, 0, 0,
687: 0, 0, 0, 0, 0, 0, 0, 0, 0,
688: "Pentium 4" /* Default */
689: },
1.437 thorpej 690: NULL,
691: intel_cpuid_cpu_cacheinfo,
1.216 fvdl 692: } }
693: },
694: {
695: "AuthenticAMD",
696: CPUVENDOR_AMD,
697: "AMD",
698: /* Family 4 */
699: { {
700: CPUCLASS_486,
701: {
702: 0, 0, 0, "Am486DX2 W/T",
703: 0, 0, 0, "Am486DX2 W/B",
704: "Am486DX4 W/T or Am5x86 W/T 150",
705: "Am486DX4 W/B or Am5x86 W/B 150", 0, 0,
706: 0, 0, "Am5x86 W/T 133/160",
707: "Am5x86 W/B 133/160",
708: "Am486 or Am5x86" /* Default */
709: },
1.437 thorpej 710: NULL,
711: NULL,
1.216 fvdl 712: },
713: /* Family 5 */
714: {
715: CPUCLASS_586,
716: {
1.241 fvdl 717: "K5", "K5", "K5", "K5", 0, 0, "K6",
1.416 jdolecek 718: "K6", "K6-2", "K6-III", 0, 0, 0,
719: "K6-2+/III+", 0, 0,
1.267 bouyer 720: "K5 or K6" /* Default */
1.216 fvdl 721: },
1.441 ! thorpej 722: amd_family5_setup,
1.439 thorpej 723: amd_cpuid_cpu_cacheinfo,
1.216 fvdl 724: },
1.363 fvdl 725: /* Family 6 */
1.216 fvdl 726: {
727: CPUCLASS_686,
728: {
1.416 jdolecek 729: 0, "Athlon Model 1", "Athlon Model 2",
730: "Duron", "Athlon Model 4 (Thunderbird)",
731: 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.362 fvdl 732: "K7 (Athlon)" /* Default */
1.216 fvdl 733: },
1.437 thorpej 734: NULL,
1.439 thorpej 735: amd_cpuid_cpu_cacheinfo,
1.406 fvdl 736: },
737: /* Family > 6 */
738: {
739: CPUCLASS_686,
740: {
741: 0, 0, 0, 0, 0, 0, 0,
742: 0, 0, 0, 0, 0, 0, 0, 0, 0,
743: "Unknown K7 (Athlon)" /* Default */
744: },
1.437 thorpej 745: NULL,
1.439 thorpej 746: amd_cpuid_cpu_cacheinfo,
1.216 fvdl 747: } }
748: },
749: {
750: "CyrixInstead",
751: CPUVENDOR_CYRIX,
752: "Cyrix",
753: /* Family 4 */
754: { {
755: CPUCLASS_486,
756: {
1.376 minoura 757: 0, 0, 0,
758: "MediaGX",
759: 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.216 fvdl 760: "486" /* Default */
761: },
1.437 thorpej 762: NULL,
763: NULL,
1.216 fvdl 764: },
765: /* Family 5 */
766: {
767: CPUCLASS_586,
768: {
1.376 minoura 769: 0, 0, "6x86", 0,
770: "MMX-enhanced MediaGX (GXm)", /* or Geode? */
771: 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.216 fvdl 772: "6x86" /* Default */
1.267 bouyer 773: },
1.437 thorpej 774: cyrix6x86_cpu_setup,
775: NULL,
1.216 fvdl 776: },
1.278 bouyer 777: /* Family 6 */
1.216 fvdl 778: {
779: CPUCLASS_686,
780: {
1.278 bouyer 781: "6x86MX", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
782: "6x86MX" /* Default */
1.329 bad 783: },
1.437 thorpej 784: cyrix6x86_cpu_setup,
785: NULL,
1.406 fvdl 786: },
787: /* Family > 6 */
788: {
789: CPUCLASS_686,
790: {
791: 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
792: "Unknown 6x86MX" /* Default */
793: },
1.437 thorpej 794: NULL,
795: NULL,
1.329 bad 796: } }
797: },
798: {
799: "CentaurHauls",
800: CPUVENDOR_IDT,
801: "IDT",
802: /* Family 4, IDT never had any of these */
803: { {
804: CPUCLASS_486,
805: {
806: 0, 0, 0, 0, 0, 0, 0,
807: 0, 0, 0, 0, 0, 0, 0, 0, 0,
808: "486 compatible" /* Default */
809: },
1.437 thorpej 810: NULL,
811: NULL,
1.329 bad 812: },
813: /* Family 5 */
814: {
815: CPUCLASS_586,
816: {
817: 0, 0, 0, 0, "WinChip C6", 0, 0, 0,
1.379 jdolecek 818: "WinChip 2", "WinChip 3", 0, 0, 0, 0, 0, 0,
1.329 bad 819: "WinChip" /* Default */
820: },
1.437 thorpej 821: winchip_cpu_setup,
822: NULL,
1.329 bad 823: },
824: /* Family 6, not yet available from IDT */
1.406 fvdl 825: {
826: CPUCLASS_686,
827: {
828: 0, 0, 0, 0, 0, 0, 0,
829: 0, 0, 0, 0, 0, 0, 0, 0, 0,
830: "Pentium Pro compatible" /* Default */
831: },
1.437 thorpej 832: NULL,
833: NULL,
1.406 fvdl 834: },
835: /* Family > 6, not yet available from IDT */
1.329 bad 836: {
837: CPUCLASS_686,
838: {
839: 0, 0, 0, 0, 0, 0, 0,
840: 0, 0, 0, 0, 0, 0, 0, 0, 0,
841: "Pentium Pro compatible" /* Default */
1.267 bouyer 842: },
1.437 thorpej 843: NULL,
844: NULL,
1.216 fvdl 845: } }
846: }
847: };
848:
1.437 thorpej 849: static void
850: do_cpuid(u_int which, u_int *rv)
851: {
852: register u_int eax __asm("%eax") = which;
853:
854: __asm __volatile(
855: " cpuid ;"
856: " movl %%eax,0(%2) ;"
857: " movl %%ebx,4(%2) ;"
858: " movl %%ecx,8(%2) ;"
859: " movl %%edx,12(%2) "
860: : "=a" (eax)
861: : "0" (eax), "S" (rv)
862: : "ebx", "ecx", "edx");
863: }
864:
865: static void
866: do_cpuid_serial(u_int *serial)
867: {
868: __asm __volatile(
869: " movl $1,%%eax ;"
870: " cpuid ;"
871: " movl %%eax,0(%0) ;"
872: " movl $3,%%eax ;"
873: " cpuid ;"
874: " movl %%edx,4(%0) ;"
875: " movl %%ecx,8(%0) "
876: : /* no inputs */
877: : "S" (serial)
878: : "eax", "ebx", "ecx", "edx");
879: }
880:
1.195 mycroft 881: void
1.267 bouyer 882: cyrix6x86_cpu_setup()
883: {
884: /* set up various cyrix registers */
885: /* Enable suspend on halt */
886: cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);
887: /* enable access to ccr4/ccr5 */
888: cyrix_write_reg(0xC3, cyrix_read_reg(0xC3) | 0x10);
889: /* cyrix's workaround for the "coma bug" */
890: cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
891: cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
892: cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
893: cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
894: /* disable access to ccr4/ccr5 */
895: cyrix_write_reg(0xC3, cyrix_read_reg(0xC3) & ~0x10);
1.393 fvdl 896:
897: /*
898: * XXX disable page zero in the idle loop, it seems to
899: * cause panics on these CPUs.
900: */
901: vm_page_zero_enable = FALSE;
1.267 bouyer 902: }
903:
904: void
1.379 jdolecek 905: winchip_cpu_setup()
906: {
1.380 jdolecek 907: #if defined(I586_CPU)
1.379 jdolecek 908: extern int cpu_id;
909:
910: switch (CPUID2MODEL(cpu_id)) { /* model */
911: case 4: /* WinChip C6 */
912: cpu_feature &= ~CPUID_TSC;
913: printf("WARNING: WinChip C6: broken TSC disabled\n");
914: }
915: #endif
1.441 ! thorpej 916: }
! 917:
! 918: void
! 919: amd_family5_setup(void)
! 920: {
! 921: extern int cpu_id;
! 922:
! 923: switch (CPUID2MODEL(cpu_id)) {
! 924: case 0: /* AMD-K5 Model 0 */
! 925: /*
! 926: * According to the AMD Processor Recognition App Note,
! 927: * the AMD-K5 Model 0 uses the wrong bit to indicate
! 928: * support for global PTEs, instead using bit 9 (APIC)
! 929: * rather than bit 13 (i.e. "0x200" vs. 0x2000". Oops!).
! 930: */
! 931: if (cpu_feature & CPUID_APIC)
! 932: cpu_feature = (cpu_feature & ~CPUID_APIC) | CPUID_PGE;
! 933: /*
! 934: * XXX But pmap_pg_g is already initialized -- need to kick
! 935: * XXX the pmap somehow. How does the MP branch do this?
! 936: */
! 937: break;
! 938: }
1.379 jdolecek 939: }
940:
1.437 thorpej 941: static const struct i386_cache_info *
1.438 thorpej 942: cache_info_lookup(const struct i386_cache_info *cai, int count, u_int8_t desc)
1.397 thorpej 943: {
1.438 thorpej 944: int i;
1.397 thorpej 945:
1.438 thorpej 946: for (i = 0; i < count; i++) {
947: if (cai[i].cai_desc == desc)
948: return (&cai[i]);
1.437 thorpej 949: }
950:
951: return (NULL);
1.397 thorpej 952: }
953:
1.438 thorpej 954: static const struct i386_cache_info intel_cpuid_cache_info[] = {
955: { CAI_ITLB,
956: 0x01,
957: 32, 4, 4 * 1024 },
958: { CAI_ITLB2,
959: 0x02,
960: 2, 1, 4 * 1024 * 1024 },
961: { CAI_DTLB,
962: 0x03,
963: 64, 4, 4 * 1024 },
964: { CAI_DTLB2,
965: 0x04,
966: 8, 4, 4 * 1024 * 1024 },
967: { CAI_ICACHE,
968: 0x06,
969: 8 * 1024, 4, 32 },
970: { CAI_ICACHE,
971: 0x08,
972: 16 * 1024, 4, 32 },
973: { CAI_DCACHE,
974: 0x0a,
975: 8 * 1024, 2, 32 },
976: { CAI_DCACHE,
977: 0x0c,
978: 16 * 1024, 2, 32 },
979: { CAI_L2CACHE,
980: 0x40,
981: 0, 1, 0 },
982: { CAI_L2CACHE,
983: 0x41,
984: 128 * 1024, 4, 32 },
985: { CAI_L2CACHE,
986: 0x42,
987: 256 * 1024, 4, 32 },
988: { CAI_L2CACHE,
989: 0x43,
990: 512 * 1024, 4, 32 },
991: { CAI_L2CACHE,
992: 0x44,
993: 1 * 1024 * 1024, 4, 32 },
994: { CAI_L2CACHE,
995: 0x45,
996: 2 * 1024 * 1024, 4, 32 },
997: { CAI_L2CACHE,
998: 0x82,
999: 256 * 1024, 8, 32 },
1000: { CAI_L2CACHE,
1001: 0x84,
1002: 1 * 1024 * 1024, 8, 32 },
1003: { CAI_L2CACHE,
1004: 0x85,
1005: 2 * 1024 * 1024, 8, 32 },
1006: };
1007: static const int intel_cpuid_cache_info_count =
1008: sizeof(intel_cpuid_cache_info) / sizeof(intel_cpuid_cache_info[0]);
1009:
1.437 thorpej 1010: void
1011: intel_cpuid_cpu_cacheinfo(struct cpu_info *ci)
1.402 explorer 1012: {
1.437 thorpej 1013: const struct i386_cache_info *cai;
1014: u_int descs[4];
1015: int iterations, i, j;
1016: u_int8_t desc;
1017:
1018: /*
1019: * Parse the cache info from `cpuid'.
1020: * XXX This is kinda ugly, but hey, so is the architecture...
1021: */
1022:
1023: do_cpuid(2, descs);
1024: iterations = descs[0] & 0xff;
1025: while (iterations-- > 0) {
1026: for (i = 0; i < 4; i++) {
1027: if (descs[i] & 0x80000000)
1028: continue;
1029: for (j = 0; j < 4; j++) {
1030: if (i == 0 && j == 0)
1031: continue;
1032: desc = (descs[i] >> (j * 8)) & 0xff;
1033: cai = cache_info_lookup(intel_cpuid_cache_info,
1.438 thorpej 1034: intel_cpuid_cache_info_count, desc);
1.437 thorpej 1035: if (cai != NULL) {
1.438 thorpej 1036: memcpy(&ci->ci_cinfo[cai->cai_index],
1037: cai,
1038: sizeof(struct i386_cache_info));
1.437 thorpej 1039: }
1040: }
1041: }
1042: do_cpuid(2, descs);
1043: }
1.438 thorpej 1044: }
1045:
1046: /*
1047: * AMD Cache Info:
1048: *
1049: * Athlon, Duron:
1050: *
1051: * Function 8000.0005 L1 TLB/Cache Information
1052: * EAX -- L1 TLB 2/4MB pages
1053: * EBX -- L1 TLB 4K pages
1054: * ECX -- L1 D-cache
1055: * EDX -- L1 I-cache
1056: *
1057: * Function 8000.0006 L2 TLB/Cache Information
1058: * EAX -- L2 TLB 2/4MB pages
1059: * EBX -- L2 TLB 4K pages
1060: * ECX -- L2 Unified cache
1061: * EDX -- reserved
1062: *
1063: * K5, K6:
1064: *
1065: * Function 8000.0005 L1 TLB/Cache Information
1066: * EAX -- reserved
1067: * EBX -- TLB 4K pages
1068: * ECX -- L1 D-cache
1069: * EDX -- L1 I-cache
1070: *
1071: * K6-III:
1072: *
1073: * Function 8000.0006 L2 Cache Information
1074: * EAX -- reserved
1075: * EBX -- reserved
1076: * ECX -- L2 Unified cache
1077: * EDX -- reserved
1078: */
1079:
/*
 * Field extractors for the AMD extended CPUID cache/TLB descriptor
 * words documented above.  Associativity and entry counts are packed
 * as bytes (L1) or nibbles/12-bit fields (L2).
 */

/* L1 TLB 2/4MB pages */
#define AMD_L1_EAX_DTLB_ASSOC(x)	(((x) >> 24) & 0xff)
#define AMD_L1_EAX_DTLB_ENTRIES(x)	(((x) >> 16) & 0xff)
#define AMD_L1_EAX_ITLB_ASSOC(x)	(((x) >> 8) & 0xff)
#define AMD_L1_EAX_ITLB_ENTRIES(x)	( (x) & 0xff)

/* L1 TLB 4K pages */
#define AMD_L1_EBX_DTLB_ASSOC(x)	(((x) >> 24) & 0xff)
#define AMD_L1_EBX_DTLB_ENTRIES(x)	(((x) >> 16) & 0xff)
#define AMD_L1_EBX_ITLB_ASSOC(x)	(((x) >> 8) & 0xff)
#define AMD_L1_EBX_ITLB_ENTRIES(x)	( (x) & 0xff)

/* L1 Data Cache */
#define AMD_L1_ECX_DC_SIZE(x)	((((x) >> 24) & 0xff) * 1024)
#define AMD_L1_ECX_DC_ASSOC(x)	 (((x) >> 16) & 0xff)
#define AMD_L1_ECX_DC_LPT(x)	 (((x) >> 8) & 0xff)
#define AMD_L1_ECX_DC_LS(x)	 ( (x) & 0xff)

/* L1 Instruction Cache */
#define AMD_L1_EDX_IC_SIZE(x)	((((x) >> 24) & 0xff) * 1024)
#define AMD_L1_EDX_IC_ASSOC(x)	 (((x) >> 16) & 0xff)
#define AMD_L1_EDX_IC_LPT(x)	 (((x) >> 8) & 0xff)
#define AMD_L1_EDX_IC_LS(x)	 ( (x) & 0xff)

/* Note for L2 TLB -- if the upper 16 bits are 0, it is a unified TLB */

/* L2 TLB 2/4MB pages */
#define AMD_L2_EAX_DTLB_ASSOC(x)	(((x) >> 28) & 0xf)
#define AMD_L2_EAX_DTLB_ENTRIES(x)	(((x) >> 16) & 0xfff)
#define AMD_L2_EAX_IUTLB_ASSOC(x)	(((x) >> 12) & 0xf)
#define AMD_L2_EAX_IUTLB_ENTRIES(x)	( (x) & 0xfff)

/* L2 TLB 4K pages */
#define AMD_L2_EBX_DTLB_ASSOC(x)	(((x) >> 28) & 0xf)
#define AMD_L2_EBX_DTLB_ENTRIES(x)	(((x) >> 16) & 0xfff)
#define AMD_L2_EBX_IUTLB_ASSOC(x)	(((x) >> 12) & 0xf)
#define AMD_L2_EBX_IUTLB_ENTRIES(x)	( (x) & 0xfff)

/* L2 Cache */
#define AMD_L2_ECX_C_SIZE(x)	((((x) >> 16) & 0xffff) * 1024)
#define AMD_L2_ECX_C_ASSOC(x)	 (((x) >> 12) & 0xf)
#define AMD_L2_ECX_C_LPT(x)	 (((x) >> 8) & 0xf)
#define AMD_L2_ECX_C_LS(x)	 ( (x) & 0xff)
1123:
/*
 * Map the 4-bit L2 associativity encoding from CPUID 8000.0006 to an
 * actual way count.  Only the cai_desc (encoding) and cai_associativity
 * (ways) fields are meaningful here; cache_info_lookup() is reused for
 * the search.
 */
static const struct i386_cache_info amd_cpuid_l2cache_assoc_info[] = {
	{ 0,			/* 0x0: L2 off/disabled */
	  0x00,
	  0, 0, 0 },
	{ 0,			/* 0x1: direct-mapped */
	  0x01,
	  0, 1, 0 },
	{ 0,			/* 0x2: 2-way */
	  0x02,
	  0, 2, 0 },
	{ 0,			/* 0x4: 4-way */
	  0x04,
	  0, 4, 0 },
	{ 0,			/* 0x6: 8-way */
	  0x06,
	  0, 8, 0 },
	{ 0,			/* 0x8: 16-way */
	  0x08,
	  0, 16, 0 },
	{ 0,			/* 0xf: fully associative */
	  0x0f,
	  0, ~0, 0 },
};
static const int amd_cpuid_l2cache_assoc_info_count =
    sizeof(amd_cpuid_l2cache_assoc_info) /
    sizeof(amd_cpuid_l2cache_assoc_info[0]);
1150:
1151: void
1152: amd_cpuid_cpu_cacheinfo(struct cpu_info *ci)
1153: {
1154: extern int cpu_id;
1155: const struct i386_cache_info *cp;
1156: struct i386_cache_info *cai;
1157: int family, model;
1158: u_int descs[4];
1159: u_int lfunc;
1160:
1161: family = (cpu_id >> 8) & 15;
1162: if (family < CPU_MINFAMILY)
1163: panic("amd_cpuid_cpu_cacheinfo: strange family value");
1164: model = CPUID2MODEL(cpu_id);
1165:
1166: /*
1167: * K5 model 0 has none of this info.
1168: */
1169: if (family == 5 && model == 0)
1170: return;
1171:
1172: /*
1173: * Determine the largest extended function value.
1174: */
1175: do_cpuid(0x80000000, descs);
1176: lfunc = descs[0];
1177:
1178: /*
1179: * Determine L1 cache/TLB info.
1180: */
1181: if (lfunc < 0x80000005) {
1182: /* No L1 cache info available. */
1183: return;
1184: }
1185:
1186: do_cpuid(0x80000005, descs);
1187:
1188: /*
1189: * K6-III and higher have large page TLBs.
1190: */
1191: if ((family == 5 && model >= 9) || family >= 6) {
1192: cai = &ci->ci_cinfo[CAI_ITLB2];
1193: cai->cai_totalsize = AMD_L1_EAX_ITLB_ENTRIES(descs[0]);
1194: cai->cai_associativity = AMD_L1_EAX_ITLB_ASSOC(descs[0]);
1195: cai->cai_linesize = (4 * 1024 * 1024);
1196:
1197: cai = &ci->ci_cinfo[CAI_DTLB2];
1198: cai->cai_totalsize = AMD_L1_EAX_DTLB_ENTRIES(descs[0]);
1199: cai->cai_associativity = AMD_L1_EAX_DTLB_ASSOC(descs[0]);
1200: cai->cai_linesize = (4 * 1024 * 1024);
1201: }
1202:
1203: cai = &ci->ci_cinfo[CAI_ITLB];
1204: cai->cai_totalsize = AMD_L1_EBX_ITLB_ENTRIES(descs[1]);
1205: cai->cai_associativity = AMD_L1_EBX_ITLB_ASSOC(descs[1]);
1206: cai->cai_linesize = (4 * 1024);
1207:
1208: cai = &ci->ci_cinfo[CAI_DTLB];
1209: cai->cai_totalsize = AMD_L1_EBX_DTLB_ENTRIES(descs[1]);
1210: cai->cai_associativity = AMD_L1_EBX_DTLB_ASSOC(descs[1]);
1211: cai->cai_linesize = (4 * 1024);
1212:
1213: cai = &ci->ci_cinfo[CAI_DCACHE];
1214: cai->cai_totalsize = AMD_L1_ECX_DC_SIZE(descs[2]);
1215: cai->cai_associativity = AMD_L1_ECX_DC_ASSOC(descs[2]);
1216: cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[2]);
1217:
1218: cai = &ci->ci_cinfo[CAI_ICACHE];
1219: cai->cai_totalsize = AMD_L1_EDX_IC_SIZE(descs[3]);
1220: cai->cai_associativity = AMD_L1_EDX_IC_ASSOC(descs[3]);
1221: cai->cai_linesize = AMD_L1_EDX_IC_LS(descs[3]);
1222:
1223: /*
1224: * Determine L2 cache/TLB info.
1225: */
1226: if (lfunc < 0x80000006) {
1227: /* No L2 cache info available. */
1228: return;
1229: }
1230:
1231: do_cpuid(0x80000006, descs);
1232:
1233: cai = &ci->ci_cinfo[CAI_L2CACHE];
1234: cai->cai_totalsize = AMD_L2_ECX_C_SIZE(descs[2]);
1235: cai->cai_associativity = AMD_L2_ECX_C_ASSOC(descs[2]);
1236: cai->cai_linesize = AMD_L2_ECX_C_LS(descs[2]);
1237:
1238: cp = cache_info_lookup(amd_cpuid_l2cache_assoc_info,
1239: amd_cpuid_l2cache_assoc_info_count, cai->cai_associativity);
1240: if (cp != NULL)
1241: cai->cai_associativity = cp->cai_associativity;
1242: else
1243: cai->cai_associativity = 0; /* XXX Unknown/reserved */
1.402 explorer 1244: }
1245:
1.379 jdolecek 1246: void
1.437 thorpej 1247: identifycpu(struct cpu_info *ci)
1.16 cgd 1248: {
1.86 mycroft 1249: extern char cpu_vendor[];
1.216 fvdl 1250: extern int cpu_id;
1.417 jdolecek 1251: extern int cpu_brand_id;
1252: const char *name, *modifier, *vendorname, *brand = "";
1.216 fvdl 1253: int class = CPUCLASS_386, vendor, i, max;
1254: int family, model, step, modif;
1.418 jdolecek 1255: const struct cpu_cpuid_nameclass *cpup = NULL;
1.267 bouyer 1256: void (*cpu_setup) __P((void));
1.437 thorpej 1257: void (*cpu_cacheinfo) __P((struct cpu_info *));
1.86 mycroft 1258:
1.216 fvdl 1259: if (cpuid_level == -1) {
1.59 mycroft 1260: #ifdef DIAGNOSTIC
1.216 fvdl 1261: if (cpu < 0 || cpu >=
1262: (sizeof i386_nocpuid_cpus/sizeof(struct cpu_nocpuid_nameclass)))
1263: panic("unknown cpu type %d\n", cpu);
1264: #endif
1265: name = i386_nocpuid_cpus[cpu].cpu_name;
1266: vendor = i386_nocpuid_cpus[cpu].cpu_vendor;
1267: vendorname = i386_nocpuid_cpus[cpu].cpu_vendorname;
1268: class = i386_nocpuid_cpus[cpu].cpu_class;
1.267 bouyer 1269: cpu_setup = i386_nocpuid_cpus[cpu].cpu_setup;
1.437 thorpej 1270: cpu_cacheinfo = i386_nocpuid_cpus[cpu].cpu_cacheinfo;
1.216 fvdl 1271: modifier = "";
1272: } else {
1273: max = sizeof (i386_cpuid_cpus) / sizeof (i386_cpuid_cpus[0]);
1274: modif = (cpu_id >> 12) & 3;
1275: family = (cpu_id >> 8) & 15;
1276: if (family < CPU_MINFAMILY)
1277: panic("identifycpu: strange family value");
1.379 jdolecek 1278: model = CPUID2MODEL(cpu_id);
1.216 fvdl 1279: step = cpu_id & 15;
1280: #ifdef CPUDEBUG
1281: printf("cpu0: family %x model %x step %x\n", family, model,
1282: step);
1283: #endif
1284:
1285: for (i = 0; i < max; i++) {
1286: if (!strncmp(cpu_vendor,
1287: i386_cpuid_cpus[i].cpu_id, 12)) {
1288: cpup = &i386_cpuid_cpus[i];
1289: break;
1290: }
1291: }
1292:
1293: if (cpup == NULL) {
1294: vendor = CPUVENDOR_UNKNOWN;
1295: if (cpu_vendor[0] != '\0')
1296: vendorname = &cpu_vendor[0];
1297: else
1298: vendorname = "Unknown";
1299: if (family > CPU_MAXFAMILY)
1300: family = CPU_MAXFAMILY;
1301: class = family - 3;
1302: modifier = "";
1303: name = "";
1.267 bouyer 1304: cpu_setup = NULL;
1.216 fvdl 1305: } else {
1306: vendor = cpup->cpu_vendor;
1307: vendorname = cpup->cpu_vendorname;
1308: modifier = modifiers[modif];
1309: if (family > CPU_MAXFAMILY) {
1310: family = CPU_MAXFAMILY;
1311: model = CPU_DEFMODEL;
1312: } else if (model > CPU_MAXMODEL)
1313: model = CPU_DEFMODEL;
1314: i = family - CPU_MINFAMILY;
1315: name = cpup->cpu_family[i].cpu_models[model];
1316: if (name == NULL)
1317: name = cpup->cpu_family[i].cpu_models[CPU_DEFMODEL];
1318: class = cpup->cpu_family[i].cpu_class;
1.267 bouyer 1319: cpu_setup = cpup->cpu_family[i].cpu_setup;
1.437 thorpej 1320: cpu_cacheinfo = cpup->cpu_family[i].cpu_cacheinfo;
1.417 jdolecek 1321:
1322: /*
1323: * Intel processors family >= 6, model 8 allow to
1324: * recognize brand by Brand ID value.
1325: */
1326: if (vendor == CPUVENDOR_INTEL && family >= 6
1327: && model >= 8 && cpu_brand_id && cpu_brand_id <= 3)
1328: brand = i386_p3_brand[cpu_brand_id];
1.216 fvdl 1329: }
1.104 cgd 1330: }
1331:
1.417 jdolecek 1332: sprintf(cpu_model, "%s %s%s%s%s (%s-class)", vendorname, modifier, name,
1.419 jdolecek 1333: (*brand) ? " " : "", brand,
1.241 fvdl 1334: classnames[class]);
1.216 fvdl 1335:
1336: cpu_class = class;
1.397 thorpej 1337:
1338: /*
1.437 thorpej 1339: * Get the cache info for this CPU, if we can.
1.397 thorpej 1340: */
1.437 thorpej 1341: if (cpu_cacheinfo != NULL)
1342: (*cpu_cacheinfo)(ci);
1.402 explorer 1343:
1344: /*
1345: * If the processor serial number misfeature is present and supported,
1346: * extract it here.
1347: */
1348: if (cpuid_level >= 3 && (cpu_feature & CPUID_PN) != 0)
1349: do_cpuid_serial(cpu_serial);
1.18 cgd 1350:
1.16 cgd 1351: /*
1352: * Now that we have told the user what they have,
1353: * let them know if that machine type isn't configured.
1354: */
1.24 cgd 1355: switch (cpu_class) {
1.216 fvdl 1356: #if !defined(I386_CPU) && !defined(I486_CPU) && !defined(I586_CPU) && !defined(I686_CPU)
1.100 mycroft 1357: #error No CPU classes configured.
1358: #endif
1.216 fvdl 1359: #ifndef I686_CPU
1360: case CPUCLASS_686:
1361: printf("NOTICE: this kernel does not support Pentium Pro CPU class\n");
1362: #ifdef I586_CPU
1363: printf("NOTICE: lowering CPU class to i586\n");
1364: cpu_class = CPUCLASS_586;
1365: break;
1366: #endif
1367: #endif
1.165 mycroft 1368: #ifndef I586_CPU
1.118 mycroft 1369: case CPUCLASS_586:
1.210 christos 1370: printf("NOTICE: this kernel does not support Pentium CPU class\n");
1.165 mycroft 1371: #ifdef I486_CPU
1.210 christos 1372: printf("NOTICE: lowering CPU class to i486\n");
1.118 mycroft 1373: cpu_class = CPUCLASS_486;
1374: break;
1.16 cgd 1375: #endif
1.165 mycroft 1376: #endif
1377: #ifndef I486_CPU
1.18 cgd 1378: case CPUCLASS_486:
1.210 christos 1379: printf("NOTICE: this kernel does not support i486 CPU class\n");
1.165 mycroft 1380: #ifdef I386_CPU
1.210 christos 1381: printf("NOTICE: lowering CPU class to i386\n");
1.118 mycroft 1382: cpu_class = CPUCLASS_386;
1383: break;
1384: #endif
1.165 mycroft 1385: #endif
1386: #ifndef I386_CPU
1.118 mycroft 1387: case CPUCLASS_386:
1.210 christos 1388: printf("NOTICE: this kernel does not support i386 CPU class\n");
1.187 mycroft 1389: panic("no appropriate CPU class available");
1.59 mycroft 1390: #endif
1.16 cgd 1391: default:
1392: break;
1.121 mycroft 1393: }
1394:
1.267 bouyer 1395: /* configure the CPU if needed */
1396: if (cpu_setup != NULL)
1397: cpu_setup();
1.121 mycroft 1398: if (cpu == CPU_486DLC) {
1399: #ifndef CYRIX_CACHE_WORKS
1.210 christos 1400: printf("WARNING: CYRIX 486DLC CACHE UNCHANGED.\n");
1.121 mycroft 1401: #else
1402: #ifndef CYRIX_CACHE_REALLY_WORKS
1.210 christos 1403: printf("WARNING: CYRIX 486DLC CACHE ENABLED IN HOLD-FLUSH MODE.\n");
1.121 mycroft 1404: #else
1.210 christos 1405: printf("WARNING: CYRIX 486DLC CACHE ENABLED.\n");
1.121 mycroft 1406: #endif
1407: #endif
1.16 cgd 1408: }
1.147 mycroft 1409:
1.216 fvdl 1410: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1.147 mycroft 1411: /*
1.199 mycroft 1412: * On a 486 or above, enable ring 0 write protection.
1.147 mycroft 1413: */
1414: if (cpu_class >= CPUCLASS_486)
1.199 mycroft 1415: lcr0(rcr0() | CR0_WP);
1.408 thorpej 1416: #endif
1417:
1418: #if defined(I586_CPU) || defined(I686_CPU)
1419: /*
1420: * If we have a cycle counter, compute the approximate
1421: * CPU speed in MHz.
1422: */
1423: if (cpu_feature & CPUID_TSC) {
1424: u_int64_t last_tsc;
1425:
1426: last_tsc = rdtsc();
1.413 jdolecek 1427: delay(100000);
1428: cpu_tsc_freq = (rdtsc() - last_tsc) * 10;
1.408 thorpej 1429: }
1.147 mycroft 1430: #endif
1.1 cgd 1431: }
1432:
1.104 cgd 1433: /*
1434: * machine dependent system variables.
1435: */
1.195 mycroft 1436: int
1.104 cgd 1437: cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1438: int *name;
1439: u_int namelen;
1440: void *oldp;
1441: size_t *oldlenp;
1442: void *newp;
1443: size_t newlen;
1444: struct proc *p;
1445: {
1446: dev_t consdev;
1.255 drochner 1447: struct btinfo_bootpath *bibp;
1.104 cgd 1448:
1449: /* all sysctl names at this level are terminal */
1450: if (namelen != 1)
1451: return (ENOTDIR); /* overloaded */
1452:
1453: switch (name[0]) {
1454: case CPU_CONSDEV:
1455: if (cn_tab != NULL)
1456: consdev = cn_tab->cn_dev;
1457: else
1458: consdev = NODEV;
1459: return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
1460: sizeof consdev));
1.215 fvdl 1461:
1462: case CPU_BIOSBASEMEM:
1463: return (sysctl_rdint(oldp, oldlenp, newp, biosbasemem));
1464:
1465: case CPU_BIOSEXTMEM:
1466: return (sysctl_rdint(oldp, oldlenp, newp, biosextmem));
1467:
1468: case CPU_NKPDE:
1469: return (sysctl_rdint(oldp, oldlenp, newp, nkpde));
1.366 fvdl 1470:
1471: case CPU_FPU_PRESENT:
1472: return (sysctl_rdint(oldp, oldlenp, newp, i386_fpu_present));
1.215 fvdl 1473:
1.255 drochner 1474: case CPU_BOOTED_KERNEL:
1475: bibp = lookup_bootinfo(BTINFO_BOOTPATH);
1476: if(!bibp)
1477: return(ENOENT); /* ??? */
1478: return (sysctl_rdstring(oldp, oldlenp, newp, bibp->bootpath));
1.343 fvdl 1479: case CPU_DISKINFO:
1480: if (i386_alldisks == NULL)
1.339 fvdl 1481: return (ENOENT);
1.343 fvdl 1482: return (sysctl_rdstruct(oldp, oldlenp, newp, i386_alldisks,
1483: sizeof (struct disklist) +
1484: (i386_ndisks - 1) * sizeof (struct nativedisk_info)));
1.104 cgd 1485: default:
1486: return (EOPNOTSUPP);
1487: }
1488: /* NOTREACHED */
1489: }
1.151 christos 1490:
1.1 cgd 1491: /*
1492: * Send an interrupt to process.
1493: *
1494: * Stack is set up to allow sigcode stored
1495: * in u. to call routine, followed by kcall
1496: * to sigreturn routine below. After sigreturn
1497: * resets the signal mask, the stack, and the
1498: * frame pointer, it returns to the user
1499: * specified pc, psl.
1500: */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	int onstack;

	tf = p->p_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (p->p_sigctx.ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sigframe *)((caddr_t)p->p_sigctx.ps_sigstk.ss_sp +
		    p->p_sigctx.ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->tf_esp;
	fp--;	/* make room for one sigframe below the chosen stack top */

	/* Build stack frame for signal trampoline. */
	frame.sf_signum = sig;
	frame.sf_code = code;
	frame.sf_scp = &fp->sf_sc;
	frame.sf_handler = catcher;

	/* Save register context. */
#ifdef VM86
	if (tf->tf_eflags & PSL_VM) {
		/* vm86 mode keeps its segment registers in the trapframe
		 * extension; eflags need the virtual flag bits merged in. */
		frame.sf_sc.sc_gs = tf->tf_vm86_gs;
		frame.sf_sc.sc_fs = tf->tf_vm86_fs;
		frame.sf_sc.sc_es = tf->tf_vm86_es;
		frame.sf_sc.sc_ds = tf->tf_vm86_ds;
		frame.sf_sc.sc_eflags = get_vflags(p);
		/* NOTE(review): resets the emulation's syscall entry after
		 * leaving vm86 -- presumably undoes sigreturn's
		 * syscall_vm86 hook; confirm against trap.c. */
		(*p->p_emul->e_syscall_intern)(p);
	} else
#endif
	{
		/* %fs/%gs are not in the trapframe; read them directly. */
		__asm("movl %%gs,%w0" : "=r" (frame.sf_sc.sc_gs));
		__asm("movl %%fs,%w0" : "=r" (frame.sf_sc.sc_fs));
		frame.sf_sc.sc_es = tf->tf_es;
		frame.sf_sc.sc_ds = tf->tf_ds;
		frame.sf_sc.sc_eflags = tf->tf_eflags;
	}
	frame.sf_sc.sc_edi = tf->tf_edi;
	frame.sf_sc.sc_esi = tf->tf_esi;
	frame.sf_sc.sc_ebp = tf->tf_ebp;
	frame.sf_sc.sc_ebx = tf->tf_ebx;
	frame.sf_sc.sc_edx = tf->tf_edx;
	frame.sf_sc.sc_ecx = tf->tf_ecx;
	frame.sf_sc.sc_eax = tf->tf_eax;
	frame.sf_sc.sc_eip = tf->tf_eip;
	frame.sf_sc.sc_cs = tf->tf_cs;
	frame.sf_sc.sc_esp = tf->tf_esp;
	frame.sf_sc.sc_ss = tf->tf_ss;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
	frame.sf_sc.sc_err = tf->tf_err;

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = p->p_sigctx.ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &frame.sf_sc.__sc_mask13);
#endif

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.  Load fresh user segment
	 * selectors and point the trapframe at the trampoline and the
	 * frame we just copied out.
	 */
	__asm("movl %w0,%%gs" : : "r" (GSEL(GUDATA_SEL, SEL_UPL)));
	__asm("movl %w0,%%fs" : : "r" (GSEL(GUDATA_SEL, SEL_UPL)));
	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_eip = (int)p->p_sigctx.ps_sigcode;
	tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
	tf->tf_esp = (int)fp;
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
}
1608:
1609: /*
1610: * System call to cleanup state after a signal
1611: * has been taken. Reset signal mask and
1612: * stack state from context left by sendsig (above).
1613: * Return to previous pc and psl as specified by
1614: * context left by sendsig. Check carefully to
1615: * make sure that the user has not modified the
1.110 mycroft 1616: * psl to gain improper privileges or to cause
1.1 cgd 1617: * a machine fault.
1618: */
1.195 mycroft 1619: int
1.320 mycroft 1620: sys___sigreturn14(p, v, retval)
1.1 cgd 1621: struct proc *p;
1.172 thorpej 1622: void *v;
1623: register_t *retval;
1624: {
1.320 mycroft 1625: struct sys___sigreturn14_args /* {
1.123 cgd 1626: syscallarg(struct sigcontext *) sigcntxp;
1.172 thorpej 1627: } */ *uap = v;
1.82 ws 1628: struct sigcontext *scp, context;
1.298 mycroft 1629: struct trapframe *tf;
1.59 mycroft 1630:
1.27 cgd 1631: /*
1.59 mycroft 1632: * The trampoline code hands us the context.
1633: * It is unsafe to keep track of it ourselves, in the event that a
1634: * program jumps out of a signal handler.
1.27 cgd 1635: */
1.123 cgd 1636: scp = SCARG(uap, sigcntxp);
1.87 mycroft 1637: if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
1.122 mycroft 1638: return (EFAULT);
1.1 cgd 1639:
1.319 mycroft 1640: /* Restore register context. */
1641: tf = p->p_md.md_regs;
1.157 mycroft 1642: #ifdef VM86
1643: if (context.sc_eflags & PSL_VM) {
1.422 mycroft 1644: void syscall_vm86 __P((struct trapframe));
1645:
1.157 mycroft 1646: tf->tf_vm86_gs = context.sc_gs;
1647: tf->tf_vm86_fs = context.sc_fs;
1648: tf->tf_vm86_es = context.sc_es;
1649: tf->tf_vm86_ds = context.sc_ds;
1.196 mycroft 1650: set_vflags(p, context.sc_eflags);
1.422 mycroft 1651: p->p_md.md_syscall = syscall_vm86;
1.157 mycroft 1652: } else
1653: #endif
1654: {
1.196 mycroft 1655: /*
1656: * Check for security violations. If we're returning to
1657: * protected mode, the CPU will validate the segment registers
1658: * automatically and generate a trap on violations. We handle
1659: * the trap, rather than doing all of the checking here.
1660: */
1661: if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
1662: !USERMODE(context.sc_cs, context.sc_eflags))
1663: return (EINVAL);
1664:
1.157 mycroft 1665: /* %fs and %gs were restored by the trampoline. */
1666: tf->tf_es = context.sc_es;
1667: tf->tf_ds = context.sc_ds;
1.184 mycroft 1668: tf->tf_eflags = context.sc_eflags;
1.157 mycroft 1669: }
1.184 mycroft 1670: tf->tf_edi = context.sc_edi;
1671: tf->tf_esi = context.sc_esi;
1672: tf->tf_ebp = context.sc_ebp;
1673: tf->tf_ebx = context.sc_ebx;
1674: tf->tf_edx = context.sc_edx;
1675: tf->tf_ecx = context.sc_ecx;
1676: tf->tf_eax = context.sc_eax;
1677: tf->tf_eip = context.sc_eip;
1678: tf->tf_cs = context.sc_cs;
1679: tf->tf_esp = context.sc_esp;
1680: tf->tf_ss = context.sc_ss;
1.196 mycroft 1681:
1.319 mycroft 1682: /* Restore signal stack. */
1683: if (context.sc_onstack & SS_ONSTACK)
1.425 jdolecek 1684: p->p_sigctx.ps_sigstk.ss_flags |= SS_ONSTACK;
1.196 mycroft 1685: else
1.425 jdolecek 1686: p->p_sigctx.ps_sigstk.ss_flags &= ~SS_ONSTACK;
1.319 mycroft 1687:
1688: /* Restore signal mask. */
1689: (void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);
1.72 mycroft 1690:
1.122 mycroft 1691: return (EJUSTRETURN);
1.37 cgd 1692: }
1693:
1.1 cgd 1694: int waittime = -1;
1695: struct pcb dumppcb;
1696:
1.32 andrew 1697: void
1.228 gwr 1698: cpu_reboot(howto, bootstr)
1.193 mycroft 1699: int howto;
1.206 mrg 1700: char *bootstr;
1.1 cgd 1701: {
1702:
1.106 mycroft 1703: if (cold) {
1.193 mycroft 1704: howto |= RB_HALT;
1705: goto haltsys;
1.1 cgd 1706: }
1.193 mycroft 1707:
1.106 mycroft 1708: boothowto = howto;
1.193 mycroft 1709: if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
1.1 cgd 1710: waittime = 0;
1.150 mycroft 1711: vfs_shutdown();
1.59 mycroft 1712: /*
1713: * If we've been adjusting the clock, the todr
1714: * will be out of synch; adjust it now.
1715: */
1716: resettodr();
1.1 cgd 1717: }
1.193 mycroft 1718:
1719: /* Disable interrupts. */
1.1 cgd 1720: splhigh();
1.193 mycroft 1721:
1722: /* Do a dump if requested. */
1723: if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
1724: dumpsys();
1725:
1726: haltsys:
1727: doshutdownhooks();
1728:
1.307 thorpej 1729: if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
1.208 jtk 1730: #if NAPM > 0 && !defined(APM_NO_POWEROFF)
1731: /* turn off, if we can. But try to turn disk off and
1732: * wait a bit first--some disk drives are slow to clean up
1733: * and users have reported disk corruption.
1734: */
1735: delay(500000);
1736: apm_set_powstate(APM_DEV_DISK(0xff), APM_SYS_OFF);
1737: delay(500000);
1738: apm_set_powstate(APM_DEV_ALLDEVS, APM_SYS_OFF);
1.307 thorpej 1739: printf("WARNING: powerdown failed!\n");
1740: /*
1741: * RB_POWERDOWN implies RB_HALT... fall into it...
1742: */
1.208 jtk 1743: #endif
1.307 thorpej 1744: }
1745:
1746: if (howto & RB_HALT) {
1.210 christos 1747: printf("\n");
1748: printf("The operating system has halted.\n");
1749: printf("Please press any key to reboot.\n\n");
1.300 drochner 1750: cnpollc(1); /* for proper keyboard command handling */
1.12 cgd 1751: cngetc();
1.300 drochner 1752: cnpollc(0);
1.1 cgd 1753: }
1.193 mycroft 1754:
1.210 christos 1755: printf("rebooting...\n");
1.328 bouyer 1756: if (cpureset_delay > 0)
1757: delay(cpureset_delay * 1000);
1.1 cgd 1758: cpu_reset();
1759: for(;;) ;
1760: /*NOTREACHED*/
1761: }
1762:
1.116 gwr 1763: /*
1764: * These variables are needed by /sbin/savecore
1765: */
1766: u_long dumpmag = 0x8fca0101; /* magic number */
1767: int dumpsize = 0; /* pages */
1768: long dumplo = 0; /* blocks */
1769:
1770: /*
1.291 thorpej 1771: * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1772: */
1773: int
1774: cpu_dumpsize()
1775: {
1776: int size;
1777:
1778: size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
1779: ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
1780: if (roundup(size, dbtob(1)) != dbtob(1))
1781: return (-1);
1782:
1783: return (1);
1784: }
1785:
1786: /*
1787: * cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped.
1788: */
1789: u_long
1790: cpu_dump_mempagecnt()
1791: {
1792: u_long i, n;
1793:
1794: n = 0;
1795: for (i = 0; i < mem_cluster_cnt; i++)
1796: n += atop(mem_clusters[i].size);
1797: return (n);
1798: }
1799:
1800: /*
1801: * cpu_dump: dump the machine-dependent kernel core dump headers.
1802: */
/*
 * cpu_dump: write the machine-dependent kernel core dump headers
 * (segment header, cpu_kcore_hdr_t, memory segment descriptors) as a
 * single disk block at `dumplo'.  Returns the device dump routine's
 * error code, 0 on success.
 */
int
cpu_dump()
{
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	char buf[dbtob(1)];	/* one disk block; see cpu_dumpsize() */
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
	int i;

	dump = bdevsw[major(dumpdev)].d_dump;

	/* Lay out the three headers back-to-back, ALIGN()ed, in buf. */
	memset(buf, 0, sizeof buf);
	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
	memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
	    ALIGN(sizeof(*cpuhdrp))];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdrp->ptdpaddr = PTDpaddr;
	cpuhdrp->nmemsegs = mem_cluster_cnt;

	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		memsegp[i].start = mem_clusters[i].start;
		memsegp[i].size = mem_clusters[i].size;
	}

	return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
}
1843:
1844: /*
1.228 gwr 1845: * This is called by main to set dumplo and dumpsize.
1.414 thorpej 1846: * Dumps always skip the first PAGE_SIZE of disk space
1.116 gwr 1847: * in case there might be a disk label stored there.
1848: * If there is extra space, put dump at the end to
1849: * reduce the chance that swapping trashes it.
1850: */
void
cpu_dumpconf()
{
	int nblks, dumpblks;	/* size of dump area */
	int maj;

	if (dumpdev == NODEV)
		goto bad;
	maj = major(dumpdev);
	if (maj < 0 || maj >= nblkdev)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdevsw[maj].d_psize == NULL)
		goto bad;
	nblks = (*bdevsw[maj].d_psize)(dumpdev);
	/* Partition must be larger than the reserved label area. */
	if (nblks <= ctod(1))
		goto bad;

	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		goto bad;
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		goto bad;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();
	return;

bad:
	/* dumpsize == 0 marks the dump device unusable; see dumpsys(). */
	dumpsize = 0;
}
1887:
1.1 cgd 1888: /*
1889: * Doadump comes here after turning off memory management and
1890: * getting on the dump stack, either when called above, or by
1891: * the auto-restart code.
1892: */
1.414 thorpej 1893: #define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize XXX small */
1.314 thorpej 1894: static vaddr_t dumpspace;
1.163 cgd 1895:
1.314 thorpej 1896: vaddr_t
1.163 cgd 1897: reserve_dumppages(p)
1.314 thorpej 1898: vaddr_t p;
1.163 cgd 1899: {
1900:
1901: dumpspace = p;
1902: return (p + BYTES_PER_DUMP);
1903: }
1904:
/*
 * dumpsys --
 *	Write a crash dump of physical memory to the configured dump
 *	device: save our registers, emit the machine-dependent header
 *	via cpu_dump(), then copy every physical memory cluster out in
 *	BYTES_PER_DUMP-sized chunks through the single mapping window
 *	reserved by reserve_dumppages().
 */
void
dumpsys()
{
	u_long totalbytesleft, bytes, i, n, memseg;
	u_long maddr;
	int psize;
	daddr_t blkno;
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	/* No dump device configured at all -- nothing we can do. */
	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	/*
	 * Query the partition size; -1 means the device can't tell us.
	 * NOTE(review): psize is otherwise unused -- there is no check
	 * that the dump actually fits in the partition.  TODO confirm
	 * that cpu_dumpconf() already guarantees this.
	 */
	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work.  grr. */
	/* toss any characters present prior to dump */
	while (sget() != NULL); /*syscons and pccons differ */
#endif

	/* Write the dump header first; bail out on any error. */
	if ((error = cpu_dump()) != 0)
		goto err;

	totalbytesleft = ptoa(cpu_dump_mempagecnt());
	blkno = dumplo + cpu_dumpsize();
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;

	/* Walk each physical memory cluster recorded at boot. */
	for (memseg = 0; memseg < mem_cluster_cnt; memseg++) {
		maddr = mem_clusters[memseg].start;
		bytes = mem_clusters[memseg].size;

		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
			/* Print out how many MBs we have left to go. */
			if ((totalbytesleft % (1024*1024)) == 0)
				printf("%ld ", totalbytesleft / (1024 * 1024));

			/* Limit size for next transfer. */
			n = bytes - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			/*
			 * Re-point the dump window at the current
			 * physical chunk, then write it out.
			 */
			(void) pmap_map(dumpspace, maddr, maddr + n,
			    VM_PROT_READ);

			error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
			if (error)
				goto err;
			maddr += n;
			blkno += btodb(n);		/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
			/* operator aborting dump? */
			if (sget() != NULL) {
				error = EINTR;
				break;
			}
#endif
		}
	}

 err:
	/* Report the final status of the dump to the console. */
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
}
2022:
2023: /*
2024: * Clear registers on exec
2025: */
/*
 * setregs --
 *	Reset the process's machine state for a fresh execve(): drop any
 *	FPU and per-process LDT state, then build a clean user trapframe
 *	that starts execution at the new image's entry point with the
 *	standard flat user segments.
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct trapframe *tf;

#if NNPX > 0
	/* If we were using the FPU, forget about it. */
	if (npxproc == p)
		npxdrop();
#endif

#ifdef USER_LDT
	/* Release any private LDT installed by the previous image. */
	pmap_ldt_cleanup(p);
#endif

	/* Fresh FPU state: mark unused and set the default control word. */
	p->p_md.md_flags &= ~MDP_USEDFPU;
	pcb->pcb_flags = 0;
	pcb->pcb_savefpu.sv_env.en_cw = __NetBSD_NPXCW__;

	/*
	 * Load the live %gs/%fs with user data selectors now; the
	 * remaining segment registers are set via the trapframe and
	 * take effect on return to user mode.
	 */
	tf = p->p_md.md_regs;
	__asm("movl %w0,%%gs" : : "r" (LSEL(LUDATA_SEL, SEL_UPL)));
	__asm("movl %w0,%%fs" : : "r" (LSEL(LUDATA_SEL, SEL_UPL)));
	tf->tf_es = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_ds = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_edi = 0;
	tf->tf_esi = 0;
	tf->tf_ebp = 0;
	tf->tf_ebx = (int)PS_STRINGS;	/* ps_strings address for the image */
	tf->tf_edx = 0;
	tf->tf_ecx = 0;
	tf->tf_eax = 0;
	tf->tf_eip = pack->ep_entry;
	tf->tf_cs = LSEL(LUCODE_SEL, SEL_UPL);
	tf->tf_eflags = PSL_USERSET;
	tf->tf_esp = stack;
	tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL);
}
2067:
2068: /*
1.55 brezak 2069: * Initialize segments and descriptor tables
1.1 cgd 2070: */
2071:
1.275 mycroft 2072: union descriptor *idt, *gdt, *ldt;
2073: #ifdef I586_CPU
2074: union descriptor *pentium_idt;
2075: #endif
1.178 mycroft 2076: extern struct user *proc0paddr;
1.49 brezak 2077:
1.178 mycroft 2078: void
2079: setgate(gd, func, args, type, dpl)
2080: struct gate_descriptor *gd;
2081: void *func;
2082: int args, type, dpl;
2083: {
1.1 cgd 2084:
1.178 mycroft 2085: gd->gd_looffset = (int)func;
2086: gd->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
2087: gd->gd_stkcpy = args;
2088: gd->gd_xx = 0;
2089: gd->gd_type = type;
2090: gd->gd_dpl = dpl;
2091: gd->gd_p = 1;
2092: gd->gd_hioffset = (int)func >> 16;
2093: }
2094:
2095: void
2096: setregion(rd, base, limit)
2097: struct region_descriptor *rd;
2098: void *base;
2099: size_t limit;
2100: {
2101:
2102: rd->rd_limit = (int)limit;
2103: rd->rd_base = (int)base;
2104: }
1.1 cgd 2105:
1.174 mycroft 2106: void
2107: setsegment(sd, base, limit, type, dpl, def32, gran)
2108: struct segment_descriptor *sd;
2109: void *base;
2110: size_t limit;
2111: int type, dpl, def32, gran;
2112: {
1.1 cgd 2113:
1.174 mycroft 2114: sd->sd_lolimit = (int)limit;
2115: sd->sd_lobase = (int)base;
2116: sd->sd_type = type;
2117: sd->sd_dpl = dpl;
2118: sd->sd_p = 1;
2119: sd->sd_hilimit = (int)limit >> 16;
2120: sd->sd_xx = 0;
2121: sd->sd_def32 = def32;
2122: sd->sd_gran = gran;
2123: sd->sd_hibase = (int)base >> 24;
2124: }
1.1 cgd 2125:
2126: #define IDTVEC(name) __CONCAT(X, name)
1.299 mycroft 2127: typedef void (vector) __P((void));
2128: extern vector IDTVEC(syscall);
2129: extern vector IDTVEC(osyscall);
2130: extern vector *IDTVEC(exceptions)[];
1.333 christos 2131: #ifdef COMPAT_SVR4
2132: extern vector IDTVEC(svr4_fasttrap);
2133: #endif /* COMPAT_SVR4 */
1.1 cgd 2134:
1.381 thorpej 2135: #define KBTOB(x) ((size_t)(x) * 1024UL)
2136:
#if !defined(REALBASEMEM) && !defined(REALEXTMEM)
/*
 * add_mem_cluster --
 *	Record one BIOS memory-map segment [seg_start, seg_end) of the
 *	given type in mem_clusters[] and account it in physmem/avail_end,
 *	after reserving its range in the iomem extent map.  Segments
 *	reaching past 4GB are skipped: the extent code and this port
 *	only handle 32-bit physical addresses here.
 */
void
add_mem_cluster(seg_start, seg_end, type)
	u_int64_t seg_start, seg_end;
	u_int32_t type;
{
	extern struct extent *iomem_ex;

	if (seg_end > 0x100000000ULL) {
		printf("WARNING: skipping large "
		    "memory map entry: "
		    "0x%qx/0x%qx/0x%x\n",
		    seg_start,
		    (seg_end - seg_start),
		    type);
		return;
	}

	/*
	 * XXX Chop the last page off the size so that
	 * XXX it can fit in avail_end.
	 */
	if (seg_end == 0x100000000ULL)
		seg_end -= PAGE_SIZE;

	if (seg_end <= seg_start)
		return;

	/*
	 * Allocate the physical addresses used by RAM
	 * from the iomem extent map.  This is done before
	 * the addresses are page rounded just to make
	 * sure we get them all.
	 */
	if (extent_alloc_region(iomem_ex, seg_start,
	    seg_end - seg_start, EX_NOWAIT)) {
		/* XXX What should we do? */
		printf("WARNING: CAN'T ALLOCATE "
		    "MEMORY SEGMENT "
		    "(0x%qx/0x%qx/0x%x) FROM "
		    "IOMEM EXTENT MAP!\n",
		    seg_start, seg_end - seg_start, type);
	}

	/*
	 * If it's not free memory, skip it.
	 */
	if (type != BIM_Memory)
		return;

	/* XXX XXX XXX */
	if (mem_cluster_cnt >= VM_PHYSSEG_MAX)
		panic("init386: too many memory segments");

	/* Keep only whole pages of the segment. */
	seg_start = round_page(seg_start);
	seg_end = trunc_page(seg_end);

	if (seg_start == seg_end)
		return;

	mem_clusters[mem_cluster_cnt].start = seg_start;
	mem_clusters[mem_cluster_cnt].size =
	    seg_end - seg_start;

	/* Track the highest usable physical address seen so far. */
	if (avail_end < seg_end)
		avail_end = seg_end;
	physmem += atop(mem_clusters[mem_cluster_cnt].size);
	mem_cluster_cnt++;
}
#endif /* !defined(REALBASEMEM) && !defined(REALEXTMEM) */
2207:
/*
 * init386 --
 *	Machine-dependent bootstrap, called with first_avail pointing at
 *	the first free VA past the kernel image.  Sets up the console,
 *	pmap, physical memory clusters (from the BIOS memory map when
 *	available), the VM free lists, the message buffer, the GDT/LDT/
 *	IDT, optional debuggers, and interrupt infrastructure.
 */
void
init386(first_avail)
	vaddr_t first_avail;
{
	extern void consinit __P((void));
	extern struct extent *iomem_ex;
#if !defined(REALBASEMEM) && !defined(REALEXTMEM)
	struct btinfo_memmap *bim;
#endif
	struct region_descriptor region;
	int x, first16q;
	u_int64_t seg_start, seg_end;
	u_int64_t seg_start1, seg_end1;
#if NBIOSCALL > 0
	extern int biostramp_image_size;
	extern u_char biostramp_image[];
#endif

	/* proc0's u-area was set up by locore; hook it up. */
	proc0.p_addr = proc0paddr;
	curpcb = &proc0.p_addr->u_pcb;

	i386_bus_space_init();

	consinit();	/* XXX SHOULD NOT BE DONE HERE */

	/*
	 * Initialize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

	/*
	 * A quick sanity check.
	 */
	if (PAGE_SIZE != NBPG)
		panic("init386: PAGE_SIZE != NBPG");

	/*
	 * Start with 2 color bins -- this is just a guess to get us
	 * started.  We'll recolor when we determine the largest cache
	 * sizes on the system.
	 */
	uvmexp.ncolors = 2;

#if NBIOSCALL > 0
	avail_start = 3*PAGE_SIZE; /* save us a page for trampoline code and
				      one additional PT page! */
#else
	avail_start = PAGE_SIZE; /* BIOS leaves data in low memory */
				 /* and VM system doesn't work with phys 0 */
#endif

	/*
	 * Call pmap initialization to make new kernel address space.
	 * We must do this before loading pages into the VM system.
	 */
	pmap_bootstrap((vaddr_t)atdevbase + IOM_SIZE);

#if !defined(REALBASEMEM) && !defined(REALEXTMEM)
	/*
	 * Check to see if we have a memory map from the BIOS (passed
	 * to us by the boot program).
	 */
	bim = lookup_bootinfo(BTINFO_MEMMAP);
	if (bim != NULL && bim->num > 0) {
#if DEBUG_MEMLOAD
		printf("BIOS MEMORY MAP (%d ENTRIES):\n", bim->num);
#endif
		for (x = 0; x < bim->num; x++) {
#if DEBUG_MEMLOAD
			printf("    addr 0x%qx  size 0x%qx  type 0x%x\n",
			    bim->entry[x].addr,
			    bim->entry[x].size,
			    bim->entry[x].type);
#endif

			/*
			 * If the segment is not memory, skip it.
			 */
			switch (bim->entry[x].type) {
			case BIM_Memory:
			case BIM_ACPI:
			case BIM_NVS:
				break;
			default:
				continue;
			}

			/*
			 * Sanity check the entry.
			 * XXX Need to handle uint64_t in extent code
			 * XXX and 64-bit physical addresses in i386
			 * XXX port.
			 */
			seg_start = bim->entry[x].addr;
			seg_end = bim->entry[x].addr + bim->entry[x].size;

			/*
			 * Avoid Compatibility Holes.
			 * XXX Holes within memory space that allow access
			 * XXX to be directed to the PC-compatible frame buffer
			 * XXX (0xa0000-0xbffff), to adapter ROM space
			 * XXX (0xc0000-0xdffff), and to system BIOS space
			 * XXX (0xe0000-0xfffff).
			 * XXX Some laptops (for example, Toshiba Satellite
			 * XXX 2550X) report this area as memory, which caused
			 * XXX problems, so we split around it here.
			 */
			if (seg_start < 0x100000 && seg_end > 0xa0000) {
				printf("WARNING: memory map entry overlaps "
				    "with ``Compatibility Holes'': "
				    "0x%qx/0x%qx/0x%x\n", seg_start,
				    seg_end - seg_start, bim->entry[x].type);
				add_mem_cluster(seg_start, 0xa0000,
				    bim->entry[x].type);
				add_mem_cluster(0x100000, seg_end,
				    bim->entry[x].type);
			} else
				add_mem_cluster(seg_start, seg_end,
				    bim->entry[x].type);
		}
	}
#endif /* ! REALBASEMEM && ! REALEXTMEM */

	/*
	 * If the loop above didn't find any valid segment, fall back to
	 * former code.
	 */
	if (mem_cluster_cnt == 0) {
		/*
		 * Allocate the physical addresses used by RAM from the iomem
		 * extent map.  This is done before the addresses are
		 * page rounded just to make sure we get them all.
		 */
		if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
		mem_clusters[0].start = 0;
		mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
		physmem += atop(mem_clusters[0].size);
		if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
		    EX_NOWAIT)) {
			/* XXX What should we do? */
			printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM "
			    "IOMEM EXTENT MAP!\n");
		}
#if NISADMA > 0
		/*
		 * Some motherboards/BIOSes remap the 384K of RAM that would
		 * normally be covered by the ISA hole to the end of memory
		 * so that it can be used.  However, on a 16M system, this
		 * would cause bounce buffers to be allocated and used.
		 * This is not desirable behaviour, as more than 384K of
		 * bounce buffers might be allocated.  As a work-around,
		 * we round memory down to the nearest 1M boundary if
		 * we're using any isadma devices and the remapped memory
		 * is what puts us over 16M.
		 */
		if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
			char pbuf[9];

			format_bytes(pbuf, sizeof(pbuf),
			    biosextmem - (15*1024));
			printf("Warning: ignoring %s of remapped memory\n",
			    pbuf);
			biosextmem = (15*1024);
		}
#endif
		mem_clusters[1].start = IOM_END;
		mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
		physmem += atop(mem_clusters[1].size);

		mem_cluster_cnt = 2;

		avail_end = IOM_END + trunc_page(KBTOB(biosextmem));
	}

	/*
	 * If we have 16M of RAM or less, just put it all on
	 * the default free list.  Otherwise, put the first
	 * 16M of RAM on a lower priority free list (so that
	 * all of the ISA DMA'able memory won't be eaten up
	 * first-off).
	 */
	if (avail_end <= (16 * 1024 * 1024))
		first16q = VM_FREELIST_DEFAULT;
	else
		first16q = VM_FREELIST_FIRST16;

	/* Make sure the end of the space used by the kernel is rounded. */
	first_avail = round_page(first_avail);

	/*
	 * Now, load the memory clusters (which have already been
	 * rounded and truncated) into the VM system.
	 *
	 * NOTE: WE ASSUME THAT MEMORY STARTS AT 0 AND THAT THE KERNEL
	 * IS LOADED AT IOM_END (1M).
	 */
	for (x = 0; x < mem_cluster_cnt; x++) {
		seg_start = mem_clusters[x].start;
		seg_end = mem_clusters[x].start + mem_clusters[x].size;
		seg_start1 = 0;
		seg_end1 = 0;

		/*
		 * Skip memory before our available starting point.
		 */
		if (seg_end <= avail_start)
			continue;

		if (avail_start >= seg_start && avail_start < seg_end) {
			if (seg_start != 0)
				panic("init386: memory doesn't start at 0");
			seg_start = avail_start;
			if (seg_start == seg_end)
				continue;
		}

		/*
		 * If this segment contains the kernel, split it
		 * in two, around the kernel.
		 */
		if (seg_start <= IOM_END && first_avail <= seg_end) {
			seg_start1 = first_avail;
			seg_end1 = seg_end;
			seg_end = IOM_END;
		}

		/* First hunk */
		if (seg_start != seg_end) {
			/*
			 * The portion below 16M goes on the ISA-DMA'able
			 * free list when one is in use.
			 */
			if (seg_start <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    seg_start, tmp,
				    atop(seg_start), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(tmp), atop(seg_start),
				    atop(tmp), first16q);
				seg_start = tmp;
			}

			if (seg_start != seg_end) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    seg_start, seg_end,
				    atop(seg_start), atop(seg_end));
#endif
				uvm_page_physload(atop(seg_start),
				    atop(seg_end), atop(seg_start),
				    atop(seg_end), VM_FREELIST_DEFAULT);
			}
		}

		/* Second hunk */
		if (seg_start1 != seg_end1) {
			/* Same below-16M split as for the first hunk. */
			if (seg_start1 <= (16 * 1024 * 1024) &&
			    first16q != VM_FREELIST_DEFAULT) {
				u_int64_t tmp;

				if (seg_end1 > (16 * 1024 * 1024))
					tmp = (16 * 1024 * 1024);
				else
					tmp = seg_end1;
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    seg_start1, tmp,
				    atop(seg_start1), atop(tmp));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(tmp), atop(seg_start1),
				    atop(tmp), first16q);
				seg_start1 = tmp;
			}

			if (seg_start1 != seg_end1) {
#if DEBUG_MEMLOAD
				printf("loading 0x%qx-0x%qx (0x%lx-0x%lx)\n",
				    seg_start1, seg_end1,
				    atop(seg_start1), atop(seg_end1));
#endif
				uvm_page_physload(atop(seg_start1),
				    atop(seg_end1), atop(seg_start1),
				    atop(seg_end1), VM_FREELIST_DEFAULT);
			}
		}
	}

	/*
	 * Steal memory for the message buffer (at end of core).
	 */
	{
		struct vm_physseg *vps;
		psize_t sz = round_page(MSGBUFSIZE);
		psize_t reqsz = sz;

		/* Find the physseg that ends at avail_end. */
		for (x = 0; x < vm_nphysseg; x++) {
			vps = &vm_physmem[x];
			if (ptoa(vps->avail_end) == avail_end)
				break;
		}
		if (x == vm_nphysseg)
			panic("init386: can't find end of memory");

		/* Shrink so it'll fit in the last segment. */
		if ((vps->avail_end - vps->avail_start) < atop(sz))
			sz = ptoa(vps->avail_end - vps->avail_start);

		vps->avail_end -= atop(sz);
		vps->end -= atop(sz);
		msgbuf_paddr = ptoa(vps->avail_end);

		/* Remove the last segment if it now has no pages. */
		if (vps->start == vps->end) {
			for (vm_nphysseg--; x < vm_nphysseg; x++)
				vm_physmem[x] = vm_physmem[x + 1];
		}

		/* Now find where the new avail_end is. */
		for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
			if (vm_physmem[x].avail_end > avail_end)
				avail_end = vm_physmem[x].avail_end;
		avail_end = ptoa(avail_end);

		/* Warn if the message buffer had to be shrunk. */
		if (sz != reqsz)
			printf("WARNING: %ld bytes not available for msgbuf "
			    "in last cluster (%ld used)\n", reqsz, sz);
	}

#if NBIOSCALL > 0
	/* install page 2 (reserved above) as PT page for first 4M */
	pmap_enter(pmap_kernel(), (vaddr_t)vtopte(0), 2*PAGE_SIZE,
	    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
	pmap_update();
	memset(vtopte(0), 0, PAGE_SIZE);/* make sure it is clean before using */

	/*
	 * this should be caught at kernel build time, but put it here
	 * in case someone tries to fake it out...
	 */
#ifdef DIAGNOSTIC
	if (biostramp_image_size > PAGE_SIZE)
		panic("biostramp_image_size too big: %x vs. %x\n",
		    biostramp_image_size, PAGE_SIZE);
#endif
	/* Identity-map the BIOS trampoline page and copy the code in. */
	pmap_kenter_pa((vaddr_t)BIOSTRAMP_BASE,	/* virtual */
	    (paddr_t)BIOSTRAMP_BASE,	/* physical */
	    VM_PROT_ALL);		/* protection */
	pmap_update();
	memcpy((caddr_t)BIOSTRAMP_BASE, biostramp_image, biostramp_image_size);
#ifdef DEBUG_BIOSCALL
	printf("biostramp installed @ %x\n", BIOSTRAMP_BASE);
#endif
#endif

	/* Map the page that holds the IDT (and GDT/LDT; see below). */
	pmap_enter(pmap_kernel(), idt_vaddr, idt_paddr,
	    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
	pmap_update();
	idt = (union descriptor *)idt_vaddr;
#ifdef I586_CPU
	/*
	 * Second, read-only mapping of the IDT (Pentium F00F bug
	 * workaround mapping -- TODO confirm).
	 */
	pmap_enter(pmap_kernel(), pentium_idt_vaddr, idt_paddr,
	    VM_PROT_READ, PMAP_WIRED|VM_PROT_READ);
	pmap_update();
	pentium_idt = (union descriptor *)pentium_idt_vaddr;
#endif
	/* GDT and LDT live in the same page, right after the IDT. */
	gdt = idt + NIDT;
	ldt = gdt + NGDT;


	/* make gdt gates and memory segments */
	setsegment(&gdt[GCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 1, 1);
	setsegment(&gdt[GDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 1);
	setsegment(&gdt[GLDT_SEL].sd, ldt, NLDT * sizeof(ldt[0]) - 1,
	    SDT_SYSLDT, SEL_KPL, 0, 0);
	setsegment(&gdt[GUCODE_SEL].sd, 0, i386_btop(VM_MAXUSER_ADDRESS) - 1,
	    SDT_MEMERA, SEL_UPL, 1, 1);
	setsegment(&gdt[GUDATA_SEL].sd, 0, i386_btop(VM_MAXUSER_ADDRESS) - 1,
	    SDT_MEMRWA, SEL_UPL, 1, 1);
#if NBIOSCALL > 0
	/* bios trampoline GDT entries */
	setsegment(&gdt[GBIOSCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 0,
	    0);
	setsegment(&gdt[GBIOSDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 0,
	    0);
#endif

	/* make ldt gates and memory segments */
	setgate(&ldt[LSYS5CALLS_SEL].gd, &IDTVEC(osyscall), 1,
	    SDT_SYS386CGT, SEL_UPL);
	ldt[LUCODE_SEL] = gdt[GUCODE_SEL];
	ldt[LUDATA_SEL] = gdt[GUDATA_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* exceptions */
	for (x = 0; x < 32; x++)
		setgate(&idt[x].gd, IDTVEC(exceptions)[x], 0, SDT_SYS386TGT,
		    (x == 3 || x == 4) ? SEL_UPL : SEL_KPL);

	/* new-style interrupt gate for syscalls */
	setgate(&idt[128].gd, &IDTVEC(syscall), 0, SDT_SYS386TGT, SEL_UPL);
#ifdef COMPAT_SVR4
	setgate(&idt[0xd2].gd, &IDTVEC(svr4_fasttrap), 0, SDT_SYS386TGT,
	    SEL_UPL);
#endif /* COMPAT_SVR4 */

	/* Load the new GDT and IDT. */
	setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1);
	lgdt(&region);
#ifdef I586_CPU
	setregion(&region, pentium_idt, NIDT * sizeof(idt[0]) - 1);
#else
	setregion(&region, idt, NIDT * sizeof(idt[0]) - 1);
#endif
	lidt(&region);


#ifdef DDB
	{
		extern int end;
		extern int *esym;
		struct btinfo_symtab *symtab;

		/* Prefer the symbol table handed to us by the bootloader. */
		symtab = lookup_bootinfo(BTINFO_SYMTAB);
		if (symtab) {
			symtab->ssym += KERNBASE;
			symtab->esym += KERNBASE;
			ddb_init(symtab->nsym, (int *)symtab->ssym,
			    (int *)symtab->esym);
		}
		else
			ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
	}
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef IPKDB
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif
#ifdef KGDB
	kgdb_port_init();
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#if NMCA > 0
	/* check for MCA bus, needed to be done before ISA stuff - if
	 * MCA is detected, ISA needs to use level triggered interrupts
	 * by default */
	mca_busprobe();
#endif

#if NISA > 0
	isa_defaultirq();
#endif

	/* Initialize software interrupts. */
	softintr_init();

	splraise(-1);
	enable_intr();

	if (physmem < btoc(2 * 1024 * 1024)) {
		printf("warning: too little memory available; "
		    "have %lu bytes, want %lu bytes\n"
		    "running in degraded mode\n"
		    "press a key to confirm\n\n",
		    ptoa(physmem), 2*1024*1024UL);
		cngetc();
	}

	identifycpu(curcpu());
}
2694:
struct queue {
	struct queue *q_next, *q_prev;
};

/*
 * insert an element into a queue
 *
 * Links the element in immediately after the queue head of a
 * circular doubly-linked list.
 */
void
_insque(v1, v2)
	void *v1;
	void *v2;
{
	struct queue *elem = v1, *head = v2;
	struct queue *follower;

	follower = head->q_next;
	elem->q_prev = head;
	elem->q_next = follower;
	follower->q_prev = elem;
	head->q_next = elem;
}
2716:
2717: /*
2718: * remove an element from a queue
2719: */
1.94 mycroft 2720: void
1.188 christos 2721: _remque(v)
2722: void *v;
1.94 mycroft 2723: {
1.298 mycroft 2724: struct queue *elem = v;
2725: struct queue *next, *prev;
1.94 mycroft 2726:
2727: next = elem->q_next;
2728: prev = elem->q_prev;
2729: next->q_prev = prev;
2730: prev->q_next = next;
2731: elem->q_prev = 0;
1.1 cgd 2732: }
2733:
#ifdef COMPAT_NOMID
/*
 * exec_nomid --
 *	Recognize and set up a.out executables whose header carries the
 *	MID_ZERO machine ID: 386BSD ZMAGIC and BSDI QMAGIC/NMAGIC/OMAGIC
 *	images.  Returns 0 on success or ENOEXEC when the header doesn't
 *	match any supported format.
 */
static int
exec_nomid(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error;
	u_long midmag, magic;
	u_short mid;
	struct exec *execp = epp->ep_hdr;

	/* check on validity of epp->ep_hdr performed by exec_out_makecmds */

	midmag = ntohl(execp->a_midmag);
	mid = (midmag >> 16) & 0xffff;
	magic = midmag & 0xffff;

	if (magic == 0) {
		/*
		 * No magic in the byte-swapped word; re-read the low
		 * 16 bits unswapped (presumably a native-endian
		 * old-style header -- TODO confirm) and force MID_ZERO.
		 */
		magic = (execp->a_midmag & 0xffff);
		mid = MID_ZERO;
	}

	/* Recombine into a canonical MID|magic key for the dispatch below. */
	midmag = mid << 16 | magic;

	switch (midmag) {
	case (MID_ZERO << 16) | ZMAGIC:
		/*
		 * 386BSD's ZMAGIC format:
		 */
		error = exec_aout_prep_oldzmagic(p, epp);
		break;

	case (MID_ZERO << 16) | QMAGIC:
		/*
		 * BSDI's QMAGIC format:
		 * same as new ZMAGIC format, but with different magic number
		 */
		error = exec_aout_prep_zmagic(p, epp);
		break;

	case (MID_ZERO << 16) | NMAGIC:
		/*
		 * BSDI's NMAGIC format:
		 * same as NMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldnmagic(p, epp);
		break;

	case (MID_ZERO << 16) | OMAGIC:
		/*
		 * BSDI's OMAGIC format:
		 * same as OMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldomagic(p, epp);
		break;

	default:
		error = ENOEXEC;
	}

	return error;
}
#endif
1.107 deraadt 2799:
/*
 * cpu_exec_aout_makecmds():
 *	cpu-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * On the i386, old (386bsd) ZMAGIC binaries and BSDI QMAGIC binaries
 * are accepted if COMPAT_NOMID is given as a kernel option.
 */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error = ENOEXEC;

#ifdef COMPAT_NOMID
	/* A zero return means exec_nomid() recognized and set up the image. */
	if ((error = exec_nomid(p, epp)) == 0)
		return error;
#endif /* COMPAT_NOMID */

	return error;
}
1.84 cgd 2824:
1.255 drochner 2825: void *
2826: lookup_bootinfo(type)
2827: int type;
2828: {
2829: struct btinfo_common *help;
2830: int n = *(int*)bootinfo;
2831: help = (struct btinfo_common *)(bootinfo + sizeof(int));
2832: while(n--) {
2833: if(help->type == type)
2834: return(help);
2835: help = (struct btinfo_common *)((char*)help + help->len);
2836: }
2837: return(0);
2838: }
1.149 mycroft 2839:
/*
 * cpu_reset --
 *	Reset the machine: first by pulsing the keyboard controller's
 *	reset output line, then by forcing a triple fault with a zeroed
 *	IDT.  Never returns.
 */
void
cpu_reset()
{

	disable_intr();

	/*
	 * The keyboard controller has 4 random output pins, one of which is
	 * connected to the RESET pin on the CPU in many PCs.  We tell the
	 * keyboard controller to pulse this line a couple of times.
	 */
	outb(IO_KBD + KBCMDP, KBC_PULSE0);
	delay(100000);
	outb(IO_KBD + KBCMDP, KBC_PULSE0);
	delay(100000);

	/*
	 * Try to cause a triple fault and watchdog reset by making the IDT
	 * invalid and causing a fault.
	 */
	memset((caddr_t)idt, 0, NIDT * sizeof(idt[0]));
	/* Divide-by-zero traps; with no valid IDT this triple-faults. */
	__asm __volatile("divl %0,%1" : : "q" (0), "a" (0));

#if 0
	/*
	 * Try to cause a triple fault and watchdog reset by unmapping the
	 * entire address space and doing a TLB flush.
	 */
	memset((caddr_t)PTD, 0, PAGE_SIZE);
	tlbflush();
#endif

	/* Spin until one of the reset attempts takes effect. */
	for (;;);
}
CVSweb <webmaster@jp.NetBSD.org>