/*	$NetBSD: machdep.c,v 1.386 2000/05/26 21:19:45 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_ipkdb.h"
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include "opt_compat_netbsd.h"
#include "opt_cpureset_delay.h"
#include "opt_compat_svr4.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef IPKDB
#include
#endif

#ifdef KGDB
#include
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef DDB
#include
#include
#endif

#ifdef VM86
#include
#endif

#include "apm.h"
#include "bioscall.h"

#if NBIOSCALL > 0
#include
#endif

#if NAPM > 0
#include
#endif

#include "isa.h"
#include "isadma.h"
#include "npx.h"
#if NNPX > 0
extern struct proc *npxproc;
#endif

#include "mca.h"
#if NMCA > 0
#include			/* for mca_busprobe() */
#endif

/* the following is used externally (sysctl_hw) */
char machine[] = "i386";		/* cpu "architecture" */
char machine_arch[] = "i386";		/* machine == machine_arch */

char bootinfo[BOOTINFO_MAXSIZE];

/* Our exported CPU info; we have only one right now. */
struct cpu_info cpu_info_store;

struct bi_devmatch *i386_alldisks = NULL;
int i386_ndisks = 0;

#ifdef CPURESET_DELAY
int	cpureset_delay = CPURESET_DELAY;
#else
int	cpureset_delay = 2000;		/* default to 2s */
#endif

int	physmem;
int	dumpmem_low;
int	dumpmem_high;
int	boothowto;
int	cpu_class;
int	i386_fpu_present = 0;

#define	CPUID2MODEL(cpuid)	(((cpuid) >> 4) & 15)

vaddr_t	msgbuf_vaddr;
paddr_t msgbuf_paddr;

vaddr_t	idt_vaddr;
paddr_t	idt_paddr;

#ifdef I586_CPU
vaddr_t	pentium_idt_vaddr;
#endif

vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;

extern	paddr_t avail_start, avail_end;
extern	paddr_t hole_start, hole_end;

/*
 * Size of memory segments, before any memory is stolen.
 */
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
int	mem_cluster_cnt;

int	cpu_dump __P((void));
int	cpu_dumpsize __P((void));
u_long	cpu_dump_mempagecnt __P((void));
void	dumpsys __P((void));
void	identifycpu __P((void));
void	init386 __P((paddr_t));

#ifdef COMPAT_NOMID
static int exec_nomid __P((struct proc *, struct exec_package *));
#endif

void	cyrix6x86_cpu_setup __P((void));
void	winchip_cpu_setup __P((void));

static __inline u_char
cyrix_read_reg(u_char reg)
{

	outb(0x22, reg);
	return inb(0x23);
}

static __inline void
cyrix_write_reg(u_char reg, u_char data)
{

	outb(0x22, reg);
	outb(0x23, data);
}
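/*
 * The two helpers above use the standard Cyrix configuration-register
 * access protocol: port 0x22 is the index port and port 0x23 the data
 * port for the on-chip configuration control registers (CCRs).  A read
 * or write of port 0x23 is valid only immediately after an index write
 * to port 0x22, which is why both helpers re-issue the outb to 0x22 on
 * every access.
 */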
/*
 * Machine-dependent startup code
 */
void
cpu_startup()
{
	caddr_t v;
	int sz, x;
	vaddr_t minaddr, maxaddr;
	vsize_t size;
	char pbuf[9];
#if NBIOSCALL > 0
	extern int biostramp_image_size;
	extern u_char biostramp_image[];
#endif

	/*
	 * Initialize error message buffer (at end of core).
	 */
	msgbuf_vaddr = uvm_km_valloc(kernel_map, i386_round_page(MSGBUFSIZE));
	if (msgbuf_vaddr == 0)
		panic("failed to valloc msgbuf_vaddr");

	/* msgbuf_paddr was init'd in pmap */
	for (x = 0; x < btoc(MSGBUFSIZE); x++)
		pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * NBPG,
		    msgbuf_paddr + x * NBPG, VM_PROT_READ|VM_PROT_WRITE);

	initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));

	printf(version);
	identifycpu();

	format_bytes(pbuf, sizeof(pbuf), ptoa(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * Find out how much space we need, allocate it,
	 * and then give everything true virtual addresses.
	 */
	sz = (int)allocsys(NULL, NULL);
	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
		panic("startup: no room for tables");
	if (allocsys(v, NULL) - v != sz)
		panic("startup: table size inconsistency");

	/*
	 * Allocate virtual address space for the buffers.  The area
	 * is not managed by the VM system.
	 */
	size = MAXBSIZE * nbuf;
	if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
	    NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
		panic("cpu_startup: cannot allocate VM for buffers");
	minaddr = (vaddr_t)buffers;
	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
		/* don't want to alloc more physical mem than needed */
		bufpages = btoc(MAXBSIZE) * nbuf;
	}

	/*
	 * XXX We defer allocation of physical pages for buffers until
	 * XXX after autoconfiguration has run.  We must do this because
	 * XXX on system with large amounts of memory or with large
	 * XXX user-configured buffer caches, the buffer cache will eat
	 * XXX up all of the lower 16M of RAM.  This prevents ISA DMA
	 * XXX maps from allocating bounce pages.
	 *
	 * XXX Note that nothing can use buffer cache buffers until after
	 * XXX autoconfiguration completes!!
	 *
	 * XXX This is a hack, and needs to be replaced with a better
	 * XXX solution! --thorpej@netbsd.org, December 6, 1997
	 */

	/*
	 * Allocate a submap for exec arguments.  This map effectively
	 * limits the number of processes exec'ing at any time.
	 */
	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);

	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * Finally, allocate mbuf cluster submap.
	 */
	mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    nmbclusters * mclbytes, VM_MAP_INTRSAFE, FALSE, NULL);

	/*
	 * XXX Buffer cache pages haven't yet been allocated, so
	 * XXX we need to account for those pages when printing
	 * XXX the amount of free memory.
	 */
	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free - bufpages));
	printf("avail memory = %s\n", pbuf);
	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
	printf("using %d buffers containing %s of memory\n", nbuf, pbuf);

#if NBIOSCALL > 0
	/*
	 * this should be caught at kernel build time, but put it here
	 * in case someone tries to fake it out...
	 */
#ifdef DIAGNOSTIC
	if (biostramp_image_size > NBPG)
		panic("biostramp_image_size too big: %x vs. %x\n",
		    biostramp_image_size, NBPG);
#endif
	pmap_kenter_pa((vaddr_t)BIOSTRAMP_BASE,	/* virtual */
	    (paddr_t)BIOSTRAMP_BASE,		/* physical */
	    VM_PROT_ALL);			/* protection */
	memcpy((caddr_t)BIOSTRAMP_BASE, biostramp_image,
	    biostramp_image_size);
#ifdef DEBUG
	printf("biostramp installed @ %x\n", BIOSTRAMP_BASE);
#endif
#endif

	/* Safe for i/o port / memory space allocation to use malloc now. */
	i386_bus_space_mallocok();
}
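/*
 * Sizing note for the submaps created above: each exec in progress can
 * pin up to NCARGS bytes of argument space in exec_map, so a size of
 * 16*NCARGS permits roughly 16 simultaneous execs before further
 * callers block waiting for map space.
 */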
/*
 * Set up proc0's TSS and LDT.
 */
void
i386_proc0_tss_ldt_init()
{
	struct pcb *pcb;
	int x;

	gdt_init();

	curpcb = pcb = &proc0.p_addr->u_pcb;
	pcb->pcb_flags = 0;
	pcb->pcb_tss.tss_ioopt =
	    ((caddr_t)pcb->pcb_iomap - (caddr_t)&pcb->pcb_tss) << 16;
	for (x = 0; x < sizeof(pcb->pcb_iomap) / 4; x++)
		pcb->pcb_iomap[x] = 0xffffffff;

	pcb->pcb_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
	pcb->pcb_cr0 = rcr0();
	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	pcb->pcb_tss.tss_esp0 = (int)proc0.p_addr + USPACE - 16;
	tss_alloc(pcb);

	ltr(pcb->pcb_tss_sel);
	lldt(pcb->pcb_ldt_sel);

	proc0.p_md.md_regs = (struct trapframe *)pcb->pcb_tss.tss_esp0 - 1;
}

/*
 * XXX Finish up the deferred buffer cache allocation and initialization.
 */
void
i386_bufinit()
{
	int i, base, residual;

	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		vsize_t curbufsize;
		vaddr_t curbuf;
		struct vm_page *pg;

		/*
		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
		 * that MAXBSIZE space, we allocate and map (base+1) pages
		 * for the first "residual" buffers, and then we allocate
		 * "base" pages for the rest.
		 */
		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
		curbufsize = NBPG * ((i < residual) ? (base+1) : base);

		while (curbufsize) {
			/*
			 * Attempt to allocate buffers from the first
			 * 16M of RAM to avoid bouncing file system
			 * transfers.
			 */
			pg = uvm_pagealloc_strat(NULL, 0, NULL, 0,
			    UVM_PGA_STRAT_FALLBACK, VM_FREELIST_FIRST16);
			if (pg == NULL)
				panic("cpu_startup: not enough memory for "
				    "buffer cache");
			pmap_kenter_pgs(curbuf, &pg, 1);
			curbuf += PAGE_SIZE;
			curbufsize -= PAGE_SIZE;
		}
	}

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
}
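/*
 * Worked example of the page distribution above: with bufpages = 10 and
 * nbuf = 4, base = 2 and residual = 2, so buffers 0 and 1 map three
 * pages each while buffers 2 and 3 map two each (3+3+2+2 = 10 pages).
 */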
/*
 * Info for CTL_HW
 */
char	cpu_model[120];

/*
 * Note: these are just the ones that may not have a cpuid instruction.
 * We deal with the rest in a different way.
 */
struct cpu_nocpuid_nameclass i386_nocpuid_cpus[] = {
	{ CPUVENDOR_INTEL, "Intel", "386SX", CPUCLASS_386, NULL },	/* CPU_386SX */
	{ CPUVENDOR_INTEL, "Intel", "386DX", CPUCLASS_386, NULL },	/* CPU_386 */
	{ CPUVENDOR_INTEL, "Intel", "486SX", CPUCLASS_486, NULL },	/* CPU_486SX */
	{ CPUVENDOR_INTEL, "Intel", "486DX", CPUCLASS_486, NULL },	/* CPU_486 */
	{ CPUVENDOR_CYRIX, "Cyrix", "486DLC", CPUCLASS_486, NULL },	/* CPU_486DLC */
	{ CPUVENDOR_CYRIX, "Cyrix", "6x86", CPUCLASS_486,
		cyrix6x86_cpu_setup },					/* CPU_6x86 */
	{ CPUVENDOR_NEXGEN, "NexGen", "586", CPUCLASS_386, NULL },	/* CPU_NX586 */
};

const char *classnames[] = {
	"386",
	"486",
	"586",
	"686"
};

const char *modifiers[] = {
	"",
	"OverDrive ",
	"Dual ",
	""
};

struct cpu_cpuid_nameclass i386_cpuid_cpus[] = {
	{
		"GenuineIntel",
		CPUVENDOR_INTEL,
		"Intel",
		/* Family 4 */
		{ {
			CPUCLASS_486,
			{
				"486DX", "486DX", "486SX", "486DX2", "486SL",
				"486SX2", 0, "486DX2 W/B Enhanced",
				"486DX4", 0, 0, 0, 0, 0, 0, 0,
				"486"		/* Default */
			},
			NULL
		},
		/* Family 5 */
		{
			CPUCLASS_586,
			{
				"Pentium (P5 A-step)", "Pentium (P5)",
				"Pentium (P54C)", "Pentium (P24T)",
				"Pentium/MMX", "Pentium", 0,
				"Pentium (P54C)", "Pentium/MMX (Tillamook)",
				0, 0, 0, 0, 0, 0, 0,
				"Pentium"	/* Default */
			},
			NULL
		},
		/* Family 6 */
		{
			CPUCLASS_686,
			{
				"Pentium Pro (A-step)", "Pentium Pro", 0,
				"Pentium II (Klamath)", "Pentium Pro",
				"Pentium II (Deschutes)",
				"Pentium II (Celeron)", "Pentium III",
				"Pentium III (E)", 0, 0, 0, 0, 0, 0, 0,
				"Pentium Pro, II or III"	/* Default */
			},
			NULL
		} }
	},
	{
		"AuthenticAMD",
		CPUVENDOR_AMD,
		"AMD",
		/* Family 4 */
		{ {
			CPUCLASS_486,
			{
				0, 0, 0, "Am486DX2 W/T", 0, 0, 0,
				"Am486DX2 W/B",
				"Am486DX4 W/T or Am5x86 W/T 150",
				"Am486DX4 W/B or Am5x86 W/B 150",
				0, 0, 0, 0,
				"Am5x86 W/T 133/160", "Am5x86 W/B 133/160",
				"Am486 or Am5x86"	/* Default */
			},
			NULL
		},
		/* Family 5 */
		{
			CPUCLASS_586,
			{
				"K5", "K5", "K5", "K5", 0, 0, "K6",
				"K6", "K6-2", "K6-III", 0, 0, 0, 0, 0, 0,
				"K5 or K6"	/* Default */
			},
			NULL
		},
		/* Family 6 */
		{
			CPUCLASS_686,
			{
				0, "K7 (Athlon)", 0, 0, 0, 0, 0, 0,
				0, 0, 0, 0, 0, 0, 0, 0,
				"K7 (Athlon)"	/* Default */
			},
			NULL
		} }
	},
	{
		"CyrixInstead",
		CPUVENDOR_CYRIX,
		"Cyrix",
		/* Family 4 */
		{ {
			CPUCLASS_486,
			{
				0, 0, 0, "MediaGX", 0, 0, 0, 0,
				0, 0, 0, 0, 0, 0, 0, 0,
				"486"		/* Default */
			},
			NULL
		},
		/* Family 5 */
		{
			CPUCLASS_586,
			{
				0, 0, "6x86", 0,
				"MMX-enhanced MediaGX (GXm)", /* or Geode? */
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
				"6x86"		/* Default */
			},
			cyrix6x86_cpu_setup
		},
		/* Family 6 */
		{
			CPUCLASS_686,
			{
				"6x86MX", 0, 0, 0, 0, 0, 0, 0,
				0, 0, 0, 0, 0, 0, 0, 0,
				"6x86MX"	/* Default */
			},
			cyrix6x86_cpu_setup
		} }
	},
	{
		"CentaurHauls",
		CPUVENDOR_IDT,
		"IDT",
		/* Family 4, IDT never had any of these */
		{ {
			CPUCLASS_486,
			{
				0, 0, 0, 0, 0, 0, 0, 0,
				0, 0, 0, 0, 0, 0, 0, 0,
				"486 compatible"	/* Default */
			},
			NULL
		},
		/* Family 5 */
		{
			CPUCLASS_586,
			{
				0, 0, 0, 0, "WinChip C6", 0, 0, 0,
				"WinChip 2", "WinChip 3", 0, 0, 0, 0, 0, 0,
				"WinChip"	/* Default */
			},
			winchip_cpu_setup
		},
		/* Family 6, not yet available from IDT */
		{
			CPUCLASS_686,
			{
				0, 0, 0, 0, 0, 0, 0, 0,
				0, 0, 0, 0, 0, 0, 0, 0,
				"Pentium Pro compatible"	/* Default */
			},
			NULL
		} }
	}
};

#define CPUDEBUG

void
cyrix6x86_cpu_setup()
{
	/* set up various cyrix registers */

	/* Enable suspend on halt */
	cyrix_write_reg(0xc2, cyrix_read_reg(0xc2) | 0x08);
	/* enable access to ccr4/ccr5 */
	cyrix_write_reg(0xC3, cyrix_read_reg(0xC3) | 0x10);
	/* cyrix's workaround for the "coma bug" */
	cyrix_write_reg(0x31, cyrix_read_reg(0x31) | 0xf8);
	cyrix_write_reg(0x32, cyrix_read_reg(0x32) | 0x7f);
	cyrix_write_reg(0x33, cyrix_read_reg(0x33) & ~0xff);
	cyrix_write_reg(0x3c, cyrix_read_reg(0x3c) | 0x87);
	/* disable access to ccr4/ccr5 */
	cyrix_write_reg(0xC3, cyrix_read_reg(0xC3) & ~0x10);
}

void
winchip_cpu_setup()
{
#if defined(I586_CPU)
	extern int cpu_id;

	switch (CPUID2MODEL(cpu_id)) { /* model */
	case 4:	/* WinChip C6 */
		cpu_feature &= ~CPUID_TSC;
		printf("WARNING: WinChip C6: broken TSC disabled\n");
	}
#endif
}
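/*
 * For reference, identifycpu() below decodes the cpuid signature word
 * (cpu_id) as: bits 0-3 stepping, bits 4-7 model (CPUID2MODEL),
 * bits 8-11 family, and bits 12-13 type, which indexes the "modifiers"
 * table above.
 */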
void
identifycpu()
{
	extern char cpu_vendor[];
	extern int cpu_id;
	const char *name, *modifier, *vendorname;
	int class = CPUCLASS_386, vendor, i, max;
	int family, model, step, modif;
	struct cpu_cpuid_nameclass *cpup = NULL;
	void (*cpu_setup) __P((void));

	if (cpuid_level == -1) {
#ifdef DIAGNOSTIC
		if (cpu < 0 || cpu >=
		    (sizeof i386_nocpuid_cpus/sizeof(struct cpu_nocpuid_nameclass)))
			panic("unknown cpu type %d\n", cpu);
#endif
		name = i386_nocpuid_cpus[cpu].cpu_name;
		vendor = i386_nocpuid_cpus[cpu].cpu_vendor;
		vendorname = i386_nocpuid_cpus[cpu].cpu_vendorname;
		class = i386_nocpuid_cpus[cpu].cpu_class;
		cpu_setup = i386_nocpuid_cpus[cpu].cpu_setup;
		modifier = "";
	} else {
		max = sizeof (i386_cpuid_cpus) / sizeof (i386_cpuid_cpus[0]);
		modif = (cpu_id >> 12) & 3;
		family = (cpu_id >> 8) & 15;
		if (family < CPU_MINFAMILY)
			panic("identifycpu: strange family value");
		model = CPUID2MODEL(cpu_id);
		step = cpu_id & 15;
#ifdef CPUDEBUG
		printf("cpu0: family %x model %x step %x\n", family, model,
		    step);
#endif

		for (i = 0; i < max; i++) {
			if (!strncmp(cpu_vendor,
			    i386_cpuid_cpus[i].cpu_id, 12)) {
				cpup = &i386_cpuid_cpus[i];
				break;
			}
		}

		if (cpup == NULL) {
			vendor = CPUVENDOR_UNKNOWN;
			if (cpu_vendor[0] != '\0')
				vendorname = &cpu_vendor[0];
			else
				vendorname = "Unknown";
			if (family > CPU_MAXFAMILY)
				family = CPU_MAXFAMILY;
			class = family - 3;
			modifier = "";
			name = "";
			cpu_setup = NULL;
		} else {
			vendor = cpup->cpu_vendor;
			vendorname = cpup->cpu_vendorname;
			modifier = modifiers[modif];
			if (family > CPU_MAXFAMILY) {
				family = CPU_MAXFAMILY;
				model = CPU_DEFMODEL;
			} else if (model > CPU_MAXMODEL)
				model = CPU_DEFMODEL;
			i = family - CPU_MINFAMILY;
			name = cpup->cpu_family[i].cpu_models[model];
			if (name == NULL)
				name = cpup->cpu_family[i].cpu_models[CPU_DEFMODEL];
			class = cpup->cpu_family[i].cpu_class;
			cpu_setup = cpup->cpu_family[i].cpu_setup;
		}
	}

	sprintf(cpu_model, "%s %s%s (%s-class)", vendorname, modifier, name,
	    classnames[class]);
	printf("cpu0: %s\n", cpu_model);

	cpu_class = class;

	/*
	 * Now that we have told the user what they have,
	 * let them know if that machine type isn't configured.
	 */
	switch (cpu_class) {
#if !defined(I386_CPU) && !defined(I486_CPU) && !defined(I586_CPU) && !defined(I686_CPU)
#error No CPU classes configured.
#endif
#ifndef I686_CPU
	case CPUCLASS_686:
		printf("NOTICE: this kernel does not support Pentium Pro CPU class\n");
#ifdef I586_CPU
		printf("NOTICE: lowering CPU class to i586\n");
		cpu_class = CPUCLASS_586;
		break;
#endif
#endif
#ifndef I586_CPU
	case CPUCLASS_586:
		printf("NOTICE: this kernel does not support Pentium CPU class\n");
#ifdef I486_CPU
		printf("NOTICE: lowering CPU class to i486\n");
		cpu_class = CPUCLASS_486;
		break;
#endif
#endif
#ifndef I486_CPU
	case CPUCLASS_486:
		printf("NOTICE: this kernel does not support i486 CPU class\n");
#ifdef I386_CPU
		printf("NOTICE: lowering CPU class to i386\n");
		cpu_class = CPUCLASS_386;
		break;
#endif
#endif
#ifndef I386_CPU
	case CPUCLASS_386:
		printf("NOTICE: this kernel does not support i386 CPU class\n");
		panic("no appropriate CPU class available");
#endif
	default:
		break;
	}

	/* configure the CPU if needed */
	if (cpu_setup != NULL)
		cpu_setup();

	if (cpu == CPU_486DLC) {
#ifndef CYRIX_CACHE_WORKS
		printf("WARNING: CYRIX 486DLC CACHE UNCHANGED.\n");
#else
#ifndef CYRIX_CACHE_REALLY_WORKS
		printf("WARNING: CYRIX 486DLC CACHE ENABLED IN HOLD-FLUSH MODE.\n");
#else
		printf("WARNING: CYRIX 486DLC CACHE ENABLED.\n");
#endif
#endif
	}

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	/*
	 * On a 486 or above, enable ring 0 write protection.
	 */
	if (cpu_class >= CPUCLASS_486)
		lcr0(rcr0() | CR0_WP);
#endif
}

/*
 * machine dependent system variables.
 */
int
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	dev_t consdev;
	struct btinfo_bootpath *bibp;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case CPU_CONSDEV:
		if (cn_tab != NULL)
			consdev = cn_tab->cn_dev;
		else
			consdev = NODEV;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
		    sizeof consdev));

	case CPU_BIOSBASEMEM:
		return (sysctl_rdint(oldp, oldlenp, newp, biosbasemem));

	case CPU_BIOSEXTMEM:
		return (sysctl_rdint(oldp, oldlenp, newp, biosextmem));

	case CPU_NKPDE:
		return (sysctl_rdint(oldp, oldlenp, newp, nkpde));

	case CPU_FPU_PRESENT:
		return (sysctl_rdint(oldp, oldlenp, newp, i386_fpu_present));

	case CPU_BOOTED_KERNEL:
		bibp = lookup_bootinfo(BTINFO_BOOTPATH);
		if (!bibp)
			return (ENOENT);	/* ??? */
		return (sysctl_rdstring(oldp, oldlenp, newp, bibp->bootpath));

	case CPU_DISKINFO:
		if (i386_alldisks == NULL)
			return (ENOENT);
		return (sysctl_rdstruct(oldp, oldlenp, newp, i386_alldisks,
		    sizeof (struct disklist) +
		    (i386_ndisks - 1) * sizeof (struct nativedisk_info)));

	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
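/*
 * The nodes above sit under the CTL_MACHDEP tree; for example (an
 * illustrative usage, names per the i386 machine/cpu.h definitions),
 * a userland "sysctl machdep.fpu_present", i.e. sysctl(3) with mib
 * { CTL_MACHDEP, CPU_FPU_PRESENT }, reads i386_fpu_present.
 */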
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp = p->p_sigacts;
	int onstack;

	tf = p->p_md.md_regs;

	/* Do we need to jump onto the signal stack? */
	onstack =
	    (psp->ps_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (psp->ps_sigact[sig].sa_flags & SA_ONSTACK) != 0;

	/* Allocate space for the signal handler context. */
	if (onstack)
		fp = (struct sigframe *)((caddr_t)psp->ps_sigstk.ss_sp +
		    psp->ps_sigstk.ss_size);
	else
		fp = (struct sigframe *)tf->tf_esp;
	fp--;

	/* Build stack frame for signal trampoline. */
	frame.sf_signum = sig;
	frame.sf_code = code;
	frame.sf_scp = &fp->sf_sc;
	frame.sf_handler = catcher;

	/* Save register context. */
#ifdef VM86
	if (tf->tf_eflags & PSL_VM) {
		frame.sf_sc.sc_gs = tf->tf_vm86_gs;
		frame.sf_sc.sc_fs = tf->tf_vm86_fs;
		frame.sf_sc.sc_es = tf->tf_vm86_es;
		frame.sf_sc.sc_ds = tf->tf_vm86_ds;
		frame.sf_sc.sc_eflags = get_vflags(p);
	} else
#endif
	{
		__asm("movl %%gs,%w0" : "=r" (frame.sf_sc.sc_gs));
		__asm("movl %%fs,%w0" : "=r" (frame.sf_sc.sc_fs));
		frame.sf_sc.sc_es = tf->tf_es;
		frame.sf_sc.sc_ds = tf->tf_ds;
		frame.sf_sc.sc_eflags = tf->tf_eflags;
	}
	frame.sf_sc.sc_edi = tf->tf_edi;
	frame.sf_sc.sc_esi = tf->tf_esi;
	frame.sf_sc.sc_ebp = tf->tf_ebp;
	frame.sf_sc.sc_ebx = tf->tf_ebx;
	frame.sf_sc.sc_edx = tf->tf_edx;
	frame.sf_sc.sc_ecx = tf->tf_ecx;
	frame.sf_sc.sc_eax = tf->tf_eax;
	frame.sf_sc.sc_eip = tf->tf_eip;
	frame.sf_sc.sc_cs = tf->tf_cs;
	frame.sf_sc.sc_esp = tf->tf_esp;
	frame.sf_sc.sc_ss = tf->tf_ss;
	frame.sf_sc.sc_trapno = tf->tf_trapno;
	frame.sf_sc.sc_err = tf->tf_err;

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = *mask;

#ifdef COMPAT_13
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(mask, &frame.sf_sc.__sc_mask13);
#endif

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in.
	 */
	__asm("movl %w0,%%gs" : : "r" (GSEL(GUDATA_SEL, SEL_UPL)));
	__asm("movl %w0,%%fs" : : "r" (GSEL(GUDATA_SEL, SEL_UPL)));
	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_eip = (int)psp->ps_sigcode;
	tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
	tf->tf_esp = (int)fp;
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
}
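/*
 * Control flow, for reference: the frame copied out above causes the
 * signal trampoline (psp->ps_sigcode) to call
 * catcher(sf_signum, sf_code, sf_scp) and then, once the handler
 * returns, to invoke __sigreturn14() below with sf_scp so that the
 * interrupted register context is restored.
 */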
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
int
sys___sigreturn14(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys___sigreturn14_args /* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */ *uap = v;
	struct sigcontext *scp, context;
	struct trapframe *tf;

	/*
	 * The trampoline code hands us the context.
	 * It is unsafe to keep track of it ourselves, in the event that a
	 * program jumps out of a signal handler.
	 */
	scp = SCARG(uap, sigcntxp);
	if (copyin((caddr_t)scp, &context, sizeof(*scp)) != 0)
		return (EFAULT);

	/* Restore register context. */
	tf = p->p_md.md_regs;
#ifdef VM86
	if (context.sc_eflags & PSL_VM) {
		tf->tf_vm86_gs = context.sc_gs;
		tf->tf_vm86_fs = context.sc_fs;
		tf->tf_vm86_es = context.sc_es;
		tf->tf_vm86_ds = context.sc_ds;
		set_vflags(p, context.sc_eflags);
	} else
#endif
	{
		/*
		 * Check for security violations.  If we're returning to
		 * protected mode, the CPU will validate the segment registers
		 * automatically and generate a trap on violations.  We handle
		 * the trap, rather than doing all of the checking here.
		 */
		if (((context.sc_eflags ^ tf->tf_eflags) & PSL_USERSTATIC) != 0 ||
		    !USERMODE(context.sc_cs, context.sc_eflags))
			return (EINVAL);

		/* %fs and %gs were restored by the trampoline. */
		tf->tf_es = context.sc_es;
		tf->tf_ds = context.sc_ds;
		tf->tf_eflags = context.sc_eflags;
	}
	tf->tf_edi = context.sc_edi;
	tf->tf_esi = context.sc_esi;
	tf->tf_ebp = context.sc_ebp;
	tf->tf_ebx = context.sc_ebx;
	tf->tf_edx = context.sc_edx;
	tf->tf_ecx = context.sc_ecx;
	tf->tf_eax = context.sc_eax;
	tf->tf_eip = context.sc_eip;
	tf->tf_cs = context.sc_cs;
	tf->tf_esp = context.sc_esp;
	tf->tf_ss = context.sc_ss;

	/* Restore signal stack. */
	if (context.sc_onstack & SS_ONSTACK)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(p, SIG_SETMASK, &context.sc_mask, 0);

	return (EJUSTRETURN);
}

int	waittime = -1;
struct pcb dumppcb;

void
cpu_reboot(howto, bootstr)
	int howto;
	char *bootstr;
{

	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;
	if ((howto & RB_NOSYNC) == 0 && waittime < 0) {
		waittime = 0;
		vfs_shutdown();
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}

	/* Disable interrupts. */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

haltsys:
	doshutdownhooks();

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
#if NAPM > 0 && !defined(APM_NO_POWEROFF)
		/* turn off, if we can.  But try to turn disk off and
		 * wait a bit first--some disk drives are slow to clean up
		 * and users have reported disk corruption.
		 */
		delay(500000);
		apm_set_powstate(APM_DEV_DISK(0xff), APM_SYS_OFF);
		delay(500000);
		apm_set_powstate(APM_DEV_ALLDEVS, APM_SYS_OFF);
		printf("WARNING: powerdown failed!\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
#endif
	}

	if (howto & RB_HALT) {
		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cnpollc(1);	/* for proper keyboard command handling */
		cngetc();
		cnpollc(0);
	}

	printf("rebooting...\n");
	if (cpureset_delay > 0)
		delay(cpureset_delay * 1000);
	cpu_reset();
	for(;;) ;
	/*NOTREACHED*/
}

/*
 * These variables are needed by /sbin/savecore
 */
u_long	dumpmag = 0x8fca0101;	/* magic number */
int 	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

/*
 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
 */
int
cpu_dumpsize()
{
	int size;

	size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
	    ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
	if (roundup(size, dbtob(1)) != dbtob(1))
		return (-1);

	return (1);
}

/*
 * cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped.
 */
u_long
cpu_dump_mempagecnt()
{
	u_long i, n;

	n = 0;
	for (i = 0; i < mem_cluster_cnt; i++)
		n += atop(mem_clusters[i].size);
	return (n);
}
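/*
 * Size check in cpu_dumpsize() above: the kcore_seg_t, the
 * cpu_kcore_hdr_t and one phys_ram_seg_t per memory cluster must
 * together fit in a single disk block (dbtob(1) bytes); it returns
 * 1 (block) if so, and -1 to disable dumping if not.
 */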
/*
 * cpu_dump: dump the machine-dependent kernel core dump headers.
 */
int
cpu_dump()
{
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	char buf[dbtob(1)];
	kcore_seg_t *segp;
	cpu_kcore_hdr_t *cpuhdrp;
	phys_ram_seg_t *memsegp;
	int i;

	dump = bdevsw[major(dumpdev)].d_dump;

	memset(buf, 0, sizeof buf);
	segp = (kcore_seg_t *)buf;
	cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
	memsegp = (phys_ram_seg_t *)&buf[ALIGN(sizeof(*segp)) +
	    ALIGN(sizeof(*cpuhdrp))];

	/*
	 * Generate a segment header.
	 */
	CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
	segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));

	/*
	 * Add the machine-dependent header info.
	 */
	cpuhdrp->ptdpaddr = PTDpaddr;
	cpuhdrp->nmemsegs = mem_cluster_cnt;

	/*
	 * Fill in the memory segment descriptors.
	 */
	for (i = 0; i < mem_cluster_cnt; i++) {
		memsegp[i].start = mem_clusters[i].start;
		memsegp[i].size = mem_clusters[i].size;
	}

	return (dump(dumpdev, dumplo, (caddr_t)buf, dbtob(1)));
}

/*
 * This is called by main to set dumplo and dumpsize.
 * Dumps always skip the first NBPG of disk space
 * in case there might be a disk label stored there.
 * If there is extra space, put dump at the end to
 * reduce the chance that swapping trashes it.
 */
void
cpu_dumpconf()
{
	int nblks, dumpblks;	/* size of dump area */
	int maj;

	if (dumpdev == NODEV)
		goto bad;
	maj = major(dumpdev);
	if (maj < 0 || maj >= nblkdev)
		panic("dumpconf: bad dumpdev=0x%x", dumpdev);
	if (bdevsw[maj].d_psize == NULL)
		goto bad;
	nblks = (*bdevsw[maj].d_psize)(dumpdev);
	if (nblks <= ctod(1))
		goto bad;

	dumpblks = cpu_dumpsize();
	if (dumpblks < 0)
		goto bad;
	dumpblks += ctod(cpu_dump_mempagecnt());

	/* If dump won't fit (incl. room for possible label), punt. */
	if (dumpblks > (nblks - ctod(1)))
		goto bad;

	/* Put dump at end of partition */
	dumplo = nblks - dumpblks;

	/* dumpsize is in page units, and doesn't include headers. */
	dumpsize = cpu_dump_mempagecnt();
	return;

 bad:
	dumpsize = 0;
}

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
#define BYTES_PER_DUMP	NBPG	/* must be a multiple of pagesize XXX small */
static vaddr_t dumpspace;

vaddr_t
reserve_dumppages(p)
	vaddr_t p;
{

	dumpspace = p;
	return (p + BYTES_PER_DUMP);
}
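/*
 * reserve_dumppages() is given a single BYTES_PER_DUMP-sized window of
 * virtual address space; dumpsys() below maps each physical chunk into
 * that one window with pmap_map() before writing it out, so no virtual
 * memory needs to be allocated while the system is crashing.
 */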
void
dumpsys()
{
	u_long totalbytesleft, bytes, i, n, memseg;
	u_long maddr;
	int psize;
	daddr_t blkno;
	int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
	int error;

	/* Save registers. */
	savectx(&dumppcb);

	msgbufenabled = 0;	/* don't record dump msgs in msgbuf */
	if (dumpdev == NODEV)
		return;

	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		cpu_dumpconf();
	if (dumplo <= 0 || dumpsize == 0) {
		printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
		    minor(dumpdev));
		return;
	}
	printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
	    minor(dumpdev), dumplo);

	psize = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
	printf("dump ");
	if (psize == -1) {
		printf("area unavailable\n");
		return;
	}

#if 0	/* XXX this doesn't work.  grr. */
	/* toss any characters present prior to dump */
	while (sget() != NULL); /*syscons and pccons differ */
#endif

	if ((error = cpu_dump()) != 0)
		goto err;

	totalbytesleft = ptoa(cpu_dump_mempagecnt());
	blkno = dumplo + cpu_dumpsize();
	dump = bdevsw[major(dumpdev)].d_dump;
	error = 0;

	for (memseg = 0; memseg < mem_cluster_cnt; memseg++) {
		maddr = mem_clusters[memseg].start;
		bytes = mem_clusters[memseg].size;

		for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
			/* Print out how many MBs we have left to go. */
			if ((totalbytesleft % (1024*1024)) == 0)
				printf("%ld ", totalbytesleft / (1024 * 1024));

			/* Limit size for next transfer. */
			n = bytes - i;
			if (n > BYTES_PER_DUMP)
				n = BYTES_PER_DUMP;

			(void) pmap_map(dumpspace, maddr, maddr + n,
			    VM_PROT_READ);

			error = (*dump)(dumpdev, blkno, (caddr_t)dumpspace, n);
			if (error)
				goto err;
			maddr += n;
			blkno += btodb(n);		/* XXX? */

#if 0	/* XXX this doesn't work.  grr. */
			/* operator aborting dump? */
			if (sget() != NULL) {
				error = EINTR;
				break;
			}
#endif
		}
	}

 err:
	switch (error) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	case EINTR:
		printf("aborted from console\n");
		break;

	case 0:
		printf("succeeded\n");
		break;

	default:
		printf("error %d\n", error);
		break;
	}
	printf("\n\n");
	delay(5000000);		/* 5 seconds */
}

/*
 * Clear registers on exec
 */
void
setregs(p, pack, stack)
	struct proc *p;
	struct exec_package *pack;
	u_long stack;
{
	struct pcb *pcb = &p->p_addr->u_pcb;
	struct trapframe *tf;

#if NNPX > 0
	/* If we were using the FPU, forget about it. */
	if (npxproc == p)
		npxdrop();
#endif

#ifdef USER_LDT
	pmap_ldt_cleanup(p);
#endif

	p->p_md.md_flags &= ~MDP_USEDFPU;
	pcb->pcb_flags = 0;
	pcb->pcb_savefpu.sv_env.en_cw = __NetBSD_NPXCW__;

	tf = p->p_md.md_regs;
	__asm("movl %w0,%%gs" : : "r" (LSEL(LUDATA_SEL, SEL_UPL)));
	__asm("movl %w0,%%fs" : : "r" (LSEL(LUDATA_SEL, SEL_UPL)));
	tf->tf_es = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_ds = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_edi = 0;
	tf->tf_esi = 0;
	tf->tf_ebp = 0;
	tf->tf_ebx = (int)PS_STRINGS;
	tf->tf_edx = 0;
	tf->tf_ecx = 0;
	tf->tf_eax = 0;
	tf->tf_eip = pack->ep_entry;
	tf->tf_cs = LSEL(LUCODE_SEL, SEL_UPL);
	tf->tf_eflags = PSL_USERSET;
	tf->tf_esp = stack;
	tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL);
}

/*
 * Initialize segments and descriptor tables
 */

union descriptor *idt, *gdt, *ldt;
#ifdef I586_CPU
union descriptor *pentium_idt;
#endif
extern struct user *proc0paddr;

void
setgate(gd, func, args, type, dpl)
	struct gate_descriptor *gd;
	void *func;
	int args, type, dpl;
{

	gd->gd_looffset = (int)func;
	gd->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	gd->gd_stkcpy = args;
	gd->gd_xx = 0;
	gd->gd_type = type;
	gd->gd_dpl = dpl;
	gd->gd_p = 1;
	gd->gd_hioffset = (int)func >> 16;
}

void
setregion(rd, base, limit)
	struct region_descriptor *rd;
	void *base;
	size_t limit;
{

	rd->rd_limit = (int)limit;
	rd->rd_base = (int)base;
}

void
setsegment(sd, base, limit, type, dpl, def32, gran)
	struct segment_descriptor *sd;
	void *base;
	size_t limit;
	int type, dpl, def32, gran;
{

	sd->sd_lolimit = (int)limit;
	sd->sd_lobase = (int)base;
	sd->sd_type = type;
	sd->sd_dpl = dpl;
	sd->sd_p = 1;
	sd->sd_hilimit = (int)limit >> 16;
	sd->sd_xx = 0;
	sd->sd_def32 = def32;
	sd->sd_gran = gran;
	sd->sd_hibase = (int)base >> 24;
}
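/*
 * Example: setsegment(&gdt[GCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA,
 * SEL_KPL, 1, 1) in init386() below builds the flat kernel code
 * segment: base 0, limit 0xfffff in page granularity (gran = 1, i.e.
 * the full 4GB), executable/readable, DPL 0, and 32-bit default
 * operand size (def32 = 1).
 */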
#define	IDTVEC(name)	__CONCAT(X, name)
typedef void (vector) __P((void));
extern vector IDTVEC(syscall);
extern vector IDTVEC(osyscall);
extern vector *IDTVEC(exceptions)[];
#ifdef COMPAT_SVR4
extern vector IDTVEC(svr4_fasttrap);
#endif /* COMPAT_SVR4 */

#define	KBTOB(x)	((size_t)(x) * 1024UL)

void
init386(first_avail)
	vaddr_t first_avail;
{
	extern void consinit __P((void));
	extern struct extent *iomem_ex;
	struct region_descriptor region;
	int x;

	proc0.p_addr = proc0paddr;
	curpcb = &proc0.p_addr->u_pcb;

	i386_bus_space_init();
	consinit();	/* XXX SHOULD NOT BE DONE HERE */

	/*
	 * Allocate the physical addresses used by RAM from the iomem
	 * extent map.  This is done before the addresses are
	 * page rounded just to make sure we get them all.
	 */
	if (extent_alloc_region(iomem_ex, 0, KBTOB(biosbasemem), EX_NOWAIT)) {
		/* XXX What should we do? */
		printf("WARNING: CAN'T ALLOCATE BASE MEMORY FROM IOMEM EXTENT MAP!\n");
	}
	if (extent_alloc_region(iomem_ex, IOM_END, KBTOB(biosextmem),
	    EX_NOWAIT)) {
		/* XXX What should we do? */
		printf("WARNING: CAN'T ALLOCATE EXTENDED MEMORY FROM IOMEM EXTENT MAP!\n");
	}

#if NISADMA > 0
	/*
	 * Some motherboards/BIOSes remap the 384K of RAM that would
	 * normally be covered by the ISA hole to the end of memory
	 * so that it can be used.  However, on a 16M system, this
	 * would cause bounce buffers to be allocated and used.
	 * This is not desirable behaviour, as more than 384K of
	 * bounce buffers might be allocated.  As a work-around,
	 * we round memory down to the nearest 1M boundary if
	 * we're using any isadma devices and the remapped memory
	 * is what puts us over 16M.
	 */
	if (biosextmem > (15*1024) && biosextmem < (16*1024)) {
		char pbuf[9];

		format_bytes(pbuf, sizeof(pbuf), biosextmem - (15*1024));
		printf("Warning: ignoring %s of remapped memory\n", pbuf);
		biosextmem = (15*1024);
	}
#endif

#if NBIOSCALL > 0
	avail_start = 3*NBPG;	/* save us a page for trampoline code and
				   one additional PT page! */
#else
	avail_start = NBPG;	/* BIOS leaves data in low memory */
				/* and VM system doesn't work with phys 0 */
#endif
	avail_end = IOM_END + trunc_page(KBTOB(biosextmem));

	hole_start = trunc_page(KBTOB(biosbasemem));
	/* we load right after the I/O hole; adjust hole_end to compensate */
	hole_end = round_page(first_avail);

	/* Call pmap initialization to make new kernel address space. */
	pmap_bootstrap((vaddr_t)atdevbase + IOM_SIZE);

#if NBIOSCALL > 0
	/* install page 2 (reserved above) as PT page for first 4M */
	pmap_enter(pmap_kernel(), (u_long)vtopte(0), 2*NBPG,
	    VM_PROT_READ|VM_PROT_WRITE,
	    PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
	memset(vtopte(0), 0, NBPG);	/* make sure it is clean before using */
#endif

	pmap_enter(pmap_kernel(), idt_vaddr, idt_paddr,
	    VM_PROT_READ|VM_PROT_WRITE,
	    PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
	idt = (union descriptor *)idt_vaddr;
#ifdef I586_CPU
	pmap_enter(pmap_kernel(), pentium_idt_vaddr, idt_paddr,
	    VM_PROT_READ, PMAP_WIRED|VM_PROT_READ);
	pentium_idt = (union descriptor *)pentium_idt_vaddr;
#endif
	gdt = idt + NIDT;
	ldt = gdt + NGDT;

	/* make gdt gates and memory segments */
	setsegment(&gdt[GCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 1, 1);
	setsegment(&gdt[GDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 1);
	setsegment(&gdt[GLDT_SEL].sd, ldt, NLDT * sizeof(ldt[0]) - 1,
	    SDT_SYSLDT, SEL_KPL, 0, 0);
	setsegment(&gdt[GUCODE_SEL].sd, 0, i386_btop(VM_MAXUSER_ADDRESS) - 1,
	    SDT_MEMERA, SEL_UPL, 1, 1);
	setsegment(&gdt[GUDATA_SEL].sd, 0, i386_btop(VM_MAXUSER_ADDRESS) - 1,
	    SDT_MEMRWA, SEL_UPL, 1, 1);
#if NBIOSCALL > 0
	/* bios trampoline GDT entries */
	setsegment(&gdt[GBIOSCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL,
	    0, 0);
	setsegment(&gdt[GBIOSDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL,
	    0, 0);
#endif

	/* make ldt gates and memory segments */
	setgate(&ldt[LSYS5CALLS_SEL].gd, &IDTVEC(osyscall), 1, SDT_SYS386CGT,
	    SEL_UPL);
	ldt[LUCODE_SEL] = gdt[GUCODE_SEL];
	ldt[LUDATA_SEL] = gdt[GUDATA_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* exceptions */
	for (x = 0; x < 32; x++)
		setgate(&idt[x].gd, IDTVEC(exceptions)[x], 0, SDT_SYS386TGT,
		    (x == 3 || x == 4) ? SEL_UPL : SEL_KPL);
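	/*
	 * Note on the loop above: vectors 3 (breakpoint) and 4 (overflow)
	 * get DPL 3 so that user-mode int3/into instructions can reach
	 * them; all other exception gates are DPL 0, so a userland
	 * "int $n" on those vectors faults instead of entering the
	 * handler directly.
	 */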
	/* new-style interrupt gate for syscalls */
	setgate(&idt[128].gd, &IDTVEC(syscall), 0, SDT_SYS386TGT, SEL_UPL);

#ifdef COMPAT_SVR4
	setgate(&idt[0xd2].gd, &IDTVEC(svr4_fasttrap), 0, SDT_SYS386TGT,
	    SEL_UPL);
#endif /* COMPAT_SVR4 */

	setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1);
	lgdt(&region);
#ifdef I586_CPU
	setregion(&region, pentium_idt, NIDT * sizeof(idt[0]) - 1);
#else
	setregion(&region, idt, NIDT * sizeof(idt[0]) - 1);
#endif
	lidt(&region);

#ifdef DDB
	{
		extern int end;
		extern int *esym;
		struct btinfo_symtab *symtab;

		symtab = lookup_bootinfo(BTINFO_SYMTAB);

		if (symtab) {
			symtab->ssym += KERNBASE;
			symtab->esym += KERNBASE;
			ddb_init(symtab->nsym, (int *)symtab->ssym,
			    (int *)symtab->esym);
		} else
			ddb_init(*(int *)&end, ((int *)&end) + 1, esym);
	}
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef IPKDB
	ipkdb_init();
	if (boothowto & RB_KDB)
		ipkdb_connect(0);
#endif
#ifdef KGDB
	kgdb_port_init();
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#if NMCA > 0
	/*
	 * check for MCA bus, needed to be done before ISA stuff - if
	 * MCA is detected, ISA needs to use level triggered interrupts
	 * by default
	 */
	mca_busprobe();
#endif

#if NISA > 0
	isa_defaultirq();
#endif

	splraise(-1);
	enable_intr();

	/* number of pages of physmem addr space */
	physmem = btoc(KBTOB(biosbasemem)) + btoc(KBTOB(biosextmem));
	mem_clusters[0].start = 0;
	mem_clusters[0].size = trunc_page(KBTOB(biosbasemem));
	mem_clusters[1].start = IOM_END;
	mem_clusters[1].size = trunc_page(KBTOB(biosextmem));
	mem_cluster_cnt = 2;

	if (physmem < btoc(2 * 1024 * 1024)) {
		printf("warning: too little memory available; "
		    "have %lu bytes, want %lu bytes\n"
		    "running in degraded mode\n"
		    "press a key to confirm\n\n",
		    ptoa(physmem), 2*1024*1024UL);
		cngetc();
	}
}

struct queue {
	struct queue *q_next, *q_prev;
};

/*
 * insert an element into a queue
 */
void
_insque(v1, v2)
	void *v1;
	void *v2;
{
	struct queue *elem = v1, *head = v2;
	struct queue *next;

	next = head->q_next;
	elem->q_next = next;
	head->q_next = elem;
	elem->q_prev = head;
	next->q_prev = elem;
}

/*
 * remove an element from a queue
 */
void
_remque(v)
	void *v;
{
	struct queue *elem = v;
	struct queue *next, *prev;

	next = elem->q_next;
	prev = elem->q_prev;
	next->q_prev = prev;
	prev->q_next = next;
	elem->q_prev = 0;
}

#ifdef COMPAT_NOMID
static int
exec_nomid(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error;
	u_long midmag, magic;
	u_short mid;
	struct exec *execp = epp->ep_hdr;

	/* check on validity of epp->ep_hdr performed by exec_out_makecmds */

	midmag = ntohl(execp->a_midmag);
	mid = (midmag >> 16) & 0xffff;
	magic = midmag & 0xffff;

	if (magic == 0) {
		magic = (execp->a_midmag & 0xffff);
		mid = MID_ZERO;
	}

	midmag = mid << 16 | magic;

	switch (midmag) {
	case (MID_ZERO << 16) | ZMAGIC:
		/*
		 * 386BSD's ZMAGIC format:
		 */
		error = exec_aout_prep_oldzmagic(p, epp);
		break;

	case (MID_ZERO << 16) | QMAGIC:
		/*
		 * BSDI's QMAGIC format:
		 * same as new ZMAGIC format, but with different magic number
		 */
		error = exec_aout_prep_zmagic(p, epp);
		break;

	case (MID_ZERO << 16) | NMAGIC:
		/*
		 * BSDI's NMAGIC format:
		 * same as NMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldnmagic(p, epp);
		break;

	case (MID_ZERO << 16) | OMAGIC:
		/*
		 * BSDI's OMAGIC format:
		 * same as OMAGIC format, but with different magic number
		 * and with text starting at 0.
		 */
		error = exec_aout_prep_oldomagic(p, epp);
		break;

	default:
		error = ENOEXEC;
	}

	return error;
}
#endif
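/*
 * Header decoding used by exec_nomid() above: after ntohl(), a_midmag
 * carries the machine ID in its upper half and the magic number in its
 * lower half; headers whose MID field decodes as zero are treated as
 * old MID-less binaries and classified by magic number alone (ZMAGIC,
 * QMAGIC, NMAGIC or OMAGIC) to select the matching vmcmd setup routine.
 */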
/*
 * cpu_exec_aout_makecmds():
 *	cpu-dependent a.out format hook for execve().
 *
 * Determine if the given exec package refers to something which we
 * understand and, if so, set up the vmcmds for it.
 *
 * On the i386, old (386bsd) ZMAGIC binaries and BSDI QMAGIC binaries
 * are recognized if COMPAT_NOMID is given as a kernel option.
 */
int
cpu_exec_aout_makecmds(p, epp)
	struct proc *p;
	struct exec_package *epp;
{
	int error = ENOEXEC;

#ifdef COMPAT_NOMID
	if ((error = exec_nomid(p, epp)) == 0)
		return error;
#endif /* COMPAT_NOMID */

	return error;
}

void *
lookup_bootinfo(type)
	int type;
{
	struct btinfo_common *help;
	int n = *(int*)bootinfo;

	help = (struct btinfo_common *)(bootinfo + sizeof(int));
	while (n--) {
		if (help->type == type)
			return (help);
		help = (struct btinfo_common *)((char*)help + help->len);
	}
	return (0);
}

void
cpu_reset()
{

	disable_intr();

	/*
	 * The keyboard controller has 4 random output pins, one of which is
	 * connected to the RESET pin on the CPU in many PCs.  We tell the
	 * keyboard controller to pulse this line a couple of times.
	 */
	outb(IO_KBD + KBCMDP, KBC_PULSE0);
	delay(100000);
	outb(IO_KBD + KBCMDP, KBC_PULSE0);
	delay(100000);

	/*
	 * Try to cause a triple fault and watchdog reset by making the IDT
	 * invalid and causing a fault.
	 */
	memset((caddr_t)idt, 0, NIDT * sizeof(idt[0]));
	__asm __volatile("divl %0,%1" : : "q" (0), "a" (0));

#if 0
	/*
	 * Try to cause a triple fault and watchdog reset by unmapping the
	 * entire address space and doing a TLB flush.
	 */
	memset((caddr_t)PTD, 0, NBPG);
	pmap_update();
#endif

	for (;;);
}