Annotation of src/sys/arch/i386/i386/locore.S, Revision 1.99
1.99 ! jym 1: /* $NetBSD: locore.S,v 1.98 2012/04/19 18:00:35 jym Exp $ */
1.53 yamt 2:
3: /*
4: * Copyright-o-rama!
5: */
6:
7: /*
1.62 bouyer 8: * Copyright (c) 2006 Manuel Bouyer.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29: *
30: */
31:
32: /*
1.53 yamt 33: * Copyright (c) 2001 Wasabi Systems, Inc.
34: * All rights reserved.
35: *
36: * Written by Frank van der Linden for Wasabi Systems, Inc.
37: *
38: * Redistribution and use in source and binary forms, with or without
39: * modification, are permitted provided that the following conditions
40: * are met:
41: * 1. Redistributions of source code must retain the above copyright
42: * notice, this list of conditions and the following disclaimer.
43: * 2. Redistributions in binary form must reproduce the above copyright
44: * notice, this list of conditions and the following disclaimer in the
45: * documentation and/or other materials provided with the distribution.
46: * 3. All advertising materials mentioning features or use of this software
47: * must display the following acknowledgement:
48: * This product includes software developed for the NetBSD Project by
49: * Wasabi Systems, Inc.
50: * 4. The name of Wasabi Systems, Inc. may not be used to endorse
51: * or promote products derived from this software without specific prior
52: * written permission.
53: *
54: * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
55: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
58: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64: * POSSIBILITY OF SUCH DAMAGE.
65: */
66:
1.1 fvdl 67:
68: /*-
1.84 ad 69: * Copyright (c) 1998, 2000, 2004, 2006, 2007, 2009 The NetBSD Foundation, Inc.
1.1 fvdl 70: * All rights reserved.
71: *
72: * This code is derived from software contributed to The NetBSD Foundation
1.84 ad 73: * by Charles M. Hannum, and by Andrew Doran.
1.1 fvdl 74: *
75: * Redistribution and use in source and binary forms, with or without
76: * modification, are permitted provided that the following conditions
77: * are met:
78: * 1. Redistributions of source code must retain the above copyright
79: * notice, this list of conditions and the following disclaimer.
80: * 2. Redistributions in binary form must reproduce the above copyright
81: * notice, this list of conditions and the following disclaimer in the
82: * documentation and/or other materials provided with the distribution.
83: *
84: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
85: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
86: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
87: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
88: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
94: * POSSIBILITY OF SUCH DAMAGE.
95: */
96:
97: /*-
98: * Copyright (c) 1990 The Regents of the University of California.
99: * All rights reserved.
100: *
101: * This code is derived from software contributed to Berkeley by
102: * William Jolitz.
103: *
104: * Redistribution and use in source and binary forms, with or without
105: * modification, are permitted provided that the following conditions
106: * are met:
107: * 1. Redistributions of source code must retain the above copyright
108: * notice, this list of conditions and the following disclaimer.
109: * 2. Redistributions in binary form must reproduce the above copyright
110: * notice, this list of conditions and the following disclaimer in the
111: * documentation and/or other materials provided with the distribution.
1.12 agc 112: * 3. Neither the name of the University nor the names of its contributors
1.1 fvdl 113: * may be used to endorse or promote products derived from this software
114: * without specific prior written permission.
115: *
116: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
117: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
118: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
119: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
120: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
121: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
122: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
123: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
124: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
125: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
126: * SUCH DAMAGE.
127: *
128: * @(#)locore.s 7.3 (Berkeley) 5/13/91
129: */
130:
1.59 lukem 131: #include <machine/asm.h>
1.99 ! jym 132: __KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.98 2012/04/19 18:00:35 jym Exp $");
1.59 lukem 133:
1.18 christos 134: #include "opt_compat_oldboot.h"
1.1 fvdl 135: #include "opt_ddb.h"
1.83 apb 136: #include "opt_modular.h"
1.78 joerg 137: #include "opt_multiboot.h"
1.1 fvdl 138: #include "opt_realmem.h"
1.18 christos 139: #include "opt_vm86.h"
1.62 bouyer 140: #include "opt_xen.h"
1.1 fvdl 141:
142: #include "npx.h"
143: #include "assym.h"
144: #include "lapic.h"
145: #include "ioapic.h"
1.8 fvdl 146: #include "ksyms.h"
1.1 fvdl 147:
148: #include <sys/errno.h>
149: #include <sys/syscall.h>
150:
151: #include <machine/cputypes.h>
152: #include <machine/segments.h>
153: #include <machine/specialreg.h>
154: #include <machine/trap.h>
155: #include <machine/i82489reg.h>
1.48 yamt 156: #include <machine/frameasm.h>
157: #include <machine/i82489reg.h>
1.62 bouyer 158: #ifndef XEN
159: #include <machine/multiboot.h>
160: #endif
1.1 fvdl 161:
162: /* XXX temporary kluge; these should not be here */
163: /* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */
164: #include <dev/isa/isareg.h>
165:
1.62 bouyer 166: #ifdef XEN
167: /*
168: * Xen guest identifier and loader selection
169: */
170: .section __xen_guest
171: .ascii "GUEST_OS=netbsd,GUEST_VER=3.0,XEN_VER=xen-3.0"
172: #if defined(DOM0OPS) || !defined(XEN_COMPAT_030001)
173: .ascii ",VIRT_BASE=0xc0000000" /* KERNBASE */
174: .ascii ",ELF_PADDR_OFFSET=0xc0000000" /* KERNBASE */
175: #else
176: .ascii ",VIRT_BASE=0xc0100000" /* KERNTEXTOFF */
177: .ascii ",ELF_PADDR_OFFSET=0xc0100000" /* KERNTEXTOFF */
178: #endif
179: .ascii ",VIRT_ENTRY=0xc0100000" /* KERNTEXTOFF */
180: #if !defined(XEN_COMPAT_030001)
181: .ascii ",HYPERCALL_PAGE=0x00000101"
182: /* (???+HYPERCALL_PAGE_OFFSET)/PAGE_SIZE) */
183: #endif
1.66 bouyer 184: #ifdef PAE
185: .ascii ",PAE=yes[extended-cr3]"
186: #endif
1.62 bouyer 187: .ascii ",LOADER=generic"
1.80 ad 188: #if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE)
1.62 bouyer 189: .ascii ",BSD_SYMTAB=yes"
190: #endif
191: .byte 0
192: #endif
193:
1.1 fvdl 194: /*
195: * Initialization
196: */
197: .data
198:
199: .globl _C_LABEL(cpu)
1.73 ad 200: .globl _C_LABEL(cpuid_level)
1.38 jmmv 201: .globl _C_LABEL(esym)
1.71 ad 202: .globl _C_LABEL(eblob)
1.38 jmmv 203: .globl _C_LABEL(atdevbase)
1.91 rmind 204: .globl _C_LABEL(lwp0uarea),_C_LABEL(PDPpaddr)
1.1 fvdl 205: .globl _C_LABEL(gdt)
206: .globl _C_LABEL(idt)
1.30 junyoung 207: .globl _C_LABEL(lapic_tpr)
208:
1.1 fvdl 209: #if NLAPIC > 0
210: #ifdef __ELF__
1.7 thorpej 211: .align PAGE_SIZE
1.1 fvdl 212: #else
213: .align 12
214: #endif
215: .globl _C_LABEL(local_apic), _C_LABEL(lapic_id)
216: _C_LABEL(local_apic):
217: .space LAPIC_ID
1.30 junyoung 218: _C_LABEL(lapic_id):
1.1 fvdl 219: .long 0x00000000
220: .space LAPIC_TPRI-(LAPIC_ID+4)
1.30 junyoung 221: _C_LABEL(lapic_tpr):
1.1 fvdl 222: .space LAPIC_PPRI-LAPIC_TPRI
1.30 junyoung 223: _C_LABEL(lapic_ppr):
1.1 fvdl 224: .space LAPIC_ISR-LAPIC_PPRI
225: _C_LABEL(lapic_isr):
1.7 thorpej 226: .space PAGE_SIZE-LAPIC_ISR
1.1 fvdl 227: #else
1.30 junyoung 228: _C_LABEL(lapic_tpr):
1.1 fvdl 229: .long 0
230: #endif
1.30 junyoung 231:
1.48 yamt 232: _C_LABEL(cpu): .long 0 # are we 80486, Pentium, or..
1.73 ad 233: _C_LABEL(cpuid_level): .long 0
1.1 fvdl 234: _C_LABEL(atdevbase): .long 0 # location of start of iomem in virtual
1.91 rmind 235: _C_LABEL(lwp0uarea): .long 0
1.31 junyoung 236: _C_LABEL(PDPpaddr): .long 0 # paddr of PDP, for libkvm
1.53 yamt 237: _C_LABEL(tablesize): .long 0
1.30 junyoung 238:
1.63 bouyer 239: .space 512
1.1 fvdl 240: tmpstk:
1.96 cherry 241: #ifdef XEN
242: .align PAGE_SIZE, 0x0 # Align on page boundary
243: _C_LABEL(tmpgdt):
244: .space PAGE_SIZE # Xen expects a page
245: #endif /* XEN */
246: .globl tmpgdt
1.62 bouyer 247: #ifndef XEN
1.35 yamt 248: #define _RELOC(x) ((x) - KERNBASE)
1.62 bouyer 249: #else
250: #define _RELOC(x) ((x))
251: #endif /* XEN */
1.1 fvdl 252: #define RELOC(x) _RELOC(_C_LABEL(x))
253:
254: .text
255: .globl _C_LABEL(kernel_text)
256: .set _C_LABEL(kernel_text),KERNTEXTOFF
257:
258: .globl start
1.62 bouyer 259: #ifndef XEN
1.1 fvdl 260: start: movw $0x1234,0x472 # warm boot
261:
1.37 jmmv 262: #if defined(MULTIBOOT)
263: jmp 1f
264:
265: .align 4
266: .globl Multiboot_Header
267: _C_LABEL(Multiboot_Header):
1.44 jmmv 268: #define MULTIBOOT_HEADER_FLAGS (MULTIBOOT_HEADER_WANT_MEMORY)
1.37 jmmv 269: .long MULTIBOOT_HEADER_MAGIC
270: .long MULTIBOOT_HEADER_FLAGS
271: .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
272:
273: 1:
274: /* Check if we are being executed by a Multiboot-compliant boot
275: * loader. */
276: cmpl $MULTIBOOT_INFO_MAGIC,%eax
277: jne 1f
278:
1.43 mrg 279: /*
280: * Indeed, a multiboot-compliant boot loader executed us. We copy
1.37 jmmv 281: * the received Multiboot information structure into kernel's data
282: * space to process it later -- after we are relocated. It will
1.43 mrg 283: * be safer to run complex C code than doing it at this point.
284: */
1.37 jmmv 285: pushl %ebx # Address of Multiboot information
286: call _C_LABEL(multiboot_pre_reloc)
287: addl $4,%esp
1.38 jmmv 288: jmp 2f
1.37 jmmv 289: #endif
290:
291: 1:
1.1 fvdl 292: /*
1.38 jmmv 293: * At this point, we know that a NetBSD-specific boot loader
294: * booted this kernel. The stack carries the following parameters:
1.41 jmmv 295: * (boothowto, [bootdev], bootinfo, esym, biosextmem, biosbasemem),
1.38 jmmv 296: * 4 bytes each.
1.1 fvdl 297: */
1.38 jmmv 298: addl $4,%esp # Discard return address to boot loader
299: call _C_LABEL(native_loader)
300: addl $24,%esp
1.1 fvdl 301:
302: 2:
303: /* First, reset the PSL. */
304: pushl $PSL_MBO
305: popfl
306:
307: /* Clear segment registers; always null in proc0. */
308: xorl %eax,%eax
309: movw %ax,%fs
310: movw %ax,%gs
311: decl %eax
1.73 ad 312: movl %eax,RELOC(cpuid_level)
1.1 fvdl 313:
314: /* Find out our CPU type. */
315:
316: try386: /* Try to toggle alignment check flag; does not exist on 386. */
317: pushfl
318: popl %eax
319: movl %eax,%ecx
320: orl $PSL_AC,%eax
321: pushl %eax
322: popfl
323: pushfl
324: popl %eax
325: xorl %ecx,%eax
326: andl $PSL_AC,%eax
327: pushl %ecx
328: popfl
329:
330: testl %eax,%eax
331: jnz try486
332:
333: /*
334: * Try the test of a NexGen CPU -- ZF will not change on a DIV
335: * instruction on a NexGen, it will on an i386. Documented in
336: * Nx586 Processor Recognition Application Note, NexGen, Inc.
337: */
338: movl $0x5555,%eax
339: xorl %edx,%edx
340: movl $2,%ecx
341: divl %ecx
342: jnz is386
343:
344: isnx586:
345: /*
346: * Don't try cpuid, as Nx586s reportedly don't support the
347: * PSL_ID bit.
348: */
349: movl $CPU_NX586,RELOC(cpu)
350: jmp 2f
351:
352: is386:
353: movl $CPU_386,RELOC(cpu)
354: jmp 2f
355:
356: try486: /* Try to toggle identification flag; does not exist on early 486s. */
357: pushfl
358: popl %eax
359: movl %eax,%ecx
360: xorl $PSL_ID,%eax
361: pushl %eax
362: popfl
363: pushfl
364: popl %eax
365: xorl %ecx,%eax
366: andl $PSL_ID,%eax
367: pushl %ecx
368: popfl
369:
370: testl %eax,%eax
371: jnz try586
372: is486: movl $CPU_486,RELOC(cpu)
373: /*
374: * Check Cyrix CPU
375: * Cyrix CPUs do not change the undefined flags following
376: * execution of the divide instruction which divides 5 by 2.
377: *
378: * Note: CPUID is enabled on M2, so it passes another way.
379: */
380: pushfl
381: movl $0x5555, %eax
382: xorl %edx, %edx
383: movl $2, %ecx
384: clc
385: divl %ecx
386: jnc trycyrix486
387: popfl
388: jmp 2f
389: trycyrix486:
390: movl $CPU_6x86,RELOC(cpu) # set CPU type
391: /*
392: * Check for Cyrix 486 CPU by seeing if the flags change during a
393: * divide. This is documented in the Cx486SLC/e SMM Programmer's
394: * Guide.
395: */
396: xorl %edx,%edx
397: cmpl %edx,%edx # set flags to known state
398: pushfl
399: popl %ecx # store flags in ecx
400: movl $-1,%eax
401: movl $4,%ebx
402: divl %ebx # do a long division
403: pushfl
404: popl %eax
405: xorl %ecx,%eax # are the flags different?
406: testl $0x8d5,%eax # only check C|PF|AF|Z|N|V
407: jne 2f # yes; must be Cyrix 6x86 CPU
408: movl $CPU_486DLC,RELOC(cpu) # set CPU type
409:
410: #ifndef CYRIX_CACHE_WORKS
411: /* Disable caching of the ISA hole only. */
412: invd
413: movb $CCR0,%al # Configuration Register index (CCR0)
414: outb %al,$0x22
415: inb $0x23,%al
416: orb $(CCR0_NC1|CCR0_BARB),%al
417: movb %al,%ah
418: movb $CCR0,%al
419: outb %al,$0x22
420: movb %ah,%al
421: outb %al,$0x23
422: invd
423: #else /* CYRIX_CACHE_WORKS */
424: /* Set cache parameters */
425: invd # Start with guaranteed clean cache
426: movb $CCR0,%al # Configuration Register index (CCR0)
427: outb %al,$0x22
428: inb $0x23,%al
429: andb $~CCR0_NC0,%al
430: #ifndef CYRIX_CACHE_REALLY_WORKS
431: orb $(CCR0_NC1|CCR0_BARB),%al
432: #else
433: orb $CCR0_NC1,%al
434: #endif
435: movb %al,%ah
436: movb $CCR0,%al
437: outb %al,$0x22
438: movb %ah,%al
439: outb %al,$0x23
440: /* clear non-cacheable region 1 */
441: movb $(NCR1+2),%al
442: outb %al,$0x22
443: movb $NCR_SIZE_0K,%al
444: outb %al,$0x23
445: /* clear non-cacheable region 2 */
446: movb $(NCR2+2),%al
447: outb %al,$0x22
448: movb $NCR_SIZE_0K,%al
449: outb %al,$0x23
450: /* clear non-cacheable region 3 */
451: movb $(NCR3+2),%al
452: outb %al,$0x22
453: movb $NCR_SIZE_0K,%al
454: outb %al,$0x23
455: /* clear non-cacheable region 4 */
456: movb $(NCR4+2),%al
457: outb %al,$0x22
458: movb $NCR_SIZE_0K,%al
459: outb %al,$0x23
460: /* enable caching in CR0 */
461: movl %cr0,%eax
462: andl $~(CR0_CD|CR0_NW),%eax
463: movl %eax,%cr0
464: invd
465: #endif /* CYRIX_CACHE_WORKS */
466:
467: jmp 2f
468:
469: try586: /* Use the `cpuid' instruction. */
470: xorl %eax,%eax
471: cpuid
1.73 ad 472: movl %eax,RELOC(cpuid_level)
1.1 fvdl 473:
474: 2:
475: /*
476: * Finished with old stack; load new %esp now instead of later so we
477: * can trace this code without having to worry about the trace trap
478: * clobbering the memory test or the zeroing of the bss+bootstrap page
479: * tables.
480: *
481: * The boot program should check:
482: * text+data <= &stack_variable - more_space_for_stack
483: * text+data+bss+pad+space_for_page_tables <= end_of_memory
484: * Oops, the gdt is in the carcass of the boot program so clearing
485: * the rest of memory is still not possible.
486: */
487: movl $_RELOC(tmpstk),%esp # bootstrap stack end location
488:
489: /*
1.93 jym 490: * Virtual address space of kernel, without PAE. The page dir is 1 page long.
1.1 fvdl 491: *
1.71 ad 492: * text | data | bss | [syms] | [blobs] | page dir | proc0 kstack | L1 ptp
493: * 0 1 2 3
1.93 jym 494: *
495: * Virtual address space of kernel, with PAE. We need 4 pages for the page dir
496: * and 1 page for the L3.
497: * text | data | bss | [syms] | [blobs] | L3 | page dir | proc0 kstack | L1 ptp
498: * 0 1 5 6 7
1.1 fvdl 499: */
1.93 jym 500: #ifndef PAE
501: #define PROC0_PDIR_OFF 0
502: #else
503: #define PROC0_L3_OFF 0
504: #define PROC0_PDIR_OFF 1 * PAGE_SIZE
505: #endif
1.53 yamt 506:
1.93 jym 507: #define PROC0_STK_OFF (PROC0_PDIR_OFF + PDP_SIZE * PAGE_SIZE)
1.53 yamt 508: #define PROC0_PTP1_OFF (PROC0_STK_OFF + UPAGES * PAGE_SIZE)
509:
510: /*
1.93 jym 511: * fillkpt - Fill in a kernel page table
1.53 yamt 512: * eax = pte (page frame | control | status)
513: * ebx = page table address
514: * ecx = number of pages to map
1.93 jym 515: *
516: * For PAE, each entry is 8 bytes long: we must set the 4 upper bytes to 0.
517: * This is done by the first instruction of fillkpt. In the non-PAE case, this
518: * instruction just clears the page table entry.
1.53 yamt 519: */
520:
521: #define fillkpt \
1.93 jym 522: 1: movl $0,(PDE_SIZE-4)(%ebx) ; /* clear bits */ \
523: movl %eax,(%ebx) ; /* store phys addr */ \
524: addl $PDE_SIZE,%ebx ; /* next pte/pde */ \
525: addl $PAGE_SIZE,%eax ; /* next phys page */ \
526: loop 1b ;
1.1 fvdl 527:
528: /* Find end of kernel image. */
529: movl $RELOC(end),%edi
1.71 ad 530:
1.80 ad 531: #if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE)
1.1 fvdl 532: /* Save the symbols (if loaded). */
533: movl RELOC(esym),%eax
534: testl %eax,%eax
535: jz 1f
1.35 yamt 536: subl $KERNBASE,%eax
1.1 fvdl 537: movl %eax,%edi
538: 1:
539: #endif
540:
1.71 ad 541: /* Skip over any modules/blobs. */
542: movl RELOC(eblob),%eax
543: testl %eax,%eax
544: jz 1f
545: subl $KERNBASE,%eax
546: movl %eax,%edi
547: 1:
1.53 yamt 548: /* Compute sizes */
1.71 ad 549: movl %edi,%esi
1.1 fvdl 550: addl $PGOFSET,%esi # page align up
551: andl $~PGOFSET,%esi
552:
1.53 yamt 553: /* nkptp[1] = (esi + ~L2_FRAME) >> L2_SHIFT + 1; */
554: movl %esi,%eax
555: addl $~L2_FRAME,%eax
556: shrl $L2_SHIFT,%eax
557: incl %eax /* one more ptp for VAs stolen by bootstrap */
558: 1: movl %eax,RELOC(nkptp)+1*4
559:
1.93 jym 560: /* tablesize = (PDP_SIZE + UPAGES + nkptp) << PGSHIFT; */
561: addl $(PDP_SIZE+UPAGES),%eax
562: #ifdef PAE
563: incl %eax /* one more page for the L3 PD */
564: shll $PGSHIFT+1,%eax /* PTP tables are twice larger with PAE */
565: #else
1.53 yamt 566: shll $PGSHIFT,%eax
1.93 jym 567: #endif
1.53 yamt 568: movl %eax,RELOC(tablesize)
569:
570: /* ensure that nkptp covers bootstrap tables */
571: addl %esi,%eax
572: addl $~L2_FRAME,%eax
573: shrl $L2_SHIFT,%eax
574: incl %eax
575: cmpl %eax,RELOC(nkptp)+1*4
576: jnz 1b
577:
578: /* Clear tables */
579: movl %esi,%edi
1.1 fvdl 580: xorl %eax,%eax
581: cld
1.53 yamt 582: movl RELOC(tablesize),%ecx
583: shrl $2,%ecx
1.1 fvdl 584: rep
585: stosl
586:
1.53 yamt 587: leal (PROC0_PTP1_OFF)(%esi), %ebx
1.1 fvdl 588:
589: /*
590: * Build initial page tables.
591: */
1.53 yamt 592: /*
593: * Compute &__data_start - KERNBASE. This can't be > 4G,
594: * or we can't deal with it anyway, since we can't load it in
595: * 32 bit mode. So use the bottom 32 bits.
596: */
597: movl $RELOC(__data_start),%edx
1.1 fvdl 598: andl $~PGOFSET,%edx
1.30 junyoung 599:
1.53 yamt 600: /*
601: * Skip the first MB.
602: */
1.1 fvdl 603: movl $_RELOC(KERNTEXTOFF),%eax
604: movl %eax,%ecx
1.93 jym 605: shrl $(PGSHIFT-2),%ecx /* ((n >> PGSHIFT) << 2) for # pdes */
606: #ifdef PAE
607: shll $1,%ecx /* pdes are twice larger with PAE */
608: #endif
1.53 yamt 609: addl %ecx,%ebx
1.1 fvdl 610:
611: /* Map the kernel text read-only. */
612: movl %edx,%ecx
613: subl %eax,%ecx
614: shrl $PGSHIFT,%ecx
615: orl $(PG_V|PG_KR),%eax
616: fillkpt
617:
618: /* Map the data, BSS, and bootstrap tables read-write. */
619: leal (PG_V|PG_KW)(%edx),%eax
1.53 yamt 620: movl RELOC(tablesize),%ecx
1.1 fvdl 621: addl %esi,%ecx # end of tables
622: subl %edx,%ecx # subtract end of text
623: shrl $PGSHIFT,%ecx
624: fillkpt
625:
1.53 yamt 626: /* Map ISA I/O mem (later atdevbase) */
1.1 fvdl 627: movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set
628: movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s,
629: fillkpt
630:
631: /*
632: * Construct a page table directory.
633: */
1.53 yamt 634: /* Set up top level entries for identity mapping */
1.93 jym 635: leal (PROC0_PDIR_OFF)(%esi),%ebx
1.53 yamt 636: leal (PROC0_PTP1_OFF)(%esi),%eax
637: orl $(PG_V|PG_KW), %eax
638: movl RELOC(nkptp)+1*4,%ecx
1.1 fvdl 639: fillkpt
640:
1.53 yamt 641: /* Set up top level entries for actual kernel mapping */
1.93 jym 642: leal (PROC0_PDIR_OFF + L2_SLOT_KERNBASE*PDE_SIZE)(%esi),%ebx
1.53 yamt 643: leal (PROC0_PTP1_OFF)(%esi),%eax
644: orl $(PG_V|PG_KW), %eax
645: movl RELOC(nkptp)+1*4,%ecx
1.1 fvdl 646: fillkpt
647:
648: /* Install a PDE recursively mapping page directory as a page table! */
1.93 jym 649: leal (PROC0_PDIR_OFF + PDIR_SLOT_PTE*PDE_SIZE)(%esi),%ebx
650: leal (PROC0_PDIR_OFF)(%esi),%eax
1.53 yamt 651: orl $(PG_V|PG_KW),%eax
1.93 jym 652: movl $PDP_SIZE,%ecx
653: fillkpt
654:
655: #ifdef PAE
656: /* Fill in proc0 L3 page with entries pointing to the page dirs */
657: leal (PROC0_L3_OFF)(%esi),%ebx
658: leal (PROC0_PDIR_OFF)(%esi),%eax
659: orl $(PG_V),%eax
660: movl $PDP_SIZE,%ecx
661: fillkpt
662:
663: /* Enable PAE mode */
664: movl %cr4,%eax
665: orl $CR4_PAE,%eax
666: movl %eax,%cr4
667: #endif
1.1 fvdl 668:
1.31 junyoung 669: /* Save phys. addr of PDP, for libkvm. */
1.93 jym 670: leal (PROC0_PDIR_OFF)(%esi),%eax
671: movl %eax,RELOC(PDPpaddr)
1.1 fvdl 672:
1.93 jym 673: /*
674: * Startup checklist:
675: * 1. Load %cr3 with pointer to PDIR (or L3 PD page for PAE).
676: */
1.1 fvdl 677: movl %esi,%eax # phys address of ptd in proc 0
678: movl %eax,%cr3 # load ptd addr into mmu
1.93 jym 679:
1.53 yamt 680: /*
681: * 2. Enable paging and the rest of it.
682: */
1.1 fvdl 683: movl %cr0,%eax # get control word
684: # enable paging & NPX emulation
1.99 ! jym 685: orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP|CR0_WP|CR0_AM),%eax
1.77 gmcgarry 686: movl %eax,%cr0 # and page NOW!
1.1 fvdl 687:
688: pushl $begin # jump to high mem
689: ret
690:
691: begin:
1.53 yamt 692: /*
693: * We have arrived.
694: * There's no need anymore for the identity mapping in low
695: * memory, remove it.
696: */
697: movl _C_LABEL(nkptp)+1*4,%ecx
1.93 jym 698: leal (PROC0_PDIR_OFF)(%esi),%ebx # old, phys address of PDIR
699: addl $(KERNBASE), %ebx # new, virtual address of PDIR
700: 1: movl $0,(PDE_SIZE-4)(%ebx) # Upper bits (for PAE)
701: movl $0,(%ebx)
702: addl $PDE_SIZE,%ebx
1.1 fvdl 703: loop 1b
704:
705: /* Relocate atdevbase. */
1.53 yamt 706: movl $KERNBASE,%edx
707: addl _C_LABEL(tablesize),%edx
1.1 fvdl 708: addl %esi,%edx
709: movl %edx,_C_LABEL(atdevbase)
710:
711: /* Set up bootstrap stack. */
1.53 yamt 712: leal (PROC0_STK_OFF+KERNBASE)(%esi),%eax
1.91 rmind 713: movl %eax,_C_LABEL(lwp0uarea)
1.40 yamt 714: leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp
715: movl %esi,(KSTACK_SIZE+PCB_CR3)(%eax) # pcb->pcb_cr3
1.1 fvdl 716: xorl %ebp,%ebp # mark end of frames
717:
1.37 jmmv 718: #if defined(MULTIBOOT)
719: /* It is now safe to parse the Multiboot information structure
720: * we saved before from C code. Note that we cannot delay its
721: * parsing any more because initgdt (called below) needs to make
722: * use of this information. */
723: call _C_LABEL(multiboot_post_reloc)
724: #endif
725:
1.1 fvdl 726: subl $NGDT*8, %esp # space for temporary gdt
727: pushl %esp
728: call _C_LABEL(initgdt)
729: addl $4,%esp
1.30 junyoung 730:
1.53 yamt 731: movl _C_LABEL(tablesize),%eax
1.1 fvdl 732: addl %esi,%eax # skip past stack and page tables
733:
1.93 jym 734: #ifdef PAE
735: pushl $0 # init386() expects a 64 bits paddr_t with PAE
736: #endif
1.1 fvdl 737: pushl %eax
738: call _C_LABEL(init386) # wire 386 chip for unix operation
1.93 jym 739: addl $PDE_SIZE,%esp # pop paddr_t
740: addl $NGDT*8,%esp # pop temporary gdt
1.1 fvdl 741:
742: #ifdef SAFARI_FIFO_HACK
743: movb $5,%al
744: movw $0x37b,%dx
745: outb %al,%dx
746: movw $0x37f,%dx
747: inb %dx,%al
748: movb %al,%cl
749:
750: orb $1,%cl
751:
752: movb $5,%al
753: movw $0x37b,%dx
754: outb %al,%dx
755: movw $0x37f,%dx
756: movb %cl,%al
757: outb %al,%dx
758: #endif /* SAFARI_FIFO_HACK */
759:
760: call _C_LABEL(main)
1.62 bouyer 761: #else /* XEN */
762: start:
763: /* First, reset the PSL. */
764: pushl $PSL_MBO
765: popfl
766:
767: cld
768: movl %esp, %ebx # save start of available space
769: movl $_RELOC(tmpstk),%esp # bootstrap stack end location
770:
771: /* Clear BSS first so that there are no surprises... */
772: xorl %eax,%eax
773: movl $RELOC(__bss_start),%edi
774: movl $RELOC(_end),%ecx
775: subl %edi,%ecx
776: rep stosb
777:
778: /* Copy the necessary stuff from start_info structure. */
779: /* We need to copy shared_info early, so that sti/cli work */
780: movl $RELOC(start_info_union),%edi
781: movl $128,%ecx
782: rep movsl
783:
784: /* Clear segment registers; always null in proc0. */
785: xorl %eax,%eax
786: movw %ax,%fs
787: movw %ax,%gs
788: decl %eax
1.73 ad 789: movl %eax,RELOC(cpuid_level)
1.62 bouyer 790:
791: xorl %eax,%eax
792: cpuid
1.73 ad 793: movl %eax,RELOC(cpuid_level)
1.62 bouyer 794:
1.96 cherry 795: /*
796: * Use a temp page. We'll re- add it to uvm(9) once we're
797: * done using it.
798: */
799: movl $RELOC(tmpgdt), %eax
800: pushl %eax # start of temporary gdt
801: call _C_LABEL(initgdt)
802: addl $4,%esp
803:
1.62 bouyer 804: call xen_pmap_bootstrap
1.92 jym 805:
1.62 bouyer 806: /*
807: * First avail returned by xen_pmap_bootstrap in %eax
808: */
809: movl %eax, %esi;
1.91 rmind 810: movl %esi, _C_LABEL(lwp0uarea)
1.62 bouyer 811:
812: /* Set up bootstrap stack. */
813: leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp
814: xorl %ebp,%ebp # mark end of frames
815:
816: addl $USPACE, %esi
817: subl $KERNBASE, %esi #init386 want a physical address
1.92 jym 818:
819: #ifdef PAE
820: pushl $0 # init386() expects a 64 bits paddr_t with PAE
821: #endif
1.62 bouyer 822: pushl %esi
823: call _C_LABEL(init386) # wire 386 chip for unix operation
1.93 jym 824: addl $PDE_SIZE,%esp # pop paddr_t
1.62 bouyer 825: call _C_LABEL(main)
826:
1.88 cegger 827: #if defined(XEN) && !defined(XEN_COMPAT_030001)
1.62 bouyer 828: /* space for the hypercall call page */
829: #define HYPERCALL_PAGE_OFFSET 0x1000
830: .org HYPERCALL_PAGE_OFFSET
831: ENTRY(hypercall_page)
832: .skip 0x1000
1.88 cegger 833: #endif /* defined(XEN) && !defined(XEN_COMPAT_030001) */
1.62 bouyer 834:
835: /*
836: * void lgdt_finish(void);
837: * Finish load a new GDT pointer (do any necessary cleanup).
838: * XXX It's somewhat questionable whether reloading all the segment registers
839: * is necessary, since the actual descriptor data is not changed except by
840: * process creation and exit, both of which clean up via task switches. OTOH,
841: * this only happens at run time when the GDT is resized.
842: */
843: /* LINTSTUB: Func: void lgdt_finish(void) */
 844: NENTRY(lgdt_finish)
 845: 	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax	# kernel data selector
 846: 	movw	%ax,%ds
 847: 	movw	%ax,%es
 848: 	movw	%ax,%gs
 849: 	movw	%ax,%ss
 850: 	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax	# GCPU_SEL selector for %fs
 851: 	movw	%ax,%fs
 852: 	/* Reload code selector by doing intersegment return. */
 853: 	popl	%eax			# fetch our return address
 854: 	pushl	$GSEL(GCODE_SEL, SEL_KPL)	# new %cs on stack ...
 855: 	pushl	%eax			# ... above the return address
 856: 	lret				# far return reloads %cs and resumes caller
1.72      yamt      857: END(lgdt_finish)
1.62 bouyer 858:
859: #endif /* XEN */
1.1 fvdl 860:
861: /*
1.48 yamt 862: * void lwp_trampoline(void);
863: *
1.1 fvdl 864: * This is a trampoline function pushed onto the stack of a newly created
865: * process in order to do some additional setup. The trampoline is entered by
866: * cpu_switch()ing to the process, so we abuse the callee-saved registers used
867: * by cpu_switch() to store the information about the stub to call.
868: * NOTE: This function does not have a normal calling sequence!
869: */
1.48      yamt      870: NENTRY(lwp_trampoline)
1.68      ad        871: 	movl	%ebp,%edi	/* for .Lsyscall_checkast */
1.48      yamt      872: 	xorl	%ebp,%ebp	# clear %ebp: mark end of frames
1.68      ad        873: 	pushl	%edi		# 2nd arg to lwp_startup()
1.48      yamt      874: 	pushl	%eax		# 1st arg to lwp_startup()
                    875: 	call	_C_LABEL(lwp_startup)
                    876: 	addl	$8,%esp		# pop the two lwp_startup() args
1.1       fvdl      877: 	pushl	%ebx		# %ebx = stub argument (set by cpu_lwp_fork — confirm)
                    878: 	call	*%esi		# %esi = stub function to invoke
                    879: 	addl	$4,%esp		# pop stub argument
1.68      ad        880: 	jmp	.Lsyscall_checkast	# leave via the syscall return path
1.1       fvdl      881: 	/* NOTREACHED */
1.72      yamt      882: END(lwp_trampoline)
1.1 fvdl 883:
884: /*
1.48 yamt 885: * sigcode()
886: *
887: * Signal trampoline; copied to top of user stack. Used only for
888: * compatibility with old releases of NetBSD.
1.1 fvdl 889: */
                    890: NENTRY(sigcode)
                    891: 	/*
                    892: 	 * Handler has returned here as if we called it.  The sigcontext
                    893: 	 * is on the stack after the 3 args "we" pushed.
                    894: 	 */
                    895: 	leal	12(%esp),%eax		# get pointer to sigcontext
                    896: 	movl	%eax,4(%esp)		# put it in the argument slot
                    897: 					# fake return address already there
1.17      christos  898: 	movl	$SYS_compat_16___sigreturn14,%eax	# sigreturn syscall number
1.1       fvdl      899: 	int	$0x80	 		# enter kernel with args on stack
                    900: 	movl	$SYS_exit,%eax
                    901: 	int	$0x80			# exit if sigreturn fails
                    902: 	.globl	_C_LABEL(esigcode)
                    903: _C_LABEL(esigcode):			# end marker; esigcode - sigcode gives trampoline size
1.72      yamt      904: END(sigcode)
1.1 fvdl 905:
906: /*
1.48 yamt 907: * int setjmp(label_t *)
908: *
909: * Used primarily by DDB.
1.1 fvdl 910: */
1.1       fvdl      911: ENTRY(setjmp)
                    912: 	movl	4(%esp),%eax	# %eax = arg0: label_t * to fill in
                    913: 	movl	%ebx,(%eax)	# save ebx
                    914: 	movl	%esp,4(%eax)	# save esp
                    915: 	movl	%ebp,8(%eax)	# save ebp
                    916: 	movl	%esi,12(%eax)	# save esi
                    917: 	movl	%edi,16(%eax)	# save edi
                    918: 	movl	(%esp),%edx	# get rta (our return address)
                    919: 	movl	%edx,20(%eax)	# save eip
1.48      yamt      920: 	xorl	%eax,%eax	# return 0
1.1       fvdl      921: 	ret
1.72      yamt      922: END(setjmp)
1.1 fvdl 923:
1.48 yamt 924: /*
925: * int longjmp(label_t *)
926: *
927: * Used primarily by DDB.
928: */
1.1 fvdl 929: ENTRY(longjmp)
930: movl 4(%esp),%eax
931: movl (%eax),%ebx # restore ebx
932: movl 4(%eax),%esp # restore esp
933: movl 8(%eax),%ebp # restore ebp
934: movl 12(%eax),%esi # restore esi
935: movl 16(%eax),%edi # restore edi
936: movl 20(%eax),%edx # get rta
937: movl %edx,(%esp) # put in return frame
1.48 yamt 938: movl $1,%eax # return 1
1.1 fvdl 939: ret
1.72 yamt 940: END(longjmp)
1.1 fvdl 941:
1.65 skrll 942: /*
943: * void dumpsys(void)
944: *
945: * Mimic cpu_switchto() for postmortem debugging.
946: */
1.64      skrll     947: ENTRY(dumpsys)
1.65      skrll     948: 	pushl	%ebx			# set up fake switchframe
                    949: 	pushl	%esi			# and save context
                    950: 	pushl	%edi
                    951: 	movl	%esp,_C_LABEL(dumppcb)+PCB_ESP	# record sp/fp in dumppcb for
                    952: 	movl	%ebp,_C_LABEL(dumppcb)+PCB_EBP	# postmortem debugging (see header)
                    953: 	call	_C_LABEL(dodumpsys)	# dump!
                    954: 	addl	$(3*4), %esp		# unwind switchframe (3 pushes)
1.64      skrll     955: 	ret
1.72      yamt      956: END(dumpsys)
1.64 skrll 957:
1.1 fvdl 958: /*
1.79 ad 959: * struct lwp *cpu_switchto(struct lwp *oldlwp, struct *newlwp,
1.58 ad 960: * bool returning)
1.30 junyoung 961: *
1.48 yamt 962: * 1. if (oldlwp != NULL), save its context.
963: * 2. then, restore context of newlwp.
964: *
965: * Note that the stack frame layout is known to "struct switchframe" in
966: * <machine/frame.h> and to the code in cpu_lwp_fork() which initializes
1.5 thorpej 967: * it for a new lwp.
1.1 fvdl 968: */
1.48 yamt 969: ENTRY(cpu_switchto)
	# Build a struct switchframe (ebx/esi/edi) on oldlwp's stack.
1.1 fvdl 970: pushl %ebx
971: pushl %esi
972: pushl %edi
973:
1.70 ad 974: #if defined(DIAGNOSTIC) && !defined(XEN)
975: cmpl $IPL_SCHED,CPUVAR(ILEVEL)
976: jbe 0f
1.74 christos 977: pushl CPUVAR(ILEVEL)
1.70 ad 978: pushl $.Lstr
979: call _C_LABEL(panic)
1.74 christos 980: addl $8,%esp
981: .Lstr: .string "cpu_switchto: switching above IPL_SCHED (%d)\0"
1.70 ad 982: 0:
983: #endif
984:
	# Register roles for the remainder of this function:
	#   %esi = oldlwp, %edi = newlwp, %edx = 'returning' flag,
	#   %ebx = newlwp's pcb (loaded at label 1 below).
1.48 yamt 985: movl 16(%esp),%esi # oldlwp
986: movl 20(%esp),%edi # newlwp
1.58 ad 987: movl 24(%esp),%edx # returning
1.48 yamt 988: testl %esi,%esi
989: jz 1f # oldlwp == NULL: nothing to save
1.1 fvdl 990:
1.48 yamt 991: /* Save old context. */
1.91 rmind 992: movl L_PCB(%esi),%eax
1.48 yamt 993: movl %esp,PCB_ESP(%eax)
994: movl %ebp,PCB_EBP(%eax)
995:
996: /* Switch to newlwp's stack. */
1.91 rmind 997: 1: movl L_PCB(%edi),%ebx
1.48 yamt 998: movl PCB_EBP(%ebx),%ebp
999: movl PCB_ESP(%ebx),%esp
1.1 fvdl 1000:
1.61 ad 1001: /*
1002: * Set curlwp. This must be globally visible in order to permit
1003: * non-interlocked mutex release.
1004: */
1005: movl %edi,%ecx
	# xchgl with a memory operand is implicitly locked on x86,
	# giving the full-barrier visibility the comment above requires.
1006: xchgl %ecx,CPUVAR(CURLWP)
1.58 ad 1007:
1008: /* Skip the rest if returning to a pinned LWP. */
1009: testl %edx,%edx
1010: jnz 4f
1011:
1.62 bouyer 1012: #ifdef XEN
1013: pushl %edi
1014: call _C_LABEL(i386_switch_context)
1015: addl $4,%esp
1.97 bouyer 1016: #else /* !XEN */
1.60 yamt 1017: /* Switch ring0 esp */
1018: movl PCB_ESP0(%ebx),%eax
1019: movl %eax,CPUVAR(ESP0)
1.97 bouyer 1020: #endif /* !XEN */
1.1 fvdl 1021:
1022: /* Don't bother with the rest if switching to a system process. */
1.48 yamt 1023: testl $LW_SYSTEM,L_FLAG(%edi)
1024: jnz 4f
1.1 fvdl 1025:
1.97 bouyer 1026: #ifndef XEN
1.54 ad 1027: /* Restore thread-private %fs/%gs descriptors. */
	# Patch the per-CPU GDT slots GUFS_SEL/GUGS_SEL (8 bytes each)
	# with the descriptors saved in the new lwp's pcb.
1.60 yamt 1028: movl CPUVAR(GDT),%ecx
1.55 ad 1029: movl PCB_FSD(%ebx), %eax
1030: movl PCB_FSD+4(%ebx), %edx
1.54 ad 1031: movl %eax, (GUFS_SEL*8)(%ecx)
1032: movl %edx, (GUFS_SEL*8+4)(%ecx)
1.55 ad 1033: movl PCB_GSD(%ebx), %eax
1034: movl PCB_GSD+4(%ebx), %edx
1.54 ad 1035: movl %eax, (GUGS_SEL*8)(%ecx)
1036: movl %edx, (GUGS_SEL*8+4)(%ecx)
1.97 bouyer 1037: #endif /* !XEN */
1.54 ad 1038:
1.60 yamt 1039: /* Switch I/O bitmap */
1040: movl PCB_IOMAP(%ebx),%eax
1041: orl %eax,%eax # pcb_iomap != NULL?
1.95 joerg 1042: jnz .Lcopy_iobitmap
	# No per-process bitmap: point TSS iobase past the segment limit
	# so all user I/O port access traps.
1.60 yamt 1043: movl $(IOMAP_INVALOFF << 16),CPUVAR(IOBASE)
1044: .Liobitmap_done:
1045:
1.56 ad 1046: /* Is this process using RAS (restartable atomic sequences)? */
1047: movl L_PROC(%edi),%eax
1048: cmpl $0,P_RASLIST(%eax)
1049: jne 5f
1050:
1.48 yamt 1051: /*
1.90 rmind 1052: * Restore cr0 (including FPU state). Raise the IPL to IPL_HIGH.
1.48 yamt 1053: * FPU IPIs can alter the LWP's saved cr0. Dropping the priority
1054: * is deferred until mi_switch(), when cpu_switchto() returns.
1055: */
1.62 bouyer 1056: 2:
1.97 bouyer 1057: #ifdef XEN
1058: pushl %edi
1059: call _C_LABEL(i386_tls_switch)
1060: addl $4,%esp
1061: #else /* !XEN */
1.90 rmind 1062: movl $IPL_HIGH,CPUVAR(ILEVEL)
1.79 ad 1063: movl PCB_CR0(%ebx),%ecx /* has CR0_TS clear */
1.48 yamt 1064: movl %cr0,%edx
1.1 fvdl 1065:
1.30 junyoung 1066: /*
1.22 wiz 1067: * If our floating point registers are on a different CPU,
1.48 yamt 1068: * set CR0_TS so we'll trap rather than reuse bogus state.
1.1 fvdl 1069: */
1.79 ad 1070: cmpl CPUVAR(FPCURLWP),%edi
1.48 yamt 1071: je 3f
1.1 fvdl 1072: orl $CR0_TS,%ecx
1.48 yamt 1073:
1074: /* Reloading CR0 is very expensive - avoid if possible. */
1075: 3: cmpl %edx,%ecx
1076: je 4f
1.1 fvdl 1077: movl %ecx,%cr0
1.97 bouyer 1078: #endif /* !XEN */
1.1 fvdl 1079:
1.48 yamt 1080: /* Return to the new LWP, returning 'oldlwp' in %eax. */
1081: 4: movl %esi,%eax
1.1 fvdl 1082: popl %edi
1083: popl %esi
1084: popl %ebx
1085: ret
1.20 dsl 1086:
1.48 yamt 1087: /* Check for restartable atomic sequences (RAS). */
	# ras_lookup(p, tf_eip): -1 means eip is not inside a RAS;
	# any other value is the address to restart the sequence at,
	# which we patch back into the trapframe's eip.
1088: 5: movl L_MD_REGS(%edi),%ecx
1089: pushl TF_EIP(%ecx)
1.20 dsl 1090: pushl %eax
1091: call _C_LABEL(ras_lookup)
1092: addl $8,%esp
1093: cmpl $-1,%eax
1.48 yamt 1094: je 2b
1.49 ad 1095: movl L_MD_REGS(%edi),%ecx
1096: movl %eax,TF_EIP(%ecx)
1.48 yamt 1097: jmp 2b
1.1 fvdl 1098:
1.60 yamt 1099: .Lcopy_iobitmap:
1100: /* Copy I/O bitmap. */
	# Copy IOMAPSIZE/4 dwords from pcb_iomap (%eax) into this CPU's
	# cpu_info iomap, then set iobase so the TSS uses it.
	# %esi/%edi are live (old/new lwp) so preserve them around movsl.
1.86 ad 1101: incl _C_LABEL(pmap_iobmp_evcnt)+EV_COUNT
1.60 yamt 1102: movl $(IOMAPSIZE/4),%ecx
1103: pushl %esi
1104: pushl %edi
1105: movl %eax,%esi /* pcb_iomap */
1106: movl CPUVAR(SELF),%edi
1107: leal CPU_INFO_IOMAP(%edi),%edi
1108: rep
1109: movsl
1110: popl %edi
1111: popl %esi
1112: movl $((CPU_INFO_IOMAP - CPU_INFO_TSS) << 16),CPUVAR(IOBASE)
1113: jmp .Liobitmap_done
1.72 yamt 1114: END(cpu_switchto)
1.60 yamt 1115:
1.1 fvdl 1116: /*
1117: * void savectx(struct pcb *pcb);
1.48 yamt 1118: *
1.1 fvdl 1119: * Update pcb, saving current processor state.
1120: */
1121: ENTRY(savectx)
	# Only esp/ebp are recorded; callee-saved registers are
	# left to the caller's frame.
1.47 skrll 1122: movl 4(%esp),%edx # edx = pcb
1.1 fvdl 1123: movl %esp,PCB_ESP(%edx)
1124: movl %ebp,PCB_EBP(%edx)
1125: ret
1.72 yamt 1126: END(savectx)
1.1 fvdl 1127:
1128: /*
1.48 yamt 1129: * osyscall()
1130: *
1.1 fvdl 1131: * Old call gate entry for syscall
1132: */
1133: IDTVEC(osyscall)
1.87 ad 1134: #ifndef XEN
1135: /* XXX we are in trouble! interrupts be off here. */
1.85 ad 1136: cli # must be first instruction
1.87 ad 1137: #endif
	# A call gate does not push eflags; synthesize tf_eflags in the
	# frame slot at 8(%esp) from the current flags.
1.48 yamt 1138: pushfl # set eflags in trap frame
1.1 fvdl 1139: popl 8(%esp)
1.85 ad 1140: orl $PSL_I,(%esp) # re-enable ints on return to user
1141: pushl $7 # size of instruction for restart
1142: jmp syscall1
1.72 yamt 1143: IDTVEC_END(osyscall)
1.1 fvdl 1144:
1145: /*
1.48 yamt 1146: * syscall()
1147: *
1.1 fvdl 1148: * Trap gate entry for syscall
1149: */
1150: IDTVEC(syscall)
1151: pushl $2 # size of instruction for restart
1152: syscall1:
1153: pushl $T_ASTFLT # trap # for doing ASTs
1154: INTRENTRY
1.85 ad 1155: STI(%eax)
1.1 fvdl 1156: #ifdef DIAGNOSTIC
1157: movl CPUVAR(ILEVEL),%ebx
1158: testl %ebx,%ebx
1159: jz 1f
1160: pushl $5f
1.84 ad 1161: call _C_LABEL(panic)
1.1 fvdl 1162: addl $4,%esp
1163: #ifdef DDB
1164: int $3
1165: #endif
1.30 junyoung 1166: 1:
1.1 fvdl 1167: #endif /* DIAGNOSTIC */
	# 64-bit per-CPU syscall counter: addl sets CF, adcl folds the
	# carry into the high word.
1.94 matt 1168: addl $1,CPUVAR(NSYSCALL) # count it atomically
1169: adcl $0,CPUVAR(NSYSCALL)+4 # count it atomically
1.68 ad 1170: movl CPUVAR(CURLWP),%edi
1171: movl L_PROC(%edi),%edx
1172: movl %esp,L_MD_REGS(%edi) # save pointer to frame
1.15 fvdl 1173: pushl %esp
1.1 fvdl 1174: call *P_MD_SYSCALL(%edx) # get pointer to syscall() function
1.15 fvdl 1175: addl $4,%esp
1.27 yamt 1176: .Lsyscall_checkast:
1.24 yamt 1177: /* Check for ASTs on exit to user mode. */
1.62 bouyer 1178: CLI(%eax)
1.68 ad 1179: movl L_MD_ASTPENDING(%edi), %eax
1180: orl CPUVAR(WANT_PMAPLOAD), %eax
1.24 yamt 1181: jnz 9f
1.62 bouyer 1182: #ifdef XEN
	# Xen has no hardware interrupt latch: poll for events posted
	# while we were in the kernel and dispatch any that are unmasked
	# at the current IPL before returning to user mode.
1183: STIC(%eax)
1184: jz 14f
1185: call _C_LABEL(stipending)
1186: testl %eax,%eax
1187: jz 14f
1188: /* process pending interrupts */
1189: CLI(%eax)
1190: movl CPUVAR(ILEVEL), %ebx
1191: movl $.Lsyscall_resume, %esi # address to resume loop at
1192: .Lsyscall_resume:
1193: movl %ebx,%eax # get cpl
1194: movl CPUVAR(IUNMASK)(,%eax,4),%eax
1195: andl CPUVAR(IPENDING),%eax # any non-masked bits left?
1196: jz 17f
	# Service the highest-numbered pending source first.
1197: bsrl %eax,%eax
1198: btrl %eax,CPUVAR(IPENDING)
1199: movl CPUVAR(ISOURCES)(,%eax,4),%eax
1200: jmp *IS_RESUME(%eax)
1201: 17: movl %ebx, CPUVAR(ILEVEL) #restore cpl
1202: jmp .Lsyscall_checkast
1203: 14:
1204: #endif /* XEN */
1.1 fvdl 1205: #ifndef DIAGNOSTIC
1.24 yamt 1206: INTRFASTEXIT
1.1 fvdl 1207: #else /* DIAGNOSTIC */
1.24 yamt 1208: cmpl $IPL_NONE,CPUVAR(ILEVEL)
1.1 fvdl 1209: jne 3f
1210: INTRFASTEXIT
1.62 bouyer 1211: 3: STI(%eax)
1.1 fvdl 1212: pushl $4f
1.84 ad 1213: call _C_LABEL(panic)
1.1 fvdl 1214: addl $4,%esp
1.58 ad 1215: pushl $IPL_NONE
1216: call _C_LABEL(spllower)
1217: addl $4,%esp
1.53 yamt 1218: jmp .Lsyscall_checkast
1.84 ad 1219: 4: .asciz "SPL NOT LOWERED ON SYSCALL EXIT\n"
1220: 5: .asciz "SPL NOT ZERO ON SYSCALL ENTRY\n"
1.1 fvdl 1221: #endif /* DIAGNOSTIC */
	# Slow path: a deferred pmap load and/or an AST is pending.
	# Handle it with interrupts enabled, then re-run the check loop.
1.68 ad 1222: 9:
1223: cmpl $0, CPUVAR(WANT_PMAPLOAD)
1224: jz 10f
1225: STI(%eax)
1.24 yamt 1226: call _C_LABEL(pmap_load)
1.27 yamt 1227: jmp .Lsyscall_checkast /* re-check ASTs */
1.68 ad 1228: 10:
1229: /* Always returning to user mode here. */
1230: movl $0, L_MD_ASTPENDING(%edi)
1231: STI(%eax)
1232: /* Pushed T_ASTFLT into tf_trapno on entry. */
1233: pushl %esp
1234: call _C_LABEL(trap)
1235: addl $4,%esp
1236: jmp .Lsyscall_checkast /* re-check ASTs */
1.72 yamt 1237: IDTVEC_END(syscall)
1.1 fvdl 1238:
	# SVR4 "fast trap" gate: dispatches through the function pointer
	# svr4_fasttrap_vec while holding svr4_fasttrap_lock as a reader,
	# then runs the usual AST / deferred-pmap-switch exit checks.
1.81 ad 1239: IDTVEC(svr4_fasttrap)
1240: pushl $2 # size of instruction for restart
1241: pushl $T_ASTFLT # trap # for doing ASTs
1242: INTRENTRY
1.85 ad 1243: STI(%eax)
1.81 ad 1244: pushl $RW_READER
1245: pushl $_C_LABEL(svr4_fasttrap_lock)
1246: call _C_LABEL(rw_enter)
1247: addl $8,%esp
1248: call *_C_LABEL(svr4_fasttrap_vec)
1249: pushl $_C_LABEL(svr4_fasttrap_lock)
1250: call _C_LABEL(rw_exit)
1251: addl $4,%esp
1252: 2: /* Check for ASTs on exit to user mode. */
1253: cli
1254: CHECK_ASTPENDING(%eax)
1255: je 1f
1256: /* Always returning to user mode here. */
1257: CLEAR_ASTPENDING(%eax)
1258: sti
1259: /* Pushed T_ASTFLT into tf_trapno on entry. */
1260: pushl %esp
1261: call _C_LABEL(trap)
1262: addl $4,%esp
1263: jmp 2b
1264: 1: CHECK_DEFERRED_SWITCH
1265: jnz 9f
1266: INTRFASTEXIT
	# Deferred pmap switch pending: load it with interrupts on,
	# then re-run the exit checks from the top.
1267: 9: sti
1268: call _C_LABEL(pmap_load)
1269: cli
1270: jmp 2b
1271:
1.1 fvdl 1272: #if NNPX > 0
1273: /*
1274: * Special interrupt handlers. Someday intr0-intr15 will be used to count
1275: * interrupts. We'll still need a special exception 16 handler. The busy
1276: * latch stuff in probintr() can be moved to npxprobe().
1277: */
1278:
1.48 yamt 1279: /*
1280: * void probeintr(void)
1281: */
1.1 fvdl 1282: NENTRY(probeintr)
	# NOTE(review): the "ss" stack-segment override on the incl —
	# presumably %ds cannot be trusted at npx-probe time; confirm.
1283: ss
1284: incl _C_LABEL(npx_intrs_while_probing)
1285: pushl %eax
1286: movb $0x20,%al # EOI (asm in strings loses cpp features)
1287: outb %al,$0xa0 # IO_ICU2
1288: outb %al,$0x20 # IO_ICU1
1289: movb $0,%al
1290: outb %al,$0xf0 # clear BUSY# latch
1291: popl %eax
1292: iret
1.72 yamt 1293: END(probeintr)
1.1 fvdl 1294:
1.48 yamt 1295: /*
1296: * void probetrap(void)
1297: */
1.1 fvdl 1298: NENTRY(probetrap)
	# Count the FP trap (ss override as in probeintr above), clear
	# pending FP exceptions with fnclex, and resume.
1299: ss
1300: incl _C_LABEL(npx_traps_while_probing)
1301: fnclex
1302: iret
1.72 yamt 1303: END(probetrap)
1.1 fvdl 1304:
1.48 yamt 1305: /*
1306: * int npx586bug1(int a, int b)
1307: */
1.1 fvdl 1308: NENTRY(npx586bug1)
	# Computes x - (x/y)*y in the FPU and returns it as an int in
	# %eax. On correct hardware the result is 0; a nonzero result
	# indicates a faulty FDIV (the name suggests the Pentium FDIV
	# erratum check — arguments chosen by the caller).
1309: fildl 4(%esp) # x
1310: fildl 8(%esp) # y
1311: fld %st(1)
1312: fdiv %st(1),%st # x/y
1313: fmulp %st,%st(1) # (x/y)*y
1314: fsubrp %st,%st(1) # x-(x/y)*y
1315: pushl $0
1316: fistpl (%esp)
1317: popl %eax
1318: ret
1.72 yamt 1319: END(npx586bug1)
1.1 fvdl 1320: #endif /* NNPX > 0 */
1.50 ad 1321:
1322: /*
1.76 ad 1323: * void sse2_idlezero_page(void *pg)
1.50 ad 1324: *
1.76 ad 1325: * Zero a page without polluting the cache. Preemption must be
1326: * disabled by the caller. Abort if a preemption is pending.
1.50 ad 1327: */
1.76 ad 1328: ENTRY(sse2_idlezero_page)
	# Returns 1 in %eax if the full page was zeroed, 0 if aborted
	# early because a kernel preemption became pending.
1.50 ad 1329: pushl %ebp
1330: movl %esp,%ebp
1331: movl 8(%esp), %edx
1.76 ad 1332: movl $(PAGE_SIZE/32), %ecx # loop count: 32 bytes per pass
1.50 ad 1333: xorl %eax, %eax # zero source and return value
1334: .align 16
1335: 1:
	# Abort between 32-byte chunks if a preemption is requested.
1.82 ad 1336: testl $RESCHED_KPREEMPT, CPUVAR(RESCHED)
1.76 ad 1337: jnz 2f
	# movnti: non-temporal stores, bypassing the cache as the
	# function comment promises.
1.50 ad 1338: movnti %eax, 0(%edx)
1339: movnti %eax, 4(%edx)
1340: movnti %eax, 8(%edx)
1341: movnti %eax, 12(%edx)
1342: movnti %eax, 16(%edx)
1343: movnti %eax, 20(%edx)
1344: movnti %eax, 24(%edx)
1345: movnti %eax, 28(%edx)
1.76 ad 1346: addl $32, %edx
1.75 ad 1347: decl %ecx
1.50 ad 1348: jnz 1b
1349: sfence # order the non-temporal stores
1.76 ad 1350: incl %eax # success: return 1
1.50 ad 1351: pop %ebp
1352: ret
1.76 ad 1353: 2:
1.50 ad 1354: sfence
1355: popl %ebp
1356: ret # aborted: %eax still 0
1.76 ad 1357: END(sse2_idlezero_page)
CVSweb <webmaster@jp.NetBSD.org>