Annotation of src/sys/arch/i386/i386/locore.S, Revision 1.12
1.12 ! agc 1: /* $NetBSD: locore.S,v 1.11 2003/06/26 16:47:15 drochner Exp $ */
1.1 fvdl 2:
3: /*-
4: * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Charles M. Hannum.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
39: /*-
40: * Copyright (c) 1990 The Regents of the University of California.
41: * All rights reserved.
42: *
43: * This code is derived from software contributed to Berkeley by
44: * William Jolitz.
45: *
46: * Redistribution and use in source and binary forms, with or without
47: * modification, are permitted provided that the following conditions
48: * are met:
49: * 1. Redistributions of source code must retain the above copyright
50: * notice, this list of conditions and the following disclaimer.
51: * 2. Redistributions in binary form must reproduce the above copyright
52: * notice, this list of conditions and the following disclaimer in the
53: * documentation and/or other materials provided with the distribution.
1.12 ! agc 54: * 3. Neither the name of the University nor the names of its contributors
1.1 fvdl 55: * may be used to endorse or promote products derived from this software
56: * without specific prior written permission.
57: *
58: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68: * SUCH DAMAGE.
69: *
70: * @(#)locore.s 7.3 (Berkeley) 5/13/91
71: */
72:
73: #include "opt_cputype.h"
74: #include "opt_ddb.h"
75: #include "opt_ipkdb.h"
76: #include "opt_vm86.h"
77: #include "opt_user_ldt.h"
78: #include "opt_dummy_nops.h"
79: #include "opt_compat_oldboot.h"
80: #include "opt_multiprocessor.h"
81: #include "opt_lockdebug.h"
82: #include "opt_realmem.h"
83:
84: #include "npx.h"
85: #include "assym.h"
86: #include "apm.h"
87: #include "lapic.h"
88: #include "ioapic.h"
1.8 fvdl 89: #include "ksyms.h"
1.1 fvdl 90:
91: #include <sys/errno.h>
92: #include <sys/syscall.h>
93:
94: #include <machine/cputypes.h>
95: #include <machine/param.h>
96: #include <machine/pte.h>
97: #include <machine/segments.h>
98: #include <machine/specialreg.h>
99: #include <machine/trap.h>
100: #include <machine/bootinfo.h>
101:
102: #if NLAPIC > 0
103: #include <machine/i82489reg.h>
104: #endif
105:
106: /* LINTSTUB: include <sys/types.h> */
107: /* LINTSTUB: include <machine/cpu.h> */
108: /* LINTSTUB: include <sys/systm.h> */
109:
110: #include <machine/asm.h>
111:
/*
 * Per-CPU state accessor macros.  SET_CURLWP installs `lwp' as this
 * CPU's current lwp; in the MULTIPROCESSOR variant it also records the
 * owning cpu_info back-pointer in the lwp, using `cpu' as scratch.
 * (The UP variant ignores its second argument.)
 */
112: #if defined(MULTIPROCESSOR)
113:
1.5 thorpej 114: #define SET_CURLWP(lwp,cpu) \
1.1 fvdl 115: movl CPUVAR(SELF),cpu ; \
1.5 thorpej 116: movl lwp,CPUVAR(CURLWP) ; \
117: movl cpu,L_CPU(lwp)
1.1 fvdl 118:
119: #else
120:
1.5 thorpej 121: #define SET_CURLWP(lwp,tcpu) movl lwp,CPUVAR(CURLWP)
122: #define GET_CURLWP(reg) movl CPUVAR(CURLWP),reg
1.1 fvdl 123:
124: #endif
125:
/* Load/store the current pcb pointer, and clear the reschedule flag. */
126: #define GET_CURPCB(reg) movl CPUVAR(CURPCB),reg
127: #define SET_CURPCB(reg) movl reg,CPUVAR(CURPCB)
128:
129: #define CLEAR_RESCHED(reg) movl reg,CPUVAR(RESCHED)
130:
131: /* XXX temporary kluge; these should not be here */
132: /* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */
133: #include <dev/isa/isareg.h>
134:
135:
136: /* Disallow old names for REALBASEMEM */
137: #ifdef BIOSBASEMEM
138: #error BIOSBASEMEM option deprecated; use REALBASEMEM only if memory size reported by latest boot block is incorrect
139: #endif
140:
141: /* Disallow old names for REALEXTMEM */
142: #ifdef EXTMEM_SIZE
143: #error EXTMEM_SIZE option deprecated; use REALEXTMEM only if memory size reported by latest boot block is incorrect
144: #endif
145: #ifdef BIOSEXTMEM
146: #error BIOSEXTMEM option deprecated; use REALEXTMEM only if memory size reported by latest boot block is incorrect
147: #endif
148:
149: #include <machine/frameasm.h>
150:
151:
152: #ifdef MULTIPROCESSOR
153: #include <machine/i82489reg.h>
154: #endif
155:
156: /*
157: * PTmap is recursive pagemap at top of virtual address space.
158: * Within PTmap, the page directory can be found (third indirection).
159: *
160: * XXX 4 == sizeof pde
161: */
162: .set _C_LABEL(PTmap),(PDSLOT_PTE << PDSHIFT)
1.7 thorpej 163: .set _C_LABEL(PTD),(_C_LABEL(PTmap) + PDSLOT_PTE * PAGE_SIZE)
1.1 fvdl 164: .set _C_LABEL(PTDpde),(_C_LABEL(PTD) + PDSLOT_PTE * 4)
165:
166: /*
167: * APTmap, APTD is the alternate recursive pagemap.
168: * It's used when modifying another process's page tables.
169: *
170: * XXX 4 == sizeof pde
171: */
172: .set _C_LABEL(APTmap),(PDSLOT_APTE << PDSHIFT)
1.7 thorpej 173: .set _C_LABEL(APTD),(_C_LABEL(APTmap) + PDSLOT_APTE * PAGE_SIZE)
/* Note: APTDpde is deliberately a slot within the *primary* PTD (at
 * index PDSLOT_APTE), not within APTD -- the alternate map is entered
 * through the regular page directory. */
1.1 fvdl 174: .set _C_LABEL(APTDpde),(_C_LABEL(PTD) + PDSLOT_APTE * 4)
175:
176:
177: /*
178: * Initialization
179: */
/*
 * Bootstrap-time kernel variables, filled in by `start' below from the
 * boot loader's arguments, plus the memory-mapped local APIC window
 * (one page, page-aligned) when NLAPIC > 0.
 */
180: .data
181:
182: .globl _C_LABEL(cpu)
183: .globl _C_LABEL(esym),_C_LABEL(boothowto)
184: .globl _C_LABEL(bootinfo),_C_LABEL(atdevbase)
185: #ifdef COMPAT_OLDBOOT
186: .globl _C_LABEL(bootdev)
187: #endif
188: .globl _C_LABEL(proc0paddr),_C_LABEL(PTDpaddr)
189: .globl _C_LABEL(biosbasemem),_C_LABEL(biosextmem)
190: .globl _C_LABEL(gdt)
191: #ifdef I586_CPU
192: .globl _C_LABEL(idt)
193: #endif
194: .globl _C_LABEL(lapic_tpr)
195:
196: #if NLAPIC > 0
197: #ifdef __ELF__
1.7 thorpej 198: .align PAGE_SIZE
1.1 fvdl 199: #else
200: .align 12
201: #endif
/* Labels laid out at the architectural LAPIC register offsets, so the
 * page can later be mapped over the physical local APIC. */
202: .globl _C_LABEL(local_apic), _C_LABEL(lapic_id)
203: _C_LABEL(local_apic):
204: .space LAPIC_ID
205: _C_LABEL(lapic_id):
206: .long 0x00000000
207: .space LAPIC_TPRI-(LAPIC_ID+4)
208: _C_LABEL(lapic_tpr):
209: .space LAPIC_PPRI-LAPIC_TPRI
210: _C_LABEL(lapic_ppr):
211: .space LAPIC_ISR-LAPIC_PPRI
212: _C_LABEL(lapic_isr):
1.7 thorpej 213: .space PAGE_SIZE-LAPIC_ISR
1.1 fvdl 214: #else
215: _C_LABEL(lapic_tpr):
216: .long 0
217: #endif
218:
219:
220: _C_LABEL(cpu): .long 0 # are we 386, 386sx, or 486,
221: # or Pentium, or..
222: _C_LABEL(esym): .long 0 # ptr to end of syms
223: _C_LABEL(atdevbase): .long 0 # location of start of iomem in virtual
224: _C_LABEL(proc0paddr): .long 0
225: _C_LABEL(PTDpaddr): .long 0 # paddr of PTD, for libkvm
226: #ifndef REALBASEMEM
227: _C_LABEL(biosbasemem): .long 0 # base memory reported by BIOS
228: #else
229: _C_LABEL(biosbasemem): .long REALBASEMEM
230: #endif
231: #ifndef REALEXTMEM
232: _C_LABEL(biosextmem): .long 0 # extended memory reported by BIOS
233: #else
234: _C_LABEL(biosextmem): .long REALEXTMEM
235: #endif
236:
/* 512-byte temporary bootstrap stack; tmpstk labels its TOP (the stack
 * grows down), and is loaded into %esp by `start' below. */
237: .space 512
238: tmpstk:
239:
240:
/*
 * Kernel entry point (`start').  Runs with paging DISABLED at the
 * physical load address, so all symbol references go through
 * RELOC()/_RELOC(), which subtract KERNBASE.  Phases, in order:
 *   1. stash boot arguments (howto, [bootdev], bootinfo list, esym,
 *      basemem, extmem) from the boot loader's stack;
 *   2. probe the CPU type (386 / NexGen / 486 / Cyrix / cpuid-capable)
 *      into _C_LABEL(cpu) and cpu_info_primary;
 *   3. build bootstrap page tables after the kernel image, double-map
 *      the kernel at both low addresses and KERNBASE, enable paging;
 *   4. jump to `begin' in high memory, tear down the low double map,
 *      set up proc0's stack/pcb, then call initgdt/init386/main.
 * Throughout phases 3-4, %esi holds the physical base of the
 * bootstrap tables (proc0 page directory).
 */
241: #define _RELOC(x) ((x) - KERNBASE)
242: #define RELOC(x) _RELOC(_C_LABEL(x))
243:
244: .text
245: .globl _C_LABEL(kernel_text)
246: .set _C_LABEL(kernel_text),KERNTEXTOFF
247:
248: .globl start
249: start: movw $0x1234,0x472 # warm boot
250:
251: /*
252: * Load parameters from stack
253: * (howto, [bootdev], bootinfo, esym, basemem, extmem).
254: */
255: movl 4(%esp),%eax
256: movl %eax,RELOC(boothowto)
257: #ifdef COMPAT_OLDBOOT
258: movl 8(%esp),%eax
259: movl %eax,RELOC(bootdev)
260: #endif
261: movl 12(%esp),%eax
262:
/* Copy the boot loader's bootinfo entry list (if any) into the kernel's
 * bootinfo buffer, bounded by BOOTINFO_MAXSIZE. */
263: testl %eax, %eax
264: jz 1f
265: movl (%eax), %ebx /* number of entries */
266: movl $RELOC(bootinfo), %edi
267: movl %ebx, (%edi)
268: addl $4, %edi
269: 2:
270: testl %ebx, %ebx
271: jz 1f
272: addl $4, %eax
273: movl (%eax), %ecx /* address of entry */
274: pushl %eax
275: pushl (%ecx) /* len */
276: pushl %ecx
277: pushl %edi
278: addl (%ecx), %edi /* update dest pointer */
279: cmpl $_RELOC(_C_LABEL(bootinfo) + BOOTINFO_MAXSIZE), %edi
280: jg 2f
281: call _C_LABEL(memcpy)
282: addl $12, %esp
283: popl %eax
284: subl $1, %ebx
285: jmp 2b
286: 2: /* cleanup for overflow case */
287: addl $16, %esp
288: movl $RELOC(bootinfo), %edi
289: subl %ebx, (%edi) /* correct number of entries */
290: 1:
291:
/* esym: boot loader passes a physical pointer; store it as virtual. */
292: movl 16(%esp),%eax
293: testl %eax,%eax
294: jz 1f
295: addl $KERNBASE,%eax
296: 1: movl %eax,RELOC(esym)
297:
/* Memory sizes: loader-provided values are used only when not already
 * forced by the REALBASEMEM/REALEXTMEM options (nonzero initializers). */
298: movl RELOC(biosextmem),%eax
299: testl %eax,%eax
300: jnz 1f
301: movl 20(%esp),%eax
302: movl %eax,RELOC(biosextmem)
303: 1:
304: movl RELOC(biosbasemem),%eax
305: testl %eax,%eax
306: jnz 1f
307: movl 24(%esp),%eax
308: movl %eax,RELOC(biosbasemem)
309: 1:
310:
311: /* First, reset the PSL. */
312: pushl $PSL_MBO
313: popfl
314:
315: /* Clear segment registers; always null in proc0. */
316: xorl %eax,%eax
317: movw %ax,%fs
318: movw %ax,%gs
319: decl %eax
320: movl %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
321:
322: /* Find out our CPU type. */
323:
324: try386: /* Try to toggle alignment check flag; does not exist on 386. */
325: pushfl
326: popl %eax
327: movl %eax,%ecx
328: orl $PSL_AC,%eax
329: pushl %eax
330: popfl
331: pushfl
332: popl %eax
333: xorl %ecx,%eax
334: andl $PSL_AC,%eax
335: pushl %ecx
336: popfl
337:
338: testl %eax,%eax
339: jnz try486
340:
341: /*
342: * Try the test of a NexGen CPU -- ZF will not change on a DIV
343: * instruction on a NexGen, it will on an i386. Documented in
344: * Nx586 Processor Recognition Application Note, NexGen, Inc.
345: */
346: movl $0x5555,%eax
347: xorl %edx,%edx
348: movl $2,%ecx
349: divl %ecx
350: jnz is386
351:
352: isnx586:
353: /*
354: * Don't try cpuid, as Nx586s reportedly don't support the
355: * PSL_ID bit.
356: */
357: movl $CPU_NX586,RELOC(cpu)
358: jmp 2f
359:
360: is386:
361: movl $CPU_386,RELOC(cpu)
362: jmp 2f
363:
364: try486: /* Try to toggle identification flag; does not exist on early 486s. */
365: pushfl
366: popl %eax
367: movl %eax,%ecx
368: xorl $PSL_ID,%eax
369: pushl %eax
370: popfl
371: pushfl
372: popl %eax
373: xorl %ecx,%eax
374: andl $PSL_ID,%eax
375: pushl %ecx
376: popfl
377:
378: testl %eax,%eax
379: jnz try586
380: is486: movl $CPU_486,RELOC(cpu)
381: /*
382: * Check Cyrix CPU
383: * Cyrix CPUs do not change the undefined flags following
384: * execution of the divide instruction which divides 5 by 2.
385: *
386: * Note: CPUID is enabled on M2, so it passes another way.
387: */
388: pushfl
389: movl $0x5555, %eax
390: xorl %edx, %edx
391: movl $2, %ecx
392: clc
393: divl %ecx
394: jnc trycyrix486
395: popfl
396: jmp 2f
397: trycyrix486:
398: movl $CPU_6x86,RELOC(cpu) # set CPU type
399: /*
400: * Check for Cyrix 486 CPU by seeing if the flags change during a
401: * divide. This is documented in the Cx486SLC/e SMM Programmer's
402: * Guide.
403: */
404: xorl %edx,%edx
405: cmpl %edx,%edx # set flags to known state
406: pushfl
407: popl %ecx # store flags in ecx
408: movl $-1,%eax
409: movl $4,%ebx
410: divl %ebx # do a long division
411: pushfl
412: popl %eax
413: xorl %ecx,%eax # are the flags different?
414: testl $0x8d5,%eax # only check C|PF|AF|Z|N|V
415: jne 2f # yes; must be Cyrix 6x86 CPU
416: movl $CPU_486DLC,RELOC(cpu) # set CPU type
417:
/* Cyrix 486DLC: program the cache configuration registers (CCR0/NCRn)
 * through the index/data port pair 0x22/0x23. */
418: #ifndef CYRIX_CACHE_WORKS
419: /* Disable caching of the ISA hole only. */
420: invd
421: movb $CCR0,%al # Configuration Register index (CCR0)
422: outb %al,$0x22
423: inb $0x23,%al
424: orb $(CCR0_NC1|CCR0_BARB),%al
425: movb %al,%ah
426: movb $CCR0,%al
427: outb %al,$0x22
428: movb %ah,%al
429: outb %al,$0x23
430: invd
431: #else /* CYRIX_CACHE_WORKS */
432: /* Set cache parameters */
433: invd # Start with guaranteed clean cache
434: movb $CCR0,%al # Configuration Register index (CCR0)
435: outb %al,$0x22
436: inb $0x23,%al
437: andb $~CCR0_NC0,%al
438: #ifndef CYRIX_CACHE_REALLY_WORKS
439: orb $(CCR0_NC1|CCR0_BARB),%al
440: #else
441: orb $CCR0_NC1,%al
442: #endif
443: movb %al,%ah
444: movb $CCR0,%al
445: outb %al,$0x22
446: movb %ah,%al
447: outb %al,$0x23
448: /* clear non-cacheable region 1 */
449: movb $(NCR1+2),%al
450: outb %al,$0x22
451: movb $NCR_SIZE_0K,%al
452: outb %al,$0x23
453: /* clear non-cacheable region 2 */
454: movb $(NCR2+2),%al
455: outb %al,$0x22
456: movb $NCR_SIZE_0K,%al
457: outb %al,$0x23
458: /* clear non-cacheable region 3 */
459: movb $(NCR3+2),%al
460: outb %al,$0x22
461: movb $NCR_SIZE_0K,%al
462: outb %al,$0x23
463: /* clear non-cacheable region 4 */
464: movb $(NCR4+2),%al
465: outb %al,$0x22
466: movb $NCR_SIZE_0K,%al
467: outb %al,$0x23
468: /* enable caching in CR0 */
469: movl %cr0,%eax
470: andl $~(CR0_CD|CR0_NW),%eax
471: movl %eax,%cr0
472: invd
473: #endif /* CYRIX_CACHE_WORKS */
474:
475: jmp 2f
476:
477: try586: /* Use the `cpuid' instruction. */
478: xorl %eax,%eax
479: cpuid
480: movl %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
481:
482: 2:
483: /*
484: * Finished with old stack; load new %esp now instead of later so we
485: * can trace this code without having to worry about the trace trap
486: * clobbering the memory test or the zeroing of the bss+bootstrap page
487: * tables.
488: *
489: * The boot program should check:
490: * text+data <= &stack_variable - more_space_for_stack
491: * text+data+bss+pad+space_for_page_tables <= end_of_memory
492: * Oops, the gdt is in the carcass of the boot program so clearing
493: * the rest of memory is still not possible.
494: */
495: movl $_RELOC(tmpstk),%esp # bootstrap stack end location
496:
497: /*
498: * Virtual address space of kernel:
499: *
500: * text | data | bss | [syms] | page dir | proc0 kstack
501: * 0 1 2 3
502: */
1.7 thorpej 503: #define PROC0PDIR ((0) * PAGE_SIZE)
504: #define PROC0STACK ((1) * PAGE_SIZE)
505: #define SYSMAP ((1+UPAGES) * PAGE_SIZE)
506: #define TABLESIZE ((1+UPAGES) * PAGE_SIZE) /* + nkpde * PAGE_SIZE */
1.1 fvdl 507:
508: /* Find end of kernel image. */
509: movl $RELOC(end),%edi
1.8 fvdl 510: #if (NKSYMS || defined(DDB) || defined(LKM)) && !defined(SYMTAB_SPACE)
1.1 fvdl 511: /* Save the symbols (if loaded). */
512: movl RELOC(esym),%eax
513: testl %eax,%eax
514: jz 1f
515: subl $KERNBASE,%eax
516: movl %eax,%edi
517: 1:
518: #endif
519:
520: /* Calculate where to start the bootstrap tables. */
521: movl %edi,%esi # edi = esym ? esym : end
522: addl $PGOFSET,%esi # page align up
523: andl $~PGOFSET,%esi
524:
525: /*
526: * Calculate the size of the kernel page table directory, and
527: * how many entries it will have.
528: */
529: movl RELOC(nkpde),%ecx # get nkpde
530: cmpl $NKPTP_MIN,%ecx # larger than min?
531: jge 1f
532: movl $NKPTP_MIN,%ecx # set at min
533: jmp 2f
534: 1: cmpl $NKPTP_MAX,%ecx # larger than max?
535: jle 2f
536: movl $NKPTP_MAX,%ecx
537: 2:
538:
539: /* Clear memory for bootstrap tables. */
540: shll $PGSHIFT,%ecx
541: addl $TABLESIZE,%ecx
542: addl %esi,%ecx # end of tables
543: subl %edi,%ecx # size of tables
544: shrl $2,%ecx
545: xorl %eax,%eax
546: cld
547: rep
548: stosl
549:
550: /*
551: * fillkpt
552: * eax = pte (page frame | control | status)
553: * ebx = page table address
554: * ecx = number of pages to map
555: */
/* (fillkpt clobbers %eax, %ebx and %ecx -- it is a loop macro.) */
556: #define fillkpt \
557: 1: movl %eax,(%ebx) ; \
1.7 thorpej 558: addl $PAGE_SIZE,%eax ; /* increment physical address */ \
1.1 fvdl 559: addl $4,%ebx ; /* next pte */ \
560: loop 1b ;
561:
562: /*
563: * Build initial page tables.
564: */
565: /* Calculate end of text segment, rounded to a page. */
566: leal (RELOC(etext)+PGOFSET),%edx
567: andl $~PGOFSET,%edx
568:
569: /* Skip over the first 1MB. */
570: movl $_RELOC(KERNTEXTOFF),%eax
571: movl %eax,%ecx
572: shrl $PGSHIFT,%ecx
573: leal (SYSMAP)(%esi,%ecx,4),%ebx
574:
575: /* Map the kernel text read-only. */
576: movl %edx,%ecx
577: subl %eax,%ecx
578: shrl $PGSHIFT,%ecx
579: orl $(PG_V|PG_KR),%eax
580: fillkpt
581:
582: /* Map the data, BSS, and bootstrap tables read-write. */
583: leal (PG_V|PG_KW)(%edx),%eax
584: movl RELOC(nkpde),%ecx
585: shll $PGSHIFT,%ecx
586: addl $TABLESIZE,%ecx
587: addl %esi,%ecx # end of tables
588: subl %edx,%ecx # subtract end of text
589: shrl $PGSHIFT,%ecx
590: fillkpt
591:
592: /* Map ISA I/O memory. */
593: movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set
594: movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s,
595: fillkpt
596:
597: /*
598: * Construct a page table directory.
599: */
600: /* Install PDEs for temporary double map of kernel. */
601: movl RELOC(nkpde),%ecx # for this many pde s,
602: leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
603: leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0,
604: fillkpt
605:
606: /* Map kernel PDEs. */
607: movl RELOC(nkpde),%ecx # for this many pde s,
608: leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # kernel pde offset
609: leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0,
610: fillkpt
611:
612: /* Install a PDE recursively mapping page directory as a page table! */
613: leal (PROC0PDIR+PG_V|PG_KW)(%esi),%eax # pte for ptd
614: movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot
615:
616: /* Save phys. addr of PTD, for libkvm. */
617: movl %esi,RELOC(PTDpaddr)
618:
619: /* Load base of page directory and enable mapping. */
620: movl %esi,%eax # phys address of ptd in proc 0
621: movl %eax,%cr3 # load ptd addr into mmu
622: movl %cr0,%eax # get control word
623: # enable paging & NPX emulation
624: orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax
625: movl %eax,%cr0 # and let's page NOW!
626:
/* push/ret pair transfers control to the KERNBASE-relative address of
 * `begin' (works because the kernel is double-mapped at this point). */
627: pushl $begin # jump to high mem
628: ret
629:
630: begin:
631: /* Now running relocated at KERNBASE. Remove double mapping. */
632: movl _C_LABEL(nkpde),%ecx # for this many pde s,
633: leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
634: addl $(KERNBASE), %ebx # now use relocated address
635: 1: movl $0,(%ebx)
636: addl $4,%ebx # next pde
637: loop 1b
638:
639: /* Relocate atdevbase. */
640: movl _C_LABEL(nkpde),%edx
641: shll $PGSHIFT,%edx
642: addl $(TABLESIZE+KERNBASE),%edx
643: addl %esi,%edx
644: movl %edx,_C_LABEL(atdevbase)
645:
646: /* Set up bootstrap stack. */
647: leal (PROC0STACK+KERNBASE)(%esi),%eax
648: movl %eax,_C_LABEL(proc0paddr)
649: leal (USPACE-FRAMESIZE)(%eax),%esp
650: movl %esi,PCB_CR3(%eax) # pcb->pcb_cr3
651: xorl %ebp,%ebp # mark end of frames
652:
653: subl $NGDT*8, %esp # space for temporary gdt
654: pushl %esp
655: call _C_LABEL(initgdt)
656: addl $4,%esp
657:
658: movl _C_LABEL(nkpde),%eax
659: shll $PGSHIFT,%eax
660: addl $TABLESIZE,%eax
661: addl %esi,%eax # skip past stack and page tables
662:
663: pushl %eax
664: call _C_LABEL(init386) # wire 386 chip for unix operation
665: addl $4+NGDT*8,%esp # pop temporary gdt
666:
667: #ifdef SAFARI_FIFO_HACK
668: movb $5,%al
669: movw $0x37b,%dx
670: outb %al,%dx
671: movw $0x37f,%dx
672: inb %dx,%al
673: movb %al,%cl
674:
675: orb $1,%cl
676:
677: movb $5,%al
678: movw $0x37b,%dx
679: outb %al,%dx
680: movw $0x37f,%dx
681: movb %cl,%al
682: outb %al,%dx
683: #endif /* SAFARI_FIFO_HACK */
684:
685: call _C_LABEL(main)
686:
687: /*
688: * void proc_trampoline(void);
689: * This is a trampoline function pushed onto the stack of a newly created
690: * process in order to do some additional setup. The trampoline is entered by
691: * cpu_switch()ing to the process, so we abuse the callee-saved registers used
692: * by cpu_switch() to store the information about the stub to call.
693: * NOTE: This function does not have a normal calling sequence!
694: */
695: /* LINTSTUB: Func: void proc_trampoline(void) */
696: NENTRY(proc_trampoline)
697: #ifdef MULTIPROCESSOR
698: call _C_LABEL(proc_trampoline_mp)
699: #endif
700: movl $IPL_NONE,CPUVAR(ILEVEL)
/* cpu_switch left the stub in callee-saved registers:
 * %esi = function to call, %ebx = its sole argument. */
701: pushl %ebx
702: call *%esi
703: addl $4,%esp
/* Return to user (or caller) context via the interrupt-exit path. */
704: INTRFASTEXIT
705: /* NOTREACHED */
706:
707: /*****************************************************************************/
708:
709: /*
710: * Signal trampoline; copied to top of user stack.
711: */
712: /* LINTSTUB: Var: char sigcode[1], esigcode[1]; */
/* Runs on the USER stack (this code is copied out to user space; the
 * kernel uses the sigcode..esigcode range as the source).  Invokes the
 * __sigreturn14 syscall via int $0x80, and exits if that fails. */
713: NENTRY(sigcode)
714: /*
715: * Handler has returned here as if we called it. The sigcontext
716: * is on the stack after the 3 args "we" pushed.
717: */
718: leal 12(%esp),%eax # get pointer to sigcontext
719: movl %eax,4(%esp) # put it in the argument slot
720: # fake return address already there
721: movl $SYS___sigreturn14,%eax
722: int $0x80 # enter kernel with args on stack
723: movl $SYS_exit,%eax
724: int $0x80 # exit if sigreturn fails
725: .globl _C_LABEL(esigcode)
726: _C_LABEL(esigcode):
727:
728: /*****************************************************************************/
729:
730: /*
731: * The following primitives are used to fill and copy regions of memory.
732: */
733:
734: /*
735: * XXX No section 9 man page for fillw.
736: * fillw seems to be very sparsely used (only in pccons it seems.)
737: * One wonders if it couldn't be done without.
738: * -- Perry Metzger, May 7, 2001
739: */
740: /*
741: * void fillw(short pattern, void *addr, size_t len);
742: * Write len copies of pattern at addr.
743: */
744: /* LINTSTUB: Func: void fillw(short pattern, void *addr, size_t len) */
/* fillw(pattern, addr, len): store `len' 16-bit copies of `pattern' at
 * `addr'.  Replicates the pattern into both halves of %eax, then does
 * len/2 dword stores plus one word store if len is odd. */
745: ENTRY(fillw)
746: pushl %edi
747: movl 8(%esp),%eax
748: movl 12(%esp),%edi
749: movw %ax,%cx
750: rorl $16,%eax
751: movw %cx,%ax # %eax = pattern:pattern
752: cld
753: movl 16(%esp),%ecx
754: shrl %ecx # do longwords
755: rep
756: stosl
757: movl 16(%esp),%ecx
758: andl $1,%ecx # do remainder
759: rep
760: stosw
761: popl %edi
762: ret
763:
764: /*
765: * int kcopy(const void *from, void *to, size_t len);
766: * Copy len bytes, abort on fault.
767: */
768: /* LINTSTUB: Func: int kcopy(const void *from, void *to, size_t len) */
/* kcopy(from, to, len): kernel-to-kernel copy with fault recovery --
 * pcb_onfault is pointed at copy_fault for the duration (old value
 * saved on the stack and restored on exit), so a fault returns EFAULT
 * instead of panicking.  Copies backward when the regions overlap
 * with dst inside [src, src+len). */
769: ENTRY(kcopy)
770: pushl %esi
771: pushl %edi
772: GET_CURPCB(%eax) # load curpcb into eax and set on-fault
773: pushl PCB_ONFAULT(%eax)
774: movl $_C_LABEL(copy_fault), PCB_ONFAULT(%eax)
775:
776: movl 16(%esp),%esi
777: movl 20(%esp),%edi
778: movl 24(%esp),%ecx
779: movl %edi,%eax
780: subl %esi,%eax
781: cmpl %ecx,%eax # overlapping?
782: jb 1f
783: cld # nope, copy forward
784: shrl $2,%ecx # copy by 32-bit words
785: rep
786: movsl
787: movl 24(%esp),%ecx
788: andl $3,%ecx # any bytes left?
789: rep
790: movsb
791:
792: GET_CURPCB(%edx) # XXX save curpcb?
793: popl PCB_ONFAULT(%edx)
794: popl %edi
795: popl %esi
796: xorl %eax,%eax
797: ret
798:
799: ALIGN_TEXT
/* Backward copy: tail bytes first (len&3), then whole dwords, with the
 * direction flag set; DF is cleared again before returning. */
800: 1: addl %ecx,%edi # copy backward
801: addl %ecx,%esi
802: std
803: andl $3,%ecx # any fractional bytes?
804: decl %edi
805: decl %esi
806: rep
807: movsb
808: movl 24(%esp),%ecx # copy remainder by 32-bit words
809: shrl $2,%ecx
810: subl $3,%esi
811: subl $3,%edi
812: rep
813: movsl
814: cld
815:
816: GET_CURPCB(%edx)
817: popl PCB_ONFAULT(%edx)
818: popl %edi
819: popl %esi
820: xorl %eax,%eax
821: ret
822:
823: /*****************************************************************************/
824:
825: /*
826: * The following primitives are used to copy data in and out of the user's
827: * address space.
828: */
829:
830: /*
831: * Default to the lowest-common-denominator. We will improve it
832: * later.
833: */
/* Indirect dispatch for copyin/copyout: the pointers below start out at
 * the lowest-common-denominator implementation for the CPU classes this
 * kernel is compiled for, and can be repointed at runtime. */
834: #if defined(I386_CPU)
835: #define DEFAULT_COPYOUT _C_LABEL(i386_copyout)
836: #define DEFAULT_COPYIN _C_LABEL(i386_copyin)
837: #elif defined(I486_CPU)
838: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout)
839: #define DEFAULT_COPYIN _C_LABEL(i386_copyin)
840: #elif defined(I586_CPU)
841: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout) /* XXX */
842: #define DEFAULT_COPYIN _C_LABEL(i386_copyin) /* XXX */
843: #elif defined(I686_CPU)
844: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout) /* XXX */
845: #define DEFAULT_COPYIN _C_LABEL(i386_copyin) /* XXX */
846: #endif
847:
848: .data
849:
850: .globl _C_LABEL(copyout_func)
851: _C_LABEL(copyout_func):
852: .long DEFAULT_COPYOUT
853:
854: .globl _C_LABEL(copyin_func)
855: _C_LABEL(copyin_func):
856: .long DEFAULT_COPYIN
857:
858: .text
859:
860: /*
861: * int copyout(const void *from, void *to, size_t len);
862: * Copy len bytes into the user's address space.
863: * see copyout(9)
864: */
865: /* LINTSTUB: Func: int copyout(const void *kaddr, void *uaddr, size_t len) */
866: ENTRY(copyout)
/* Tail-jump through the per-CPU-class function pointer; the callee sees
 * the caller's original stack frame (args at 4(%esp) etc.). */
867: jmp *_C_LABEL(copyout_func)
868:
/* 386 variant of copyout: the 386 ignores page-level write protection
 * in supervisor mode, so every destination PTE must be checked by hand
 * (via the recursive PTmap), simulating a write trap where needed.
 * The `pushl $0' slot is the saved pcb_onfault value popped on exit. */
869: #if defined(I386_CPU)
870: /* LINTSTUB: Func: int i386_copyout(const void *kaddr, void *uaddr, size_t len) */
871: ENTRY(i386_copyout)
872: pushl %esi
873: pushl %edi
874: pushl $0
875:
876: movl 16(%esp),%esi
877: movl 20(%esp),%edi
878: movl 24(%esp),%eax
879:
880: /*
881: * We check that the end of the destination buffer is not past the end
882: * of the user's address space. If it's not, then we only need to
883: * check that each page is writable. The 486 will do this for us; the
884: * 386 will not. (We assume that pages in user space that are not
885: * writable by the user are not writable by the kernel either.)
886: */
887: movl %edi,%edx
888: addl %eax,%edx
889: jc _C_LABEL(copy_efault)
890: cmpl $VM_MAXUSER_ADDRESS,%edx
891: ja _C_LABEL(copy_efault)
892:
893: testl %eax,%eax # anything to do?
894: jz 3f
895:
896: /*
897: * We have to check each PTE for (write) permission, since the CPU
898: * doesn't do it for us.
899: */
900:
901: /* Compute number of pages. */
902: movl %edi,%ecx
903: andl $PGOFSET,%ecx
904: addl %eax,%ecx
905: decl %ecx
906: shrl $PGSHIFT,%ecx
907:
908: /* Compute PTE offset for start address. */
909: shrl $PGSHIFT,%edi
910:
911: GET_CURPCB(%edx)
912: movl $2f,PCB_ONFAULT(%edx)
913:
914: 1: /* Check PTE for each page. */
915: testb $PG_RW,_C_LABEL(PTmap)(,%edi,4)
916: jz 2f
917:
918: 4: incl %edi
919: decl %ecx
920: jns 1b
921:
922: movl 20(%esp),%edi
923: movl 24(%esp),%eax
924: jmp 3f
925:
926: 2: /* Simulate a trap. */
927: pushl %ecx
928: movl %edi,%eax
929: shll $PGSHIFT,%eax
930: pushl %eax
931: call _C_LABEL(trapwrite) # trapwrite(addr)
932: addl $4,%esp # pop argument
933: popl %ecx
934: testl %eax,%eax # if not ok, return EFAULT
935: jz 4b
936: jmp _C_LABEL(copy_efault)
937:
938: 3: GET_CURPCB(%edx)
939: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%edx)
940:
941: /* bcopy(%esi, %edi, %eax); */
942: cld
943: movl %eax,%ecx
944: shrl $2,%ecx
945: rep
946: movsl
947: movl %eax,%ecx
948: andl $3,%ecx
949: rep
950: movsb
951:
952: popl PCB_ONFAULT(%edx)
953: popl %edi
954: popl %esi
955: xorl %eax,%eax
956: ret
957: #endif /* I386_CPU */
958:
/* 486+ variant of copyout: the CPU enforces page write protection even
 * in supervisor mode, so only a range check against VM_MAXUSER_ADDRESS
 * is needed -- an unwritable page faults into copy_fault (pcb_onfault),
 * which pops the `pushl $0' slot and returns EFAULT. */
959: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
960: /* LINTSTUB: Func: int i486_copyout(const void *kaddr, void *uaddr, size_t len) */
961: ENTRY(i486_copyout)
962: pushl %esi
963: pushl %edi
964: pushl $0
965:
966: movl 16(%esp),%esi
967: movl 20(%esp),%edi
968: movl 24(%esp),%eax
969:
970: /*
971: * We check that the end of the destination buffer is not past the end
972: * of the user's address space.
973: */
974: movl %edi,%edx
975: addl %eax,%edx
976: jc _C_LABEL(copy_efault)
977: cmpl $VM_MAXUSER_ADDRESS,%edx
978: ja _C_LABEL(copy_efault)
979:
980: GET_CURPCB(%edx)
981: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%edx)
982:
983: /* bcopy(%esi, %edi, %eax); */
984: cld
985: movl %eax,%ecx
986: shrl $2,%ecx
987: rep
988: movsl
989: movl %eax,%ecx
990: andl $3,%ecx
991: rep
992: movsb
993:
994: popl PCB_ONFAULT(%edx)
995: popl %edi
996: popl %esi
997: xorl %eax,%eax
998: ret
999: #endif /* I486_CPU || I586_CPU || I686_CPU */
1000:
1001: /*
1002: * int copyin(const void *from, void *to, size_t len);
1003: * Copy len bytes from the user's address space.
1004: * see copyin(9)
1005: */
1006: /* LINTSTUB: Func: int copyin(const void *uaddr, void *kaddr, size_t len) */
1007: ENTRY(copyin)
/* Tail-jump through the per-CPU-class function pointer; the callee sees
 * the caller's original stack frame (args at 4(%esp) etc.). */
1008: jmp *_C_LABEL(copyin_func)
1009:
/* copyin for all CPU classes: reads never need a manual PTE check (the
 * range check plus the CPU's read-permission fault suffices), so one
 * implementation serves 386 through 686.  Faults go to copy_fault via
 * pcb_onfault; the `pushl $0' slot is the saved onfault value. */
1010: #if defined(I386_CPU) || defined(I486_CPU) || defined(I586_CPU) || \
1011: defined(I686_CPU)
1012: /* LINTSTUB: Func: int i386_copyin(const void *uaddr, void *kaddr, size_t len) */
1013: ENTRY(i386_copyin)
1014: pushl %esi
1015: pushl %edi
1016: GET_CURPCB(%eax)
1017: pushl $0
1018: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%eax)
1019:
1020: movl 16(%esp),%esi
1021: movl 20(%esp),%edi
1022: movl 24(%esp),%eax
1023:
1024: /*
1025: * We check that the end of the destination buffer is not past the end
1026: * of the user's address space. If it's not, then we only need to
1027: * check that each page is readable, and the CPU will do that for us.
1028: */
1029: movl %esi,%edx
1030: addl %eax,%edx
1031: jc _C_LABEL(copy_efault)
1032: cmpl $VM_MAXUSER_ADDRESS,%edx
1033: ja _C_LABEL(copy_efault)
1034:
1035: /* bcopy(%esi, %edi, %eax); */
1036: cld
1037: movl %eax,%ecx
1038: shrl $2,%ecx
1039: rep
1040: movsl
1041: movl %eax,%ecx
1042: andl $3,%ecx
1043: rep
1044: movsb
1045:
1046: GET_CURPCB(%edx)
1047: popl PCB_ONFAULT(%edx)
1048: popl %edi
1049: popl %esi
1050: xorl %eax,%eax
1051: ret
1052: #endif /* I386_CPU || I486_CPU || I586_CPU || I686_CPU */
1053:
/* Shared error exits for the copy primitives above.  copy_efault loads
 * EFAULT and deliberately FALLS THROUGH into copy_fault, which restores
 * pcb_onfault from the slot the copy routines pushed and unwinds their
 * saved registers.  Entered either directly or via pcb_onfault from the
 * trap handler (with %eax already holding the error). */
1054: /* LINTSTUB: Ignore */
1055: NENTRY(copy_efault)
1056: movl $EFAULT,%eax
1057:
1058: /* LINTSTUB: Ignore */
1059: NENTRY(copy_fault)
1060: GET_CURPCB(%edx)
1061: popl PCB_ONFAULT(%edx)
1062: popl %edi
1063: popl %esi
1064: ret
1065:
1066: /*
1067: * int copyoutstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
1068: * Copy a NUL-terminated string, at most maxlen characters long, into the
1069: * user's address space. Return the number of characters copied (including the
1070: * NUL) in *lencopied. If the string is too long, return ENAMETOOLONG; else
1071: * return 0 or EFAULT.
1072: * see copyoutstr(9)
1073: */
1074: /* LINTSTUB: Func: int copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) */
/* Register usage throughout: %esi = src, %edi = dst, %edx = bytes of
 * maxlen remaining; both paths exit through copystr_return (defined
 * later in the file) with the error code in %eax. */
1075: ENTRY(copyoutstr)
1076: pushl %esi
1077: pushl %edi
1078:
1079: movl 12(%esp),%esi # esi = from
1080: movl 16(%esp),%edi # edi = to
1081: movl 20(%esp),%edx # edx = maxlen
1082:
/* 386 path: must check PG_RW on each destination page by hand, one
 * page-sized chunk at a time (see i386_copyout for why). */
1083: #if defined(I386_CPU)
1084: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1085: cmpl $CPUCLASS_386,_C_LABEL(cpu_class)
1086: jne 5f
1087: #endif /* I486_CPU || I586_CPU || I686_CPU */
1088:
1089: /* Compute number of bytes in first page. */
1090: movl %edi,%eax
1091: andl $PGOFSET,%eax
1.7 thorpej 1092: movl $PAGE_SIZE,%ecx
1093: subl %eax,%ecx # ecx = PAGE_SIZE - (src % PAGE_SIZE)
1.1 fvdl 1094:
1095: GET_CURPCB(%eax)
1096: movl $6f,PCB_ONFAULT(%eax)
1097:
1098: 1: /*
1099: * Once per page, check that we are still within the bounds of user
1100: * space, and check for a write fault.
1101: */
1102: cmpl $VM_MAXUSER_ADDRESS,%edi
1103: jae _C_LABEL(copystr_efault)
1104:
1105: /* Compute PTE offset. */
1106: movl %edi,%eax
1107: shrl $PGSHIFT,%eax # calculate pte address
1108:
1109: testb $PG_RW,_C_LABEL(PTmap)(,%eax,4)
1110: jnz 2f
1111:
1112: 6: /* Simulate a trap. */
1113: pushl %edx
1114: pushl %edi
1115: call _C_LABEL(trapwrite) # trapwrite(addr)
1116: addl $4,%esp # clear argument from stack
1117: popl %edx
1118: testl %eax,%eax
1119: jnz _C_LABEL(copystr_efault)
1120:
1121: 2: /* Copy up to end of this page. */
1122: subl %ecx,%edx # predecrement total count
1123: jnc 3f
1124: addl %edx,%ecx # ecx += (edx - ecx) = edx
1125: xorl %edx,%edx
1126:
1127: 3: decl %ecx
1128: js 4f
1129: lodsb
1130: stosb
1131: testb %al,%al
1132: jnz 3b
1133:
1134: /* Success -- 0 byte reached. */
1135: addl %ecx,%edx # add back residual for this page
1136: xorl %eax,%eax
1137: jmp copystr_return
1138:
1139: 4: /* Go to next page, if any. */
1.7 thorpej 1140: movl $PAGE_SIZE,%ecx
1.1 fvdl 1141: testl %edx,%edx
1142: jnz 1b
1143:
1144: /* edx is zero -- return ENAMETOOLONG. */
1145: movl $ENAMETOOLONG,%eax
1146: jmp copystr_return
1147: #endif /* I386_CPU */
1148:
/* 486+ path: hardware enforces write permission, so just clamp the
 * count to the user address-space bound and let faults hit
 * copystr_fault via pcb_onfault. */
1149: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1150: 5: GET_CURPCB(%eax)
1151: movl $_C_LABEL(copystr_fault),PCB_ONFAULT(%eax)
1152: /*
1153: * Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
1154: */
1155: movl $VM_MAXUSER_ADDRESS,%eax
1156: subl %edi,%eax
1157: cmpl %edx,%eax
1158: jae 1f
1159: movl %eax,%edx
1160: movl %eax,20(%esp)
1161:
1162: 1: incl %edx
1163: cld
1164:
/* NOTE(review): local label `1:' is intentionally redefined here; the
 * `jnz 1b' below binds to this nearer definition (the copy loop). */
1165: 1: decl %edx
1166: jz 2f
1167: lodsb
1168: stosb
1169: testb %al,%al
1170: jnz 1b
1171:
1172: /* Success -- 0 byte reached. */
1173: decl %edx
1174: xorl %eax,%eax
1175: jmp copystr_return
1176:
1177: 2: /* edx is zero -- return EFAULT or ENAMETOOLONG. */
1178: cmpl $VM_MAXUSER_ADDRESS,%edi
1179: jae _C_LABEL(copystr_efault)
1180: movl $ENAMETOOLONG,%eax
1181: jmp copystr_return
1182: #endif /* I486_CPU || I586_CPU || I686_CPU */
1183:
1184: /*
1185: * int copyinstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
1186: * Copy a NUL-terminated string, at most maxlen characters long, from the
1187: * user's address space. Return the number of characters copied (including the
1188: * NUL) in *lencopied. If the string is too long, return ENAMETOOLONG; else
1189: * return 0 or EFAULT.
1190: * see copyinstr(9)
1191: */
/*
 * Reads from user space need no per-page writability check, so a single
 * fault-driven path (via PCB_ONFAULT -> copystr_fault) serves all CPUs.
 */
1192: /* LINTSTUB: Func: int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) */
1193: ENTRY(copyinstr)
1194: pushl %esi
1195: pushl %edi
1196: GET_CURPCB(%ecx)
1197: movl $_C_LABEL(copystr_fault),PCB_ONFAULT(%ecx)
1198: 
# After the two pushes, the C arguments sit at 12/16/20/24(%esp).
1199: movl 12(%esp),%esi # %esi = from
1200: movl 16(%esp),%edi # %edi = to
1201: movl 20(%esp),%edx # %edx = maxlen
1202: 
1203: /*
1204: * Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
1205: */
1206: movl $VM_MAXUSER_ADDRESS,%eax
1207: subl %esi,%eax
1208: cmpl %edx,%eax
1209: jae 1f
1210: movl %eax,%edx
# Write the clamped length back over the maxlen argument so that
# copystr_return (which computes 20(%esp) - %edx) reports the right count.
1211: movl %eax,20(%esp)
1212: 
1213: 1: incl %edx
1214: cld
1215: 
1216: 1: decl %edx
1217: jz 2f
1218: lodsb
1219: stosb
1220: testb %al,%al
1221: jnz 1b
1222: 
1223: /* Success -- 0 byte reached. */
1224: decl %edx
1225: xorl %eax,%eax
1226: jmp copystr_return
1227: 
1228: 2: /* edx is zero -- return EFAULT or ENAMETOOLONG. */
# Out of count: hitting the end of user space means EFAULT, otherwise
# the string was simply longer than maxlen.
1229: cmpl $VM_MAXUSER_ADDRESS,%esi
1230: jae _C_LABEL(copystr_efault)
1231: movl $ENAMETOOLONG,%eax
1232: jmp copystr_return
1233:
1234: /* LINTSTUB: Ignore */
/*
 * copystr_efault: load EFAULT and fall through to copystr_fault.
 * Jumped to by the copy{in,out}str routines on a range-check failure.
 */
1235: NENTRY(copystr_efault)
1236: movl $EFAULT,%eax
1237: 
1238: /* LINTSTUB: Ignore */
/*
 * copystr_fault / copystr_return: shared exit for copyinstr, copyoutstr
 * and their fault paths.  Expects: error code in %eax, residual count in
 * %edx, and the (possibly clamped) maxlen argument still at 20(%esp).
 * Computes copied = maxlen - residual, stores it through the optional
 * lencopied pointer at 24(%esp), disarms onfault, and returns %eax.
 */
1239: NENTRY(copystr_fault)
1240: copystr_return:
1241: /* Set *lencopied and return %eax. */
1242: GET_CURPCB(%ecx)
1243: movl $0,PCB_ONFAULT(%ecx)
1244: movl 20(%esp),%ecx
1245: subl %edx,%ecx
1246: movl 24(%esp),%edx
1247: testl %edx,%edx
1248: jz 8f
1249: movl %ecx,(%edx)
1250: 
1251: 8: popl %edi
1252: popl %esi
1253: ret
1254:
1255: /*
1256: * int copystr(const void *from, void *to, size_t maxlen, size_t *lencopied);
1257: * Copy a NUL-terminated string, at most maxlen characters long. Return the
1258: * number of characters copied (including the NUL) in *lencopied. If the
1259: * string is too long, return ENAMETOOLONG; else return 0.
1260: * see copystr(9)
1261: */
/*
 * Kernel-to-kernel copy: no user-space range checks and no onfault
 * handler are needed.
 */
1262: /* LINTSTUB: Func: int copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done) */
1263: ENTRY(copystr)
1264: pushl %esi
1265: pushl %edi
1266: 
# After the two pushes, the C arguments sit at 12/16/20/24(%esp).
1267: movl 12(%esp),%esi # esi = from
1268: movl 16(%esp),%edi # edi = to
1269: movl 20(%esp),%edx # edx = maxlen
# Pre-increment so the decl/jz at the loop head tests maxlen+1 times.
1270: incl %edx
1271: cld
1272: 
1273: 1: decl %edx
1274: jz 4f
1275: lodsb
1276: stosb
1277: testb %al,%al
1278: jnz 1b
1279: 
1280: /* Success -- 0 byte reached. */
1281: decl %edx
1282: xorl %eax,%eax
1283: jmp 6f
1284: 
1285: 4: /* edx is zero -- return ENAMETOOLONG. */
1286: movl $ENAMETOOLONG,%eax
1287: 
1288: 6: /* Set *lencopied and return %eax. */
# copied = maxlen - residual; stored only if lencopied is non-NULL.
1289: movl 20(%esp),%ecx
1290: subl %edx,%ecx
1291: movl 24(%esp),%edx
1292: testl %edx,%edx
1293: jz 7f
1294: movl %ecx,(%edx)
1295: 
1296: 7: popl %edi
1297: popl %esi
1298: ret
1299:
1300: /*
1301: * long fuword(const void *uaddr);
1302: * Fetch an int from the user's address space.
1303: * see fuword(9)
1304: */
1305: /* LINTSTUB: Func: long fuword(const void *base) */
1306: ENTRY(fuword)
1307: movl 4(%esp),%edx
# Range check: all 4 bytes must lie below VM_MAXUSER_ADDRESS.
1308: cmpl $VM_MAXUSER_ADDRESS-4,%edx
1309: ja _C_LABEL(fusuaddrfault)
# Arm onfault around the access; fusufault expects the pcb in %ecx.
1310: GET_CURPCB(%ecx)
1311: movl $_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1312: movl (%edx),%eax
1313: movl $0,PCB_ONFAULT(%ecx)
1314: ret
1315:
1316: /*
1317: * int fusword(const void *uaddr);
1318: * Fetch a short from the user's address space.
1319: * see fusword(9)
1320: */
1321: /* LINTSTUB: Func: int fusword(const void *base) */
1322: ENTRY(fusword)
1323: movl 4(%esp),%edx
# Range check: both bytes must lie below VM_MAXUSER_ADDRESS.
1324: cmpl $VM_MAXUSER_ADDRESS-2,%edx
1325: ja _C_LABEL(fusuaddrfault)
# Arm onfault around the access; fusufault expects the pcb in %ecx.
1326: GET_CURPCB(%ecx)
1327: movl $_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1328: movzwl (%edx),%eax
1329: movl $0,PCB_ONFAULT(%ecx)
1330: ret
1331:
1332: /*
1333: * int fuswintr(const void *uaddr);
1334: * Fetch a short from the user's address space. Can be called during an
1335: * interrupt.
1336: * see fuswintr(9)
1337: */
1338: /* LINTSTUB: Func: int fuswintr(const void *base) */
1339: ENTRY(fuswintr)
1340: movl 4(%esp),%edx
1341: cmpl $VM_MAXUSER_ADDRESS-2,%edx
1342: ja _C_LABEL(fusuaddrfault)
# Fetch the pcb by hand (curlwp->l_addr) rather than via GET_CURPCB,
# and arm fusubail: trap() bails out immediately on it instead of
# attempting to page fault, which is not safe at interrupt time.
1.5 thorpej 1343: movl CPUVAR(CURLWP),%ecx
1344: movl L_ADDR(%ecx),%ecx
1.1 fvdl 1345: movl $_C_LABEL(fusubail),PCB_ONFAULT(%ecx)
1346: movzwl (%edx),%eax
1347: movl $0,PCB_ONFAULT(%ecx)
1348: ret
1349:
1350: /*
1351: * int fubyte(const void *uaddr);
1352: * Fetch a byte from the user's address space.
1353: * see fubyte(9)
1354: */
1355: /* LINTSTUB: Func: int fubyte(const void *base) */
1356: ENTRY(fubyte)
1357: movl 4(%esp),%edx
# Range check: the byte must lie below VM_MAXUSER_ADDRESS.
1358: cmpl $VM_MAXUSER_ADDRESS-1,%edx
1359: ja _C_LABEL(fusuaddrfault)
# Arm onfault around the access; fusufault expects the pcb in %ecx.
1360: GET_CURPCB(%ecx)
1361: movl $_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1362: movzbl (%edx),%eax
1363: movl $0,PCB_ONFAULT(%ecx)
1364: ret
1365:
1366: /*
1367: * Handle faults from [fs]u*(). Clean up and return -1.
1368: */
/* Entered with %ecx = pcb pointer, as set up by the [fs]u*() routines. */
1369: /* LINTSTUB: Ignore */
1370: NENTRY(fusufault)
1371: movl $0,PCB_ONFAULT(%ecx)
1372: movl $-1,%eax
1373: ret
1374: 
1375: /*
1376: * Handle faults from [fs]u*(). Clean up and return -1. This differs from
1377: * fusufault() in that trap() will recognize it and return immediately rather
1378: * than trying to page fault.
1379: */
/* Entered with %ecx = pcb pointer; used by the *intr() variants. */
1380: /* LINTSTUB: Ignore */
1381: NENTRY(fusubail)
1382: movl $0,PCB_ONFAULT(%ecx)
1383: movl $-1,%eax
1384: ret
1385: 
1386: /*
1387: * Handle earlier faults from [fs]u*(), due to out-of-range addresses.
1388: */
/* No cleanup needed: reached before PCB_ONFAULT was armed. */
1389: /* LINTSTUB: Ignore */
1390: NENTRY(fusuaddrfault)
1391: movl $-1,%eax
1392: ret
1393:
1394: /*
1395: * int suword(void *uaddr, long x);
1396: * Store an int in the user's address space.
1397: * see suword(9)
1398: */
1399: /* LINTSTUB: Func: int suword(void *base, long c) */
1400: ENTRY(suword)
1401: movl 4(%esp),%edx
1402: cmpl $VM_MAXUSER_ADDRESS-4,%edx
1403: ja _C_LABEL(fusuaddrfault)
1404: 
1405: #if defined(I386_CPU)
1406: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
# The 386 cannot fault on supervisor writes to read-only user pages,
# so it must check the PTE (and simulate the trap) by hand.
1407: cmpl $CPUCLASS_386,_C_LABEL(cpu_class)
1408: jne 2f
1409: #endif /* I486_CPU || I586_CPU || I686_CPU */
1410: 
# Arm onfault at label 3 so a fault during the PTE probe also lands
# in the trap-simulation path.
1411: GET_CURPCB(%eax)
1412: movl $3f,PCB_ONFAULT(%eax)
1413: 
1414: movl %edx,%eax
1415: shrl $PGSHIFT,%eax # calculate pte address
1416: testb $PG_RW,_C_LABEL(PTmap)(,%eax,4)
1417: jnz 1f
1418: 
1419: 3: /* Simulate a trap. */
# First push saves %edx across the call; second is the trapwrite() arg.
1420: pushl %edx
1421: pushl %edx
1422: call _C_LABEL(trapwrite) # trapwrite(addr)
1423: addl $4,%esp # clear parameter from the stack
1424: popl %edx
1425: GET_CURPCB(%ecx)
1426: testl %eax,%eax
1427: jnz _C_LABEL(fusufault)
1428: 
1429: 1: /* XXX also need to check the following 3 bytes for validity! */
1430: #endif
1431: 
1432: 2: GET_CURPCB(%ecx)
1433: movl $_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1434: 
1435: movl 8(%esp),%eax
1436: movl %eax,(%edx)
1437: xorl %eax,%eax
1438: movl %eax,PCB_ONFAULT(%ecx)
1439: ret
1440:
1441: /*
1442: * int susword(void *uaddr, short x);
1443: * Store a short in the user's address space.
1444: * see susword(9)
1445: */
1446: /* LINTSTUB: Func: int susword(void *base, short c) */
1447: ENTRY(susword)
1448: movl 4(%esp),%edx
1449: cmpl $VM_MAXUSER_ADDRESS-2,%edx
1450: ja _C_LABEL(fusuaddrfault)
1451: 
1452: #if defined(I386_CPU)
1453: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
# The 386 cannot fault on supervisor writes to read-only user pages,
# so it must check the PTE (and simulate the trap) by hand.
1454: cmpl $CPUCLASS_386,_C_LABEL(cpu_class)
1455: jne 2f
1456: #endif /* I486_CPU || I586_CPU || I686_CPU */
1457: 
# Arm onfault at label 3 so a fault during the PTE probe also lands
# in the trap-simulation path.
1458: GET_CURPCB(%eax)
1459: movl $3f,PCB_ONFAULT(%eax)
1460: 
1461: movl %edx,%eax
1462: shrl $PGSHIFT,%eax # calculate pte address
1463: testb $PG_RW,_C_LABEL(PTmap)(,%eax,4)
1464: jnz 1f
1465: 
1466: 3: /* Simulate a trap. */
# First push saves %edx across the call; second is the trapwrite() arg.
1467: pushl %edx
1468: pushl %edx
1469: call _C_LABEL(trapwrite) # trapwrite(addr)
1470: addl $4,%esp # clear parameter from the stack
1471: popl %edx
1472: GET_CURPCB(%ecx)
1473: testl %eax,%eax
1474: jnz _C_LABEL(fusufault)
1475: 
1476: 1: /* XXX also need to check the following byte for validity! */
1477: #endif
1478: 
1479: 2: GET_CURPCB(%ecx)
1480: movl $_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1481: 
1482: movl 8(%esp),%eax
1483: movw %ax,(%edx)
1484: xorl %eax,%eax
1485: movl %eax,PCB_ONFAULT(%ecx)
1486: ret
1487:
1488: /*
1489: * int suswintr(void *uaddr, short x);
1490: * Store a short in the user's address space. Can be called during an
1491: * interrupt.
1492: * see suswintr(9)
1493: */
1494: /* LINTSTUB: Func: int suswintr(void *base, short c) */
1495: ENTRY(suswintr)
1496: movl 4(%esp),%edx
1497: cmpl $VM_MAXUSER_ADDRESS-2,%edx
1498: ja _C_LABEL(fusuaddrfault)
# Fetch the pcb by hand (curlwp->l_addr) and arm fusubail: trap()
# returns immediately on it instead of paging, which is not safe at
# interrupt time.  %ecx stays = pcb for the stores below.
1.5 thorpej 1499: movl CPUVAR(CURLWP),%ecx
1500: movl L_ADDR(%ecx),%ecx
1.1 fvdl 1501: movl $_C_LABEL(fusubail),PCB_ONFAULT(%ecx)
1502: 
1503: #if defined(I386_CPU)
1504: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1505: cmpl $CPUCLASS_386,_C_LABEL(cpu_class)
1506: jne 2f
1507: #endif /* I486_CPU || I586_CPU || I686_CPU */
1508: 
1509: movl %edx,%eax
1510: shrl $PGSHIFT,%eax # calculate pte address
1511: testb $PG_RW,_C_LABEL(PTmap)(,%eax,4)
1512: jnz 1f
1513: 
1514: /* Simulate a trap. */
# Cannot call trapwrite() from interrupt context; just fail.
1515: jmp _C_LABEL(fusubail)
1516: 
1517: 1: /* XXX also need to check the following byte for validity! */
1518: #endif
1519: 
1520: 2: movl 8(%esp),%eax
1521: movw %ax,(%edx)
1522: xorl %eax,%eax
1523: movl %eax,PCB_ONFAULT(%ecx)
1524: ret
1525:
1526: /*
1527: * int subyte(void *uaddr, char x);
1528: * Store a byte in the user's address space.
1529: * see subyte(9)
1530: */
1531: /* LINTSTUB: Func: int subyte(void *base, int c) */
1532: ENTRY(subyte)
1533: movl 4(%esp),%edx
1534: cmpl $VM_MAXUSER_ADDRESS-1,%edx
1535: ja _C_LABEL(fusuaddrfault)
1536: 
1537: #if defined(I386_CPU)
1538: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
# The 386 cannot fault on supervisor writes to read-only user pages,
# so it must check the PTE (and simulate the trap) by hand.
1539: cmpl $CPUCLASS_386,_C_LABEL(cpu_class)
1540: jne 2f
1541: #endif /* I486_CPU || I586_CPU || I686_CPU */
1542: 
# Arm onfault at label 3 so a fault during the PTE probe also lands
# in the trap-simulation path.
1543: GET_CURPCB(%eax)
1544: movl $3f,PCB_ONFAULT(%eax)
1545: 
1546: movl %edx,%eax
1547: shrl $PGSHIFT,%eax # calculate pte address
1548: testb $PG_RW,_C_LABEL(PTmap)(,%eax,4)
1549: jnz 1f
1550: 
1551: 3: /* Simulate a trap. */
# First push saves %edx across the call; second is the trapwrite() arg.
1552: pushl %edx
1553: pushl %edx
1554: call _C_LABEL(trapwrite) # trapwrite(addr)
1555: addl $4,%esp # clear parameter from the stack
1556: popl %edx
1557: GET_CURPCB(%ecx)
1558: testl %eax,%eax
1559: jnz _C_LABEL(fusufault)
1560: 
1561: 1:
1562: #endif
1563: 
1564: 2: GET_CURPCB(%ecx)
1565: movl $_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1566: 
1567: movb 8(%esp),%al
1568: movb %al,(%edx)
1569: xorl %eax,%eax
1570: movl %eax,PCB_ONFAULT(%ecx)
1571: ret
1572:
1573: /*****************************************************************************/
1574:
1575: /*
1576: * The following is i386-specific nonsense.
1577: */
1578:
1579: /*
1580: * void lgdt(struct region_descriptor *rdp);
1581: * Load a new GDT pointer (and do any necessary cleanup).
1582: * XXX It's somewhat questionable whether reloading all the segment registers
1583: * is necessary, since the actual descriptor data is not changed except by
1584: * process creation and exit, both of which clean up via task switches. OTOH,
1585: * this only happens at run time when the GDT is resized.
1586: */
1587: /* LINTSTUB: Func: void lgdt(struct region_descriptor *rdp) */
1588: NENTRY(lgdt)
1589: /* Reload the descriptor table. */
1590: movl 4(%esp),%eax
1591: lgdt (%eax)
1592: /* Flush the prefetch queue. */
1593: jmp 1f
1594: nop
1595: 1: /* Reload "stale" selectors. */
1596: movl $GSEL(GDATA_SEL, SEL_KPL),%eax
1597: movw %ax,%ds
1598: movw %ax,%es
1599: movw %ax,%gs
1600: movw %ax,%ss
# %fs gets the per-CPU data selector, distinct from the others.
1601: movl $GSEL(GCPU_SEL, SEL_KPL),%eax
1602: movw %ax,%fs
1603: /* Reload code selector by doing intersegment return. */
# Pop our own return address and lret through it with the kernel code
# selector, forcing %cs to be reloaded from the new GDT.
1604: popl %eax
1605: pushl $GSEL(GCODE_SEL, SEL_KPL)
1606: pushl %eax
1607: lret
1608:
1609: /*****************************************************************************/
1610:
1611: /*
1612: * These functions are primarily used by DDB.
1613: */
1614:
1615: /* LINTSTUB: Func: int setjmp (label_t *l) */
/*
 * Save the callee-saved registers, stack pointers and return address
 * into *l so longjmp() can resume here.  Returns 0 on the direct call
 * (longjmp() arranges the second return to yield 1).  Used by DDB.
 */
1616: ENTRY(setjmp)
1617: movl 4(%esp),%eax
1618: movl %ebx,(%eax) # save ebx
1619: movl %esp,4(%eax) # save esp
1620: movl %ebp,8(%eax) # save ebp
1621: movl %esi,12(%eax) # save esi
1622: movl %edi,16(%eax) # save edi
1623: movl (%esp),%edx # get rta
1624: movl %edx,20(%eax) # save eip
1625: xorl %eax,%eax # return (0);
1626: ret
1627:
1628: /* LINTSTUB: Func: void longjmp (label_t *l) */
/*
 * Restore the context saved by setjmp(l) and return 1 from that
 * setjmp() call site.  Does not return to its own caller.  Used by DDB.
 */
1629: ENTRY(longjmp)
1630: movl 4(%esp),%eax
1631: movl (%eax),%ebx # restore ebx
1632: movl 4(%eax),%esp # restore esp
1633: movl 8(%eax),%ebp # restore ebp
1634: movl 12(%eax),%esi # restore esi
1635: movl 16(%eax),%edi # restore edi
1636: movl 20(%eax),%edx # get rta
1637: movl %edx,(%esp) # put in return frame
1638: xorl %eax,%eax # return (1);
1639: incl %eax
1640: ret
1641:
1642: /*****************************************************************************/
1643:
1644: .globl _C_LABEL(sched_whichqs),_C_LABEL(sched_qs)
1645: .globl _C_LABEL(uvmexp),_C_LABEL(panic)
1646: 
1647: #ifdef DIAGNOSTIC
/*
 * switch_error: panic with "cpu_switch" when a scheduler-queue sanity
 * check in cpu_switch() fails.  Jumped to, never called; does not return.
 */
1648: NENTRY(switch_error)
1649: pushl $1f
1650: call _C_LABEL(panic)
1651: /* NOTREACHED */
1652: 1: .asciz "cpu_switch"
1653: #endif /* DIAGNOSTIC */
1654:
1655: /*
1.5 thorpej 1656: * void cpu_switch(struct lwp *)
1.1 fvdl 1657: * Find a runnable process and switch to it. Wait if necessary. If the new
1658: * process is the same as the old one, we short-circuit the context save and
1659: * restore.
1660: *
1661: * Note that the stack frame layout is known to "struct switchframe"
1662: * in <machine/frame.h> and to the code in cpu_fork() which initializes
1.5 thorpej 1663: * it for a new lwp.
1.1 fvdl 1664: */
1665: ENTRY(cpu_switch)
1666: pushl %ebx
1667: pushl %esi
1668: pushl %edi
1669: 
1670: #ifdef DEBUG
1671: cmpl $IPL_SCHED,CPUVAR(ILEVEL)
1672: jae 1f
1.5 thorpej 1673: pushl $2f
1.1 fvdl 1674: call _C_LABEL(panic)
1675: /* NOTREACHED */
1.5 thorpej 1676: 2: .asciz "not splsched() in cpu_switch!"
1.1 fvdl 1677: 1:
1678: #endif /* DEBUG */
1679: 
1.5 thorpej 1680: movl 16(%esp),%esi # current
1.1 fvdl 1681: 
1682: /*
1.5 thorpej 1683: * Clear curlwp so that we don't accumulate system time while idle.
1684: * This also insures that schedcpu() will move the old lwp to
1.1 fvdl 1685: * the correct queue if it happens to get called from the spllower()
1686: * below and changes the priority. (See corresponding comment in
1687: * userret()).
1688: */
1.5 thorpej 1689: movl $0,CPUVAR(CURLWP)
1.1 fvdl 1690: /*
1.5 thorpej 1691: * First phase: find new lwp.
1.1 fvdl 1692: *
1693: * Registers:
1694: * %eax - queue head, scratch, then zero
1695: * %ebx - queue number
1696: * %ecx - cached value of whichqs
1.5 thorpej 1697: * %edx - next lwp in queue
1698: * %esi - old lwp
1699: * %edi - new lwp
1.1 fvdl 1700: */
1701: 
1.5 thorpej 1702: /* Look for new lwp. */
1.1 fvdl 1703: cli # splhigh doesn't do a cli
1704: movl _C_LABEL(sched_whichqs),%ecx
1705: bsfl %ecx,%ebx # find a full q
1706: jnz switch_dequeue
1707: 
1708: /*
1709: * idling: save old context.
1710: *
1711: * Registers:
1712: * %eax, %ecx - scratch
1.5 thorpej 1713: * %esi - old lwp, then old pcb
1.1 fvdl 1714: * %edi - idle pcb
1715: */
1716: 
1717: pushl %esi
1718: call _C_LABEL(pmap_deactivate) # pmap_deactivate(oldproc)
1719: addl $4,%esp
1720: 
1.5 thorpej 1721: movl L_ADDR(%esi),%esi
1.1 fvdl 1722: 
1723: /* Save stack pointers. */
1724: movl %esp,PCB_ESP(%esi)
1725: movl %ebp,PCB_EBP(%esi)
1726: 
1727: /* Find idle PCB for this CPU */
1728: #ifndef MULTIPROCESSOR
1.5 thorpej 1729: movl $_C_LABEL(lwp0),%ebx
1730: movl L_ADDR(%ebx),%edi
1731: movl L_MD_TSS_SEL(%ebx),%edx
1.1 fvdl 1732: #else
1733: movl CPUVAR(IDLE_PCB),%edi
1734: movl CPUVAR(IDLE_TSS_SEL),%edx
1735: #endif
1.5 thorpej 1736: movl $0,CPUVAR(CURLWP) /* In case we fault... */
1.1 fvdl 1737: 
1738: /* Restore the idle context (avoid interrupts) */
1739: cli
1740: 
1741: /* Restore stack pointers. */
1742: movl PCB_ESP(%edi),%esp
1743: movl PCB_EBP(%edi),%ebp
1744: 
1745: 
1746: /* Switch address space. */
1747: movl PCB_CR3(%edi),%ecx
1748: movl %ecx,%cr3
1749: 
1750: /* Switch TSS. Reset "task busy" flag before loading. */
1751: #ifdef MULTIPROCESSOR
1752: movl CPUVAR(GDT),%eax
1753: #else
1754: movl _C_LABEL(gdt),%eax
1755: #endif
1756: andl $~0x0200,4-SEL_KPL(%eax,%edx,1)
1757: ltr %dx
1758: 
1759: /* We're always in the kernel, so we don't need the LDT. */
1760: 
1761: /* Restore cr0 (including FPU state). */
1762: movl PCB_CR0(%edi),%ecx
1763: movl %ecx,%cr0
1764: 
1765: /* Record new pcb. */
1766: SET_CURPCB(%edi)
1767: 
# %esi = 0 marks "no old lwp" for the dequeue path we eventually rejoin.
1768: xorl %esi,%esi
1769: sti
1770: idle_unlock:
1771: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1772: call _C_LABEL(sched_unlock_idle)
1773: #endif
1774: /* Interrupts are okay again. */
1.2 fvdl 1775: pushl $IPL_NONE # spl0()
1.1 fvdl 1776: call _C_LABEL(Xspllower) # process pending interrupts
1.2 fvdl 1777: addl $4,%esp
1.1 fvdl 1778: jmp idle_start
1779: idle_zero:
1780: sti
1781: call _C_LABEL(uvm_pageidlezero)
1782: cli
1783: cmpl $0,_C_LABEL(sched_whichqs)
1784: jnz idle_exit
1785: idle_loop:
1786: /* Try to zero some pages. */
1787: movl _C_LABEL(uvm)+UVM_PAGE_IDLE_ZERO,%ecx
1788: testl %ecx,%ecx
1789: jnz idle_zero
1790: sti
1791: hlt
1792: NENTRY(mpidle)
1793: idle_start:
1794: cli
1795: cmpl $0,_C_LABEL(sched_whichqs)
1796: jz idle_loop
1797: idle_exit:
1798: movl $IPL_HIGH,CPUVAR(ILEVEL) # splhigh
1799: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1800: call _C_LABEL(sched_lock_idle)
1801: #endif
1802: movl _C_LABEL(sched_whichqs),%ecx
1803: bsfl %ecx,%ebx
1804: jz idle_unlock
1805: 
1806: switch_dequeue:
1807: /*
1808: * we're running at splhigh(), but it's otherwise okay to take
1809: * interrupts here.
1810: */
1811: sti
1812: leal _C_LABEL(sched_qs)(,%ebx,8),%eax # select q
1813: 
1.5 thorpej 1814: movl L_FORW(%eax),%edi # unlink from front of process q
1.1 fvdl 1815: #ifdef DIAGNOSTIC
1816: cmpl %edi,%eax # linked to self (i.e. nothing queued)?
1817: je _C_LABEL(switch_error) # not possible
1818: #endif /* DIAGNOSTIC */
1.5 thorpej 1819: movl L_FORW(%edi),%edx
1820: movl %edx,L_FORW(%eax)
1821: movl %eax,L_BACK(%edx)
1.1 fvdl 1822: 
1823: cmpl %edx,%eax # q empty?
1824: jne 3f
1825: 
1826: btrl %ebx,%ecx # yes, clear to indicate empty
1827: movl %ecx,_C_LABEL(sched_whichqs) # update q status
1828: 
1829: 3: /* We just did it. */
1830: xorl %eax,%eax
1831: CLEAR_RESCHED(%eax)
1832: 
# switch_resume: also entered from cpu_switchto() with %eax == 0,
# %esi = old lwp and %edi = new lwp already loaded.
1.5 thorpej 1833: switch_resume:
1.1 fvdl 1834: #ifdef DIAGNOSTIC
1.5 thorpej 1835: cmpl %eax,L_WCHAN(%edi) # Waiting for something?
1.1 fvdl 1836: jne _C_LABEL(switch_error) # Yes; shouldn't be queued.
1.5 thorpej 1837: cmpb $LSRUN,L_STAT(%edi) # In run state?
1.1 fvdl 1838: jne _C_LABEL(switch_error) # No; shouldn't be queued.
1839: #endif /* DIAGNOSTIC */
1840: 
1.5 thorpej 1841: /* Isolate lwp. XXX Is this necessary? */
1842: movl %eax,L_BACK(%edi)
1.1 fvdl 1843: 
1.5 thorpej 1844: /* Record new lwp. */
1845: movb $LSONPROC,L_STAT(%edi) # l->l_stat = LSONPROC
1846: SET_CURLWP(%edi,%ecx)
1.1 fvdl 1847: 
# %ebx doubles as the return value: 0 when we short-circuit (same lwp),
# set to 1 below after a full context switch.
1.5 thorpej 1848: /* Skip context switch if same lwp. */
1.10 fvdl 1849: xorl %ebx,%ebx
1.1 fvdl 1850: cmpl %edi,%esi
1851: je switch_return
1852: 
1.5 thorpej 1853: /* If old lwp exited, don't bother. */
1.1 fvdl 1854: testl %esi,%esi
1855: jz switch_exited
1856: 
1857: /*
1858: * Second phase: save old context.
1859: *
1860: * Registers:
1861: * %eax, %ecx - scratch
1.5 thorpej 1862: * %esi - old lwp, then old pcb
1863: * %edi - new lwp
1.1 fvdl 1864: */
1865: 
1866: pushl %esi
1867: call _C_LABEL(pmap_deactivate) # pmap_deactivate(oldproc)
1868: addl $4,%esp
1869: 
1.5 thorpej 1870: movl L_ADDR(%esi),%esi
1.1 fvdl 1871: 
1872: /* Save stack pointers. */
1873: movl %esp,PCB_ESP(%esi)
1874: movl %ebp,PCB_EBP(%esi)
1875: 
1876: switch_exited:
1877: /*
1878: * Third phase: restore saved context.
1879: *
1880: * Registers:
1881: * %eax, %ebx, %ecx, %edx - scratch
1882: * %esi - new pcb
1.5 thorpej 1883: * %edi - new lwp
1.1 fvdl 1884: */
1885: 
1886: /* No interrupts while loading new state. */
1887: cli
1.5 thorpej 1888: movl L_ADDR(%edi),%esi
1.1 fvdl 1889: 
1890: /* Restore stack pointers. */
1891: movl PCB_ESP(%esi),%esp
1892: movl PCB_EBP(%esi),%ebp
1893: 
1894: #if 0
1895: /* Don't bother with the rest if switching to a system process. */
1.5 thorpej 1896: testl $P_SYSTEM,L_FLAG(%edi); XXX NJWLWP lwp's don't have P_SYSTEM!
1.1 fvdl 1897: jnz switch_restored
1898: #endif
1899: 
1900: #ifdef MULTIPROCESSOR
1901: movl CPUVAR(GDT),%eax
1902: #else
1903: /* Load TSS info. */
1904: movl _C_LABEL(gdt),%eax
1905: #endif
1.5 thorpej 1906: movl L_MD_TSS_SEL(%edi),%edx
1.1 fvdl 1907: 
1908: /* Switch TSS. Reset "task busy" flag before loading. */
1909: andl $~0x0200,4(%eax,%edx, 1)
1910: ltr %dx
1911: 
1912: pushl %edi
1913: call _C_LABEL(pmap_activate) # pmap_activate(p)
1914: addl $4,%esp
1915: 
1916: #if 0
1917: switch_restored:
1918: #endif
1919: /* Restore cr0 (including FPU state). */
1920: movl PCB_CR0(%esi),%ecx
1921: #ifdef MULTIPROCESSOR
1922: /*
1923: * If our floating point registers are on a different cpu,
1924: * clear CR0_TS so we'll trap rather than reuse bogus state.
1925: */
1926: movl PCB_FPCPU(%esi),%ebx
1927: cmpl CPUVAR(SELF),%ebx
1928: jz 1f
1929: orl $CR0_TS,%ecx
1930: 1:
1931: #endif
1932: movl %ecx,%cr0
1933: 
1934: /* Record new pcb. */
1935: SET_CURPCB(%esi)
1936: 
1937: /* Interrupts are okay again. */
1938: sti
1939: 
1940: /*
1941: * Check for restartable atomic sequences (RAS)
1942: */
# If the new lwp's process has registered RAS regions and its saved
# user %eip lies inside one, reset the trapframe %eip to the region's
# restart point returned by ras_lookup().
1.5 thorpej 1943: movl CPUVAR(CURLWP),%edi
1944: movl L_PROC(%edi),%esi
1945: cmpl $0,P_NRAS(%esi)
1.1 fvdl 1946: je 1f
1.5 thorpej 1947: movl L_MD_REGS(%edi),%ebx
1.4 gmcgarry 1948: movl TF_EIP(%ebx),%eax
1.1 fvdl 1949: pushl %eax
1.5 thorpej 1950: pushl %esi
1.1 fvdl 1951: call _C_LABEL(ras_lookup)
1952: addl $8,%esp
1953: cmpl $-1,%eax
1954: je 1f
1.4 gmcgarry 1955: movl %eax,TF_EIP(%ebx)
1.1 fvdl 1956: 1:
# Full switch happened: return value = 1.
1.10 fvdl 1957: movl $1,%ebx
1.1 fvdl 1958: 
1959: switch_return:
1960: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1961: call _C_LABEL(sched_unlock_idle)
1962: #endif
1.2 fvdl 1963: pushl $IPL_NONE # spl0()
1.1 fvdl 1964: call _C_LABEL(Xspllower) # process pending interrupts
1.2 fvdl 1965: addl $4,%esp
1.1 fvdl 1966: movl $IPL_HIGH,CPUVAR(ILEVEL) # splhigh()
1.9 fvdl 1967: 
# Return %ebx (0 = short-circuited, 1 = switched) in %eax.
1968: movl %ebx,%eax
1.1 fvdl 1969: 
1970: popl %edi
1971: popl %esi
1972: popl %ebx
1973: ret
1974:
1975: /*
1.5 thorpej 1976: * void cpu_switchto(struct lwp *current, struct lwp *next)
1977: * Switch to the specified next LWP.
1978: */
/*
 * Sets up the same frame and register contract as cpu_switch()
 * (%esi = old lwp, %edi = new lwp, %eax = 0) and then jumps to
 * switch_resume inside cpu_switch(), bypassing run-queue selection.
 */
1979: ENTRY(cpu_switchto)
1980: pushl %ebx
1981: pushl %esi
1982: pushl %edi
1983: 
1984: #ifdef DEBUG
1985: cmpl $IPL_SCHED,CPUVAR(ILEVEL)
1986: jae 1f
1987: pushl $2f
1988: call _C_LABEL(panic)
1989: /* NOTREACHED */
1990: 2: .asciz "not splsched() in cpu_switchto!"
1991: 1:
1992: #endif /* DEBUG */
1993: 
1994: movl 16(%esp),%esi # current
1995: movl 20(%esp),%edi # next
1996: 
1997: /*
1998: * Clear curlwp so that we don't accumulate system time while idle.
1999: * This also insures that schedcpu() will move the old process to
2000: * the correct queue if it happens to get called from the spllower()
2001: * below and changes the priority. (See corresponding comment in
2002: * userret()).
2003: *
2004: * XXX Is this necessary? We know we won't go idle.
2005: */
2006: movl $0,CPUVAR(CURLWP)
2007: 
2008: /*
2009: * We're running at splhigh(), but it's otherwise okay to take
2010: * interrupts here.
2011: */
2012: sti
2013: 
2014: /* Jump into the middle of cpu_switch */
2015: xorl %eax,%eax
2016: jmp switch_resume
2017:
2018: /*
2019: * void switch_exit(struct lwp *l, void (*exit)(struct lwp *));
2020: * Switch to the appropriate idle context (lwp0's if uniprocessor; the cpu's
2021: * if multiprocessor) and deallocate the address space and kernel stack for p.
2022: * Then jump into cpu_switch(), as if we were in the idle proc all along.
2023: */
2024: #ifndef MULTIPROCESSOR
1.5 thorpej 2025: .globl _C_LABEL(lwp0)
1.1 fvdl 2026: #endif
2027: .globl _C_LABEL(uvmspace_free),_C_LABEL(kernel_map)
2028: .globl _C_LABEL(uvm_km_free),_C_LABEL(tss_free)
1.6 drochner 2029: /* LINTSTUB: Func: void switch_exit(struct lwp *l, void (*exit)(struct lwp *)) */
1.1 fvdl 2030: ENTRY(switch_exit)
# %edi = dying lwp, %eax = exit function; both must survive the stack
# switch below (%edi is callee-saved, %eax is re-pushed onto the idle
# stack).  Never returns: ends by jumping into the idle loop.
2031: movl 4(%esp),%edi # old process
1.5 thorpej 2032: movl 8(%esp),%eax # exit func
1.1 fvdl 2033: #ifndef MULTIPROCESSOR
1.5 thorpej 2034: movl $_C_LABEL(lwp0),%ebx
2035: movl L_ADDR(%ebx),%esi
2036: movl L_MD_TSS_SEL(%ebx),%edx
1.1 fvdl 2037: #else
2038: movl CPUVAR(IDLE_PCB),%esi
2039: movl CPUVAR(IDLE_TSS_SEL),%edx
2040: #endif
2041: /* In case we fault... */
1.5 thorpej 2042: movl $0,CPUVAR(CURLWP)
1.1 fvdl 2043: 
2044: /* Restore the idle context. */
2045: cli
2046: 
2047: /* Restore stack pointers. */
2048: movl PCB_ESP(%esi),%esp
2049: movl PCB_EBP(%esi),%ebp
2050: 
1.5 thorpej 2051: /* Save exit func. */
2052: pushl %eax
2053: 
1.1 fvdl 2054: /* Load TSS info. */
2055: #ifdef MULTIPROCESSOR
2056: movl CPUVAR(GDT),%eax
2057: #else
2058: /* Load TSS info. */
2059: movl _C_LABEL(gdt),%eax
2060: #endif
2061: 
2062: /* Switch address space. */
2063: movl PCB_CR3(%esi),%ecx
2064: movl %ecx,%cr3
2065: 
2066: /* Switch TSS. */
2067: andl $~0x0200,4-SEL_KPL(%eax,%edx,1)
2068: ltr %dx
2069: 
2070: /* We're always in the kernel, so we don't need the LDT. */
2071: 
2072: /* Restore cr0 (including FPU state). */
2073: movl PCB_CR0(%esi),%ecx
2074: movl %ecx,%cr0
2075: 
2076: /* Record new pcb. */
2077: SET_CURPCB(%esi)
2078: 
2079: /* Interrupts are okay again. */
2080: sti
2081: 
2082: /*
2083: * Schedule the dead process's vmspace and stack to be freed.
2084: */
# Reuse the saved-exit-func stack slot as the argument slot: load the
# function pointer, overwrite the slot with the lwp, then call it.
1.5 thorpej 2085: movl 0(%esp),%eax /* %eax = exit func */
2086: movl %edi,0(%esp) /* {lwp_}exit2(l) */
2087: call *%eax
1.1 fvdl 2088: addl $4,%esp
2089: 
2090: /* Jump into cpu_switch() with the right state. */
# %esi = 0 (no old lwp) matches what the cpu_switch idle path expects.
2091: xorl %esi,%esi
1.5 thorpej 2092: movl %esi,CPUVAR(CURLWP)
1.1 fvdl 2093: jmp idle_start
2094:
2095: /*
2096: * void savectx(struct pcb *pcb);
2097: * Update pcb, saving current processor state.
2098: */
/* Only the stack pointers are saved here. */
2099: /* LINTSTUB: Func: void savectx(struct pcb *pcb) */
2100: ENTRY(savectx)
2101: movl 4(%esp),%edx # edx = p->p_addr
2102: 
2103: /* Save stack pointers. */
2104: movl %esp,PCB_ESP(%edx)
2105: movl %ebp,PCB_EBP(%edx)
2106: 
2107: ret
2108:
2109: /*
2110: * Old call gate entry for syscall
2111: */
/*
 * Xosyscall: legacy lcall-gate entry.  The gate does not push eflags,
 * so fake it into the trap frame, record the 7-byte lcall instruction
 * size for restart, and join the common path.
 */
2112: /* LINTSTUB: Var: char Xosyscall[1]; */
2113: IDTVEC(osyscall)
2114: /* Set eflags in trap frame. */
2115: pushfl
2116: popl 8(%esp)
2117: pushl $7 # size of instruction for restart
2118: jmp syscall1
2119: 
2120: /*
2121: * Trap gate entry for syscall
2122: */
/*
 * Xsyscall: int$0x80 trap-gate entry (2-byte instruction for restart).
 * Builds a trap frame, dispatches through the process's md syscall
 * handler, then loops handling ASTs until none are pending before
 * returning to user mode.
 */
2123: /* LINTSTUB: Var: char Xsyscall[1]; */
2124: IDTVEC(syscall)
2125: pushl $2 # size of instruction for restart
2126: syscall1:
2127: pushl $T_ASTFLT # trap # for doing ASTs
2128: INTRENTRY
2129: 
2130: #ifdef DIAGNOSTIC
# Warn if we entered with a nonzero spl.
2131: movl CPUVAR(ILEVEL),%ebx
2132: testl %ebx,%ebx
2133: jz 1f
2134: pushl $5f
2135: call _C_LABEL(printf)
2136: addl $4,%esp
2137: #ifdef DDB
2138: int $3
2139: #endif
2140: 1:
2141: #endif /* DIAGNOSTIC */
1.5 thorpej 2142: movl CPUVAR(CURLWP),%edx
2143: movl %esp,L_MD_REGS(%edx) # save pointer to frame
2144: movl L_PROC(%edx),%edx
1.1 fvdl 2145: call *P_MD_SYSCALL(%edx) # get pointer to syscall() function
2146: 2: /* Check for ASTs on exit to user mode. */
# Interrupts must be off while testing/clearing the AST flag so a new
# AST posted by an interrupt cannot be missed.
2147: cli
1.5 thorpej 2148: CHECK_ASTPENDING(%eax)
1.1 fvdl 2149: je 1f
2150: /* Always returning to user mode here. */
1.5 thorpej 2151: CLEAR_ASTPENDING(%eax)
1.1 fvdl 2152: sti
2153: /* Pushed T_ASTFLT into tf_trapno on entry. */
2154: call _C_LABEL(trap)
2155: jmp 2b
2156: #ifndef DIAGNOSTIC
2157: 1: INTRFASTEXIT
2158: #else /* DIAGNOSTIC */
# Diagnostic exit: verify spl was lowered back to IPL_NONE first.
2159: 1: cmpl $IPL_NONE,CPUVAR(ILEVEL)
2160: jne 3f
2161: INTRFASTEXIT
2162: 3: sti
2163: pushl $4f
2164: call _C_LABEL(printf)
2165: addl $4,%esp
2166: #ifdef DDB
2167: int $3
2168: #endif /* DDB */
2169: movl $IPL_NONE,CPUVAR(ILEVEL)
2170: jmp 2b
2171: 4: .asciz "WARNING: SPL NOT LOWERED ON SYSCALL EXIT\n"
2172: 5: .asciz "WARNING: SPL NOT ZERO ON SYSCALL ENTRY\n"
2173: #endif /* DIAGNOSTIC */
2174:
2175: #if NNPX > 0
2176: /*
2177: * Special interrupt handlers. Someday intr0-intr15 will be used to count
2178: * interrupts. We'll still need a special exception 16 handler. The busy
2179: * latch stuff in probintr() can be moved to npxprobe().
2180: */
2181: 
/*
 * probeintr: interrupt handler installed while probing for an FPU.
 * Counts the interrupt, EOIs both PICs, and clears the BUSY# latch.
 * The ss prefix forces a stack-segment access for the counter; the
 * reason is historical -- TODO confirm (probe may run with unusual %ds).
 */
2182: /* LINTSTUB: Func: void probeintr(void) */
2183: NENTRY(probeintr)
2184: ss
2185: incl _C_LABEL(npx_intrs_while_probing)
2186: pushl %eax
2187: movb $0x20,%al # EOI (asm in strings loses cpp features)
2188: outb %al,$0xa0 # IO_ICU2
2189: outb %al,$0x20 # IO_ICU1
2190: movb $0,%al
2191: outb %al,$0xf0 # clear BUSY# latch
2192: popl %eax
2193: iret
2194: 
/*
 * probetrap: exception 16 handler used during the FPU probe.  Counts
 * the trap and clears the pending FPU exception state.
 */
2195: /* LINTSTUB: Func: void probetrap(void) */
2196: NENTRY(probetrap)
2197: ss
2198: incl _C_LABEL(npx_traps_while_probing)
2199: fnclex
2200: iret
2201: 
/*
 * npx586bug1: compute x - (x/y)*y in x87 arithmetic and return it as
 * an int; used to detect the Pentium FDIV bug (nonzero remainder where
 * a correct FPU yields zero for the chosen test operands).
 */
2202: /* LINTSTUB: Func: int npx586bug1(int a, int b) */
2203: NENTRY(npx586bug1)
2204: fildl 4(%esp) # x
2205: fildl 8(%esp) # y
2206: fld %st(1)
2207: fdiv %st(1),%st # x/y
2208: fmulp %st,%st(1) # (x/y)*y
2209: fsubrp %st,%st(1) # x-(x/y)*y
2210: pushl $0
2211: fistpl (%esp)
2212: popl %eax
2213: ret
2214: #endif /* NNPX > 0 */
CVSweb <webmaster@jp.NetBSD.org>