Annotation of src/sys/arch/i386/i386/locore.S, Revision 1.7
1.7 ! thorpej 1: /* $NetBSD: locore.S,v 1.6 2003/02/09 15:49:34 drochner Exp $ */
1.1 fvdl 2:
3: /*-
4: * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Charles M. Hannum.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
39: /*-
40: * Copyright (c) 1990 The Regents of the University of California.
41: * All rights reserved.
42: *
43: * This code is derived from software contributed to Berkeley by
44: * William Jolitz.
45: *
46: * Redistribution and use in source and binary forms, with or without
47: * modification, are permitted provided that the following conditions
48: * are met:
49: * 1. Redistributions of source code must retain the above copyright
50: * notice, this list of conditions and the following disclaimer.
51: * 2. Redistributions in binary form must reproduce the above copyright
52: * notice, this list of conditions and the following disclaimer in the
53: * documentation and/or other materials provided with the distribution.
54: * 3. All advertising materials mentioning features or use of this software
55: * must display the following acknowledgement:
56: * This product includes software developed by the University of
57: * California, Berkeley and its contributors.
58: * 4. Neither the name of the University nor the names of its contributors
59: * may be used to endorse or promote products derived from this software
60: * without specific prior written permission.
61: *
62: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72: * SUCH DAMAGE.
73: *
74: * @(#)locore.s 7.3 (Berkeley) 5/13/91
75: */
76:
77: #include "opt_cputype.h"
78: #include "opt_ddb.h"
79: #include "opt_ipkdb.h"
80: #include "opt_vm86.h"
81: #include "opt_user_ldt.h"
82: #include "opt_dummy_nops.h"
83: #include "opt_compat_oldboot.h"
84: #include "opt_multiprocessor.h"
85: #include "opt_lockdebug.h"
86: #include "opt_realmem.h"
87:
88: #include "npx.h"
89: #include "assym.h"
90: #include "apm.h"
91: #include "lapic.h"
92: #include "ioapic.h"
93:
94: #include <sys/errno.h>
95: #include <sys/syscall.h>
96:
97: #include <machine/cputypes.h>
98: #include <machine/param.h>
99: #include <machine/pte.h>
100: #include <machine/segments.h>
101: #include <machine/specialreg.h>
102: #include <machine/trap.h>
103: #include <machine/bootinfo.h>
104:
105: #if NLAPIC > 0
106: #include <machine/i82489reg.h>
107: #endif
108:
109: /* LINTSTUB: include <sys/types.h> */
110: /* LINTSTUB: include <machine/cpu.h> */
111: /* LINTSTUB: include <sys/systm.h> */
112:
113: #include <machine/asm.h>
114:
/*
 * curlwp / curpcb accessor macros.  CPUVAR(field) expands to a
 * reference to the named member of this CPU's private data
 * (presumably provided by machine/frameasm.h, included above —
 * confirm).  These macros hide the MP vs. UP difference.
 */
 115: #if defined(MULTIPROCESSOR)
 116:
/*
 * MP: record the new lwp as this CPU's current lwp, and point the
 * lwp back at the cpu_info it now runs on.  "cpu" is a scratch
 * register, clobbered with the CPUVAR(SELF) pointer.
 */
1.5 thorpej 117: #define SET_CURLWP(lwp,cpu) \
1.1 fvdl 118: movl CPUVAR(SELF),cpu ; \
1.5 thorpej 119: movl lwp,CPUVAR(CURLWP) ; \
 120: movl cpu,L_CPU(lwp)
1.1 fvdl 121:
 122: #else
 123:
/* UP: single cpu_info; the scratch register argument is unused. */
1.5 thorpej 124: #define SET_CURLWP(lwp,tcpu) movl lwp,CPUVAR(CURLWP)
 125: #define GET_CURLWP(reg) movl CPUVAR(CURLWP),reg
1.1 fvdl 126:
 127: #endif
 128:
/* Load/store the pointer to the current process control block. */
 129: #define GET_CURPCB(reg) movl CPUVAR(CURPCB),reg
 130: #define SET_CURPCB(reg) movl reg,CPUVAR(CURPCB)
 131:
/*
 * Store "reg" into the per-CPU reschedule flag (callers presumably
 * pass a register holding zero — verify at call sites).
 */
 132: #define CLEAR_RESCHED(reg) movl reg,CPUVAR(RESCHED)
133:
134: /* XXX temporary kluge; these should not be here */
135: /* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */
136: #include <dev/isa/isareg.h>
137:
138:
139: /* Disallow old names for REALBASEMEM */
140: #ifdef BIOSBASEMEM
141: #error BIOSBASEMEM option deprecated; use REALBASEMEM only if memory size reported by latest boot block is incorrect
142: #endif
143:
144: /* Disallow old names for REALEXTMEM */
145: #ifdef EXTMEM_SIZE
146: #error EXTMEM_SIZE option deprecated; use REALEXTMEM only if memory size reported by latest boot block is incorrect
147: #endif
148: #ifdef BIOSEXTMEM
149: #error BIOSEXTMEM option deprecated; use REALEXTMEM only if memory size reported by latest boot block is incorrect
150: #endif
151:
152: #include <machine/frameasm.h>
153:
154:
155: #ifdef MULTIPROCESSOR
156: #include <machine/i82489reg.h>
157: #endif
158:
 159: /*
 160: * PTmap is recursive pagemap at top of virtual address space.
 161: * Within PTmap, the page directory can be found (third indirection).
 162: *
 163: * PTmap  = base VA of the recursively-mapped page tables,
 164: * PTD    = VA of the page directory inside that window,
 165: * PTDpde = VA of the PDE that points back at the directory itself.
 166: *
 167: * XXX 4 == sizeof pde
 168: */
 165: .set _C_LABEL(PTmap),(PDSLOT_PTE << PDSHIFT)
1.7 ! thorpej 166: .set _C_LABEL(PTD),(_C_LABEL(PTmap) + PDSLOT_PTE * PAGE_SIZE)
1.1 fvdl 167: .set _C_LABEL(PTDpde),(_C_LABEL(PTD) + PDSLOT_PTE * 4)
 168:
 169: /*
 170: * APTmap, APTD is the alternate recursive pagemap.
 171: * It's used when modifying another process's page tables.
 172: *
 173: * XXX 4 == sizeof pde
 174: */
 175: .set _C_LABEL(APTmap),(PDSLOT_APTE << PDSHIFT)
1.7 ! thorpej 176: .set _C_LABEL(APTD),(_C_LABEL(APTmap) + PDSLOT_APTE * PAGE_SIZE)
/* Note: APTDpde deliberately indexes PTD (the live directory), not APTD. */
1.1 fvdl 177: .set _C_LABEL(APTDpde),(_C_LABEL(PTD) + PDSLOT_APTE * 4)
178:
179:
 180: /*
 181: * Initialization
 182: *
 183: * Kernel data-segment variables filled in by the bootstrap code
 184: * below and consumed by early MD C code (init386 etc.).
 185: */
 183: .data
 184:
 185: .globl _C_LABEL(cpu)
 186: .globl _C_LABEL(cpu_feature)
 187: .globl _C_LABEL(esym),_C_LABEL(boothowto)
 188: .globl _C_LABEL(bootinfo),_C_LABEL(atdevbase)
 189: #ifdef COMPAT_OLDBOOT
 190: .globl _C_LABEL(bootdev)
 191: #endif
 192: .globl _C_LABEL(proc0paddr),_C_LABEL(PTDpaddr)
 193: .globl _C_LABEL(biosbasemem),_C_LABEL(biosextmem)
 194: .globl _C_LABEL(gdt)
 195: #ifdef I586_CPU
 196: .globl _C_LABEL(idt)
 197: #endif
 198: .globl _C_LABEL(lapic_tpr)
 199:
/*
 * Page-aligned window laid out to mirror the local APIC register
 * offsets (LAPIC_ID/TPRI/PPRI/ISR); presumably this page is later
 * remapped onto the APIC so the labels address the real registers —
 * confirm against the lapic attachment code.
 */
 200: #if NLAPIC > 0
 201: #ifdef __ELF__
1.7 ! thorpej 202: .align PAGE_SIZE
1.1 fvdl 203: #else
 204: .align 12
 205: #endif
 206: .globl _C_LABEL(local_apic), _C_LABEL(lapic_id)
 207: _C_LABEL(local_apic):
 208: .space LAPIC_ID
 209: _C_LABEL(lapic_id):
 210: .long 0x00000000
 211: .space LAPIC_TPRI-(LAPIC_ID+4)
 212: _C_LABEL(lapic_tpr):
 213: .space LAPIC_PPRI-LAPIC_TPRI
 214: _C_LABEL(lapic_ppr):
 215: .space LAPIC_ISR-LAPIC_PPRI
 216: _C_LABEL(lapic_isr):
1.7 ! thorpej 217: .space PAGE_SIZE-LAPIC_ISR
1.1 fvdl 218: #else
 219: _C_LABEL(lapic_tpr):
 220: .long 0
 221: #endif
 222:
 223:
 224: _C_LABEL(cpu): .long 0 # are we 386, 386sx, or 486,
 225: # or Pentium, or..
 226: _C_LABEL(cpu_feature): .long 0 # feature flags from 'cpuid'
 227: # instruction
 228: _C_LABEL(esym): .long 0 # ptr to end of syms
 229: _C_LABEL(atdevbase): .long 0 # location of start of iomem in virtual
 230: _C_LABEL(proc0paddr): .long 0
 231: _C_LABEL(PTDpaddr): .long 0 # paddr of PTD, for libkvm
 232: #ifndef REALBASEMEM
 233: _C_LABEL(biosbasemem): .long 0 # base memory reported by BIOS
 234: #else
 235: _C_LABEL(biosbasemem): .long REALBASEMEM
 236: #endif
 237: #ifndef REALEXTMEM
 238: _C_LABEL(biosextmem): .long 0 # extended memory reported by BIOS
 239: #else
 240: _C_LABEL(biosextmem): .long REALEXTMEM
 241: #endif
 242:
/* 512-byte temporary bootstrap stack; tmpstk labels its (grow-down) top. */
 243: .space 512
 244: tmpstk:
245:
246:
/*
 * RELOC(x): translate a kernel virtual symbol to its physical address.
 * Needed because this entry code runs before paging is enabled, at the
 * physical load address, while symbols are linked at KERNBASE+.
 */
 247: #define _RELOC(x) ((x) - KERNBASE)
 248: #define RELOC(x) _RELOC(_C_LABEL(x))
 249:
 250: .text
 251: .globl _C_LABEL(kernel_text)
 252: .set _C_LABEL(kernel_text),KERNTEXTOFF
 253:
/*
 * Kernel entry point from the boot loader.  Interrupts/paging are off;
 * the boot program's stack holds our parameters.
 */
 254: .globl start
 255: start: movw $0x1234,0x472 # warm boot
 256:
 257: /*
 258: * Load parameters from stack
 259: * (howto, [bootdev], bootinfo, esym, basemem, extmem).
 260: */
 261: movl 4(%esp),%eax # howto flags
 262: movl %eax,RELOC(boothowto)
 263: #ifdef COMPAT_OLDBOOT
 264: movl 8(%esp),%eax
 265: movl %eax,RELOC(bootdev)
 266: #endif
 267: movl 12(%esp),%eax # bootinfo pointer (may be NULL)
 268:
/*
 * Copy the boot loader's bootinfo entries into the kernel's bootinfo
 * buffer.  %eax walks the source entry-pointer array, %ebx counts
 * remaining entries, %edi is the destination cursor.  If the copy
 * would overflow BOOTINFO_MAXSIZE, stop and shrink the recorded count.
 */
 269: testl %eax, %eax
 270: jz 1f
 271: movl (%eax), %ebx /* number of entries */
 272: movl $RELOC(bootinfo), %edi
 273: movl %ebx, (%edi)
 274: addl $4, %edi
 275: 2:
 276: testl %ebx, %ebx
 277: jz 1f
 278: addl $4, %eax
 279: movl (%eax), %ecx /* address of entry */
 280: pushl %eax
 281: pushl (%ecx) /* len */
 282: pushl %ecx
 283: pushl %edi
 284: addl (%ecx), %edi /* update dest pointer */
 285: cmpl $_RELOC(_C_LABEL(bootinfo) + BOOTINFO_MAXSIZE), %edi
 286: jg 2f
 287: call _C_LABEL(memcpy)
 288: addl $12, %esp
 289: popl %eax
 290: subl $1, %ebx
 291: jmp 2b
 292: 2: /* cleanup for overflow case */
 293: addl $16, %esp
 294: movl $RELOC(bootinfo), %edi
 295: subl %ebx, (%edi) /* correct number of entries */
 296: 1:
 297:
/* esym: end-of-symbols physical address from the loader, rebased to VA. */
 298: movl 16(%esp),%eax
 299: testl %eax,%eax
 300: jz 1f
 301: addl $KERNBASE,%eax
 302: 1: movl %eax,RELOC(esym)
 303:
/* Use loader-supplied memory sizes only if not overridden at config time. */
 304: movl RELOC(biosextmem),%eax
 305: testl %eax,%eax
 306: jnz 1f
 307: movl 20(%esp),%eax
 308: movl %eax,RELOC(biosextmem)
 309: 1:
 310: movl RELOC(biosbasemem),%eax
 311: testl %eax,%eax
 312: jnz 1f
 313: movl 24(%esp),%eax
 314: movl %eax,RELOC(biosbasemem)
 315: 1:
316:
/* Load a known-clean EFLAGS: only the must-be-one bit set. */
 317: /* First, reset the PSL. */
 318: pushl $PSL_MBO
 319: popfl
 320:
 321: /* Clear segment registers; always null in proc0. */
 322: xorl %eax,%eax
 323: movw %ax,%fs
 324: movw %ax,%gs
 325: decl %eax
 326: movl %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
 327:
/*
 * CPU identification by EFLAGS-bit probing: each test saves the
 * original flags in %ecx, toggles one bit, and checks whether the
 * toggle stuck.  The result is stored in RELOC(cpu); all paths meet
 * at the "2:" label after this section.
 */
 328: /* Find out our CPU type. */
 329:
 330: try386: /* Try to toggle alignment check flag; does not exist on 386. */
 331: pushfl
 332: popl %eax
 333: movl %eax,%ecx
 334: orl $PSL_AC,%eax
 335: pushl %eax
 336: popfl
 337: pushfl
 338: popl %eax
 339: xorl %ecx,%eax
 340: andl $PSL_AC,%eax
 341: pushl %ecx
 342: popfl
 343:
 344: testl %eax,%eax
 345: jnz try486
 346:
 347: /*
 348: * Try the test of a NexGen CPU -- ZF will not change on a DIV
 349: * instruction on a NexGen, it will on an i386. Documented in
 350: * Nx586 Processor Recognition Application Note, NexGen, Inc.
 351: */
 352: movl $0x5555,%eax
 353: xorl %edx,%edx
 354: movl $2,%ecx
 355: divl %ecx
 356: jnz is386
 357:
 358: isnx586:
 359: /*
 360: * Don't try cpuid, as Nx586s reportedly don't support the
 361: * PSL_ID bit.
 362: */
 363: movl $CPU_NX586,RELOC(cpu)
 364: jmp 2f
 365:
 366: is386:
 367: movl $CPU_386,RELOC(cpu)
 368: jmp 2f
 369:
 370: try486: /* Try to toggle identification flag; does not exist on early 486s. */
 371: pushfl
 372: popl %eax
 373: movl %eax,%ecx
 374: xorl $PSL_ID,%eax
 375: pushl %eax
 376: popfl
 377: pushfl
 378: popl %eax
 379: xorl %ecx,%eax
 380: andl $PSL_ID,%eax
 381: pushl %ecx
 382: popfl
 383:
 384: testl %eax,%eax
 385: jnz try586
 386: is486: movl $CPU_486,RELOC(cpu)
 387: /*
 388: * Check Cyrix CPU
 389: * Cyrix CPUs do not change the undefined flags following
 390: * execution of the divide instruction which divides 5 by 2.
 391: *
 392: * Note: CPUID is enabled on M2, so it passes another way.
 393: */
 394: pushfl
 395: movl $0x5555, %eax
 396: xorl %edx, %edx
 397: movl $2, %ecx
 398: clc
 399: divl %ecx
 400: jnc trycyrix486
 401: popfl
 402: jmp 2f
 403: trycyrix486:
 404: movl $CPU_6x86,RELOC(cpu) # set CPU type
 405: /*
 406: * Check for Cyrix 486 CPU by seeing if the flags change during a
 407: * divide. This is documented in the Cx486SLC/e SMM Programmer's
 408: * Guide.
 409: */
 410: xorl %edx,%edx
 411: cmpl %edx,%edx # set flags to known state
 412: pushfl
 413: popl %ecx # store flags in ecx
 414: movl $-1,%eax
 415: movl $4,%ebx
 416: divl %ebx # do a long division
 417: pushfl
 418: popl %eax
 419: xorl %ecx,%eax # are the flags different?
 420: testl $0x8d5,%eax # only check C|PF|AF|Z|N|V
 421: jne 2f # yes; must be Cyrix 6x86 CPU
 422: movl $CPU_486DLC,RELOC(cpu) # set CPU type
 423:
/*
 * Cyrix 486DLC cache setup via the 0x22/0x23 configuration-register
 * index/data port pair: write the register index to port 0x22, then
 * read/write its value through port 0x23.
 */
 424: #ifndef CYRIX_CACHE_WORKS
 425: /* Disable caching of the ISA hole only. */
 426: invd
 427: movb $CCR0,%al # Configuration Register index (CCR0)
 428: outb %al,$0x22
 429: inb $0x23,%al
 430: orb $(CCR0_NC1|CCR0_BARB),%al
 431: movb %al,%ah
 432: movb $CCR0,%al
 433: outb %al,$0x22
 434: movb %ah,%al
 435: outb %al,$0x23
 436: invd
 437: #else /* CYRIX_CACHE_WORKS */
 438: /* Set cache parameters */
 439: invd # Start with guaranteed clean cache
 440: movb $CCR0,%al # Configuration Register index (CCR0)
 441: outb %al,$0x22
 442: inb $0x23,%al
 443: andb $~CCR0_NC0,%al
 444: #ifndef CYRIX_CACHE_REALLY_WORKS
 445: orb $(CCR0_NC1|CCR0_BARB),%al
 446: #else
 447: orb $CCR0_NC1,%al
 448: #endif
 449: movb %al,%ah
 450: movb $CCR0,%al
 451: outb %al,$0x22
 452: movb %ah,%al
 453: outb %al,$0x23
 454: /* clear non-cacheable region 1 */
 455: movb $(NCR1+2),%al
 456: outb %al,$0x22
 457: movb $NCR_SIZE_0K,%al
 458: outb %al,$0x23
 459: /* clear non-cacheable region 2 */
 460: movb $(NCR2+2),%al
 461: outb %al,$0x22
 462: movb $NCR_SIZE_0K,%al
 463: outb %al,$0x23
 464: /* clear non-cacheable region 3 */
 465: movb $(NCR3+2),%al
 466: outb %al,$0x22
 467: movb $NCR_SIZE_0K,%al
 468: outb %al,$0x23
 469: /* clear non-cacheable region 4 */
 470: movb $(NCR4+2),%al
 471: outb %al,$0x22
 472: movb $NCR_SIZE_0K,%al
 473: outb %al,$0x23
 474: /* enable caching in CR0 */
 475: movl %cr0,%eax
 476: andl $~(CR0_CD|CR0_NW),%eax
 477: movl %eax,%cr0
 478: invd
 479: #endif /* CYRIX_CACHE_WORKS */
 480:
 481: jmp 2f
 482:
 483: try586: /* Use the `cpuid' instruction. */
 484: xorl %eax,%eax
 485: cpuid
 486: movl %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
487:
 488: 2:
 489: /*
 490: * Finished with old stack; load new %esp now instead of later so we
 491: * can trace this code without having to worry about the trace trap
 492: * clobbering the memory test or the zeroing of the bss+bootstrap page
 493: * tables.
 494: *
 495: * The boot program should check:
 496: * text+data <= &stack_variable - more_space_for_stack
 497: * text+data+bss+pad+space_for_page_tables <= end_of_memory
 498: * Oops, the gdt is in the carcass of the boot program so clearing
 499: * the rest of memory is still not possible.
 500: */
 501: movl $_RELOC(tmpstk),%esp # bootstrap stack end location
 502:
 503: /*
 504: * Virtual address space of kernel:
 505: *
 506: * text | data | bss | [syms] | page dir | proc0 kstack
 507: * 0 1 2 3
 508: */
 509: #define PROC0PDIR ((0) * PAGE_SIZE)
1.7 ! thorpej 509: #define PROC0PDIR ((0) * PAGE_SIZE)
635:
/*
 * First instructions executed at the linked (KERNBASE-relative)
 * addresses.  %esi still holds the physical address of the bootstrap
 * tables, as set up before paging was enabled.
 */
 636: begin:
 637: /* Now running relocated at KERNBASE. Remove double mapping. */
 638: movl _C_LABEL(nkpde),%ecx # for this many pde s,
 639: leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
 640: addl $(KERNBASE), %ebx # now use relocated address
 641: 1: movl $0,(%ebx)
 642: addl $4,%ebx # next pde
 643: loop 1b
 644:
 645: /* Relocate atdevbase. */
 646: movl _C_LABEL(nkpde),%edx
 647: shll $PGSHIFT,%edx
 648: addl $(TABLESIZE+KERNBASE),%edx
 649: addl %esi,%edx
 650: movl %edx,_C_LABEL(atdevbase)
 651:
 652: /* Set up bootstrap stack. */
 653: leal (PROC0STACK+KERNBASE)(%esi),%eax
 654: movl %eax,_C_LABEL(proc0paddr)
 655: leal (USPACE-FRAMESIZE)(%eax),%esp
 656: movl %esi,PCB_CR3(%eax) # pcb->pcb_cr3
 657: xorl %ebp,%ebp # mark end of frames
 658:
 659: subl $NGDT*8, %esp # space for temporary gdt
 660: pushl %esp
 661: call _C_LABEL(initgdt)
 662: addl $4,%esp
 663:
/* %eax = first free physical page after stack + bootstrap page tables. */
 664: movl _C_LABEL(nkpde),%eax
 665: shll $PGSHIFT,%eax
 666: addl $TABLESIZE,%eax
 667: addl %esi,%eax # skip past stack and page tables
 668:
 669: pushl %eax
 670: call _C_LABEL(init386) # wire 386 chip for unix operation
 671: addl $4+NGDT*8,%esp # pop temporary gdt
 672:
 673: #ifdef SAFARI_FIFO_HACK
 674: movb $5,%al
 675: movw $0x37b,%dx
 676: outb %al,%dx
 677: movw $0x37f,%dx
 678: inb %dx,%al
 679: movb %al,%cl
 680:
 681: orb $1,%cl
 682:
 683: movb $5,%al
 684: movw $0x37b,%dx
 685: outb %al,%dx
 686: movw $0x37f,%dx
 687: movb %cl,%al
 688: outb %al,%dx
 689: #endif /* SAFARI_FIFO_HACK */
 690:
 691: call _C_LABEL(main)
692:
 693: /*
 694: * void proc_trampoline(void);
 695: * This is a trampoline function pushed onto the stack of a newly created
 696: * process in order to do some additional setup. The trampoline is entered by
 697: * cpu_switch()ing to the process, so we abuse the callee-saved registers used
 698: * by cpu_switch() to store the information about the stub to call.
 699: * NOTE: This function does not have a normal calling sequence!
 700: */
 701: /* LINTSTUB: Func: void proc_trampoline(void) */
 702: NENTRY(proc_trampoline)
 703: #ifdef MULTIPROCESSOR
 704: call _C_LABEL(proc_trampoline_mp)
 705: #endif
 706: movl $IPL_NONE,CPUVAR(ILEVEL) # drop to lowest interrupt level
 707: pushl %ebx # %ebx = stub's single argument
 708: call *%esi # %esi = stub entry point
 709: addl $4,%esp
 710: INTRFASTEXIT # leave via the trap-frame exit path
 711: /* NOTREACHED */
712:
713: /*****************************************************************************/
714:
 715: /*
 716: * Signal trampoline; copied to top of user stack.
 717: */
 718: /* LINTSTUB: Var: char sigcode[1], esigcode[1]; */
 719: NENTRY(sigcode)
 720: /*
 721: * Handler has returned here as if we called it. The sigcontext
 722: * is on the stack after the 3 args "we" pushed.
 723: */
 724: leal 12(%esp),%eax # get pointer to sigcontext
 725: movl %eax,4(%esp) # put it in the argument slot
 726: # fake return address already there
 727: movl $SYS___sigreturn14,%eax
 728: int $0x80 # enter kernel with args on stack
 729: movl $SYS_exit,%eax
 730: int $0x80 # exit if sigreturn fails
/* End marker; presumably used as esigcode-sigcode to size the copy. */
 731: .globl _C_LABEL(esigcode)
 732: _C_LABEL(esigcode):
733:
734: /*****************************************************************************/
735:
736: /*
737: * The following primitives are used to fill and copy regions of memory.
738: */
739:
 740: /*
 741: * XXX No section 9 man page for fillw.
 742: * fillw seems to be very sparsely used (only in pccons it seems.)
 743: * One wonders if it couldn't be done without.
 744: * -- Perry Metzger, May 7, 2001
 745: */
 746: /*
 747: * void fillw(short pattern, void *addr, size_t len);
 748: * Write len copies of pattern at addr.
 749: */
 750: /* LINTSTUB: Func: void fillw(short pattern, void *addr, size_t len) */
 751: ENTRY(fillw)
 752: pushl %edi
 753: movl 8(%esp),%eax # pattern (low 16 bits used)
 754: movl 12(%esp),%edi # destination
/* Replicate the 16-bit pattern into both halves of %eax. */
 755: movw %ax,%cx
 756: rorl $16,%eax
 757: movw %cx,%ax
 758: cld
 759: movl 16(%esp),%ecx # len (in 16-bit units)
 760: shrl %ecx # do longwords
 761: rep
 762: stosl
 763: movl 16(%esp),%ecx
 764: andl $1,%ecx # do remainder
 765: rep
 766: stosw
 767: popl %edi
 768: ret
769:
 770: /*
 771: * int kcopy(const void *from, void *to, size_t len);
 772: * Copy len bytes, abort on fault.
 773: *
 774: * Returns 0 on success; a fault aborts into copy_fault via the
 775: * PCB_ONFAULT hook (return value then set by the trap handler).
 776: * Handles overlapping regions by copying backward when needed.
 777: */
 774: /* LINTSTUB: Func: int kcopy(const void *from, void *to, size_t len) */
 775: ENTRY(kcopy)
 776: pushl %esi
 777: pushl %edi
 778: GET_CURPCB(%eax) # load curpcb into eax and set on-fault
 779: pushl PCB_ONFAULT(%eax)
 780: movl $_C_LABEL(copy_fault), PCB_ONFAULT(%eax)
 781:
/* After 3 pushes: args are at 16(from), 20(to), 24(len). */
 782: movl 16(%esp),%esi
 783: movl 20(%esp),%edi
 784: movl 24(%esp),%ecx
/* Unsigned (to - from) < len iff dest overlaps the source tail. */
 785: movl %edi,%eax
 786: subl %esi,%eax
 787: cmpl %ecx,%eax # overlapping?
 788: jb 1f
 789: cld # nope, copy forward
 790: shrl $2,%ecx # copy by 32-bit words
 791: rep
 792: movsl
 793: movl 24(%esp),%ecx
 794: andl $3,%ecx # any bytes left?
 795: rep
 796: movsb
 797:
 798: GET_CURPCB(%edx) # XXX save curpcb?
 799: popl PCB_ONFAULT(%edx) # restore previous on-fault handler
 800: popl %edi
 801: popl %esi
 802: xorl %eax,%eax
 803: ret
 804:
 805: ALIGN_TEXT
/* Overlap: copy tail bytes then words, back to front, with DF set. */
 806: 1: addl %ecx,%edi # copy backward
 807: addl %ecx,%esi
 808: std
 809: andl $3,%ecx # any fractional bytes?
 810: decl %edi
 811: decl %esi
 812: rep
 813: movsb
 814: movl 24(%esp),%ecx # copy remainder by 32-bit words
 815: shrl $2,%ecx
 816: subl $3,%esi
 817: subl $3,%edi
 818: rep
 819: movsl
 820: cld # always leave DF clear
 821:
 822: GET_CURPCB(%edx)
 823: popl PCB_ONFAULT(%edx)
 824: popl %edi
 825: popl %esi
 826: xorl %eax,%eax
 827: ret
828:
829: /*****************************************************************************/
830:
831: /*
832: * The following primitives are used to copy data in and out of the user's
833: * address space.
834: */
835:
 836: /*
 837: * Default to the lowest-common-denominator. We will improve it
 838: * later.
 839: *
 840: * copyout_func/copyin_func are indirect-dispatch slots read by the
 841: * copyout/copyin entry points below; presumably they are re-pointed
 842: * at better routines once the CPU class is known — confirm against
 843: * the cpu identification code in machdep.c.
 844: */
 840: #if defined(I386_CPU)
 841: #define DEFAULT_COPYOUT _C_LABEL(i386_copyout)
 842: #define DEFAULT_COPYIN _C_LABEL(i386_copyin)
 843: #elif defined(I486_CPU)
 844: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout)
 845: #define DEFAULT_COPYIN _C_LABEL(i386_copyin)
 846: #elif defined(I586_CPU)
 847: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout) /* XXX */
 848: #define DEFAULT_COPYIN _C_LABEL(i386_copyin) /* XXX */
 849: #elif defined(I686_CPU)
 850: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout) /* XXX */
 851: #define DEFAULT_COPYIN _C_LABEL(i386_copyin) /* XXX */
 852: #endif
 853:
 854: .data
 855:
 856: .globl _C_LABEL(copyout_func)
 857: _C_LABEL(copyout_func):
 858: .long DEFAULT_COPYOUT
 859:
 860: .globl _C_LABEL(copyin_func)
 861: _C_LABEL(copyin_func):
 862: .long DEFAULT_COPYIN
 863:
 864: .text
865:
 866: /*
 867: * int copyout(const void *from, void *to, size_t len);
 868: * Copy len bytes into the user's address space.
 869: * see copyout(9)
 870: *
 871: * Tail-jump through the copyout_func dispatch slot; the selected
 872: * implementation sees the caller's original stack frame unchanged.
 873: */
 871: /* LINTSTUB: Func: int copyout(const void *kaddr, void *uaddr, size_t len) */
 872: ENTRY(copyout)
 873: jmp *_C_LABEL(copyout_func)
874:
 875: #if defined(I386_CPU)
 876: /* LINTSTUB: Func: int i386_copyout(const void *kaddr, void *uaddr, size_t len) */
 877: ENTRY(i386_copyout)
 878: pushl %esi
 879: pushl %edi
 880: pushl $0 # saved-onfault slot (popped into PCB_ONFAULT on exit)
 881:
 882: movl 16(%esp),%esi # kaddr
 883: movl 20(%esp),%edi # uaddr
 884: movl 24(%esp),%eax # len
 885:
 886: /*
 887: * We check that the end of the destination buffer is not past the end
 888: * of the user's address space. If it's not, then we only need to
 889: * check that each page is writable. The 486 will do this for us; the
 890: * 386 will not. (We assume that pages in user space that are not
 891: * writable by the user are not writable by the kernel either.)
 892: */
 893: movl %edi,%edx
 894: addl %eax,%edx
 895: jc _C_LABEL(copy_efault) # uaddr+len wrapped around
 896: cmpl $VM_MAXUSER_ADDRESS,%edx
 897: ja _C_LABEL(copy_efault)
 898:
 899: testl %eax,%eax # anything to do?
 900: jz 3f
 901:
 902: /*
 903: * We have to check each PTE for (write) permission, since the CPU
 904: * doesn't do it for us.
 905: */
 906:
 907: /* Compute number of pages. */
 908: movl %edi,%ecx
 909: andl $PGOFSET,%ecx
 910: addl %eax,%ecx
 911: decl %ecx
 912: shrl $PGSHIFT,%ecx
 913:
 914: /* Compute PTE offset for start address. */
 915: shrl $PGSHIFT,%edi # %edi now = virtual page number, not address
 916:
 917: GET_CURPCB(%edx)
 918: movl $2f,PCB_ONFAULT(%edx) # PTE probe faults go to the trap simulation
 919:
 920: 1: /* Check PTE for each page. */
 921: testb $PG_RW,_C_LABEL(PTmap)(,%edi,4) # via recursive map
 922: jz 2f
 923:
 924: 4: incl %edi
 925: decl %ecx
 926: jns 1b
 927:
/* All pages writable: reload the real uaddr/len clobbered above. */
 928: movl 20(%esp),%edi
 929: movl 24(%esp),%eax
 930: jmp 3f
 931:
 932: 2: /* Simulate a trap. */
 933: pushl %ecx
 934: movl %edi,%eax
 935: shll $PGSHIFT,%eax # page number back to address
 936: pushl %eax
 937: call _C_LABEL(trapwrite) # trapwrite(addr)
 938: addl $4,%esp # pop argument
 939: popl %ecx
 940: testl %eax,%eax # if not ok, return EFAULT
 941: jz 4b
 942: jmp _C_LABEL(copy_efault)
 943:
 944: 3: GET_CURPCB(%edx)
 945: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%edx)
 946:
 947: /* bcopy(%esi, %edi, %eax); */
 948: cld
 949: movl %eax,%ecx
 950: shrl $2,%ecx
 951: rep
 952: movsl
 953: movl %eax,%ecx
 954: andl $3,%ecx
 955: rep
 956: movsb
 957:
 958: popl PCB_ONFAULT(%edx) # restore the zero pushed at entry
 959: popl %edi
 960: popl %esi
 961: xorl %eax,%eax
 962: ret
 963: #endif /* I386_CPU */
964:
 965: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
 966: /* LINTSTUB: Func: int i486_copyout(const void *kaddr, void *uaddr, size_t len) */
 967: ENTRY(i486_copyout)
 968: pushl %esi
 969: pushl %edi
 970: pushl $0 # saved-onfault slot (popped into PCB_ONFAULT on exit)
 971:
 972: movl 16(%esp),%esi # kaddr
 973: movl 20(%esp),%edi # uaddr
 974: movl 24(%esp),%eax # len
 975:
 976: /*
 977: * We check that the end of the destination buffer is not past the end
 978: * of the user's address space.  The 486+ enforces per-page write
 979: * permission in supervisor mode (WP), so no PTE walk is needed here.
 980: */
 980: movl %edi,%edx
 981: addl %eax,%edx
 982: jc _C_LABEL(copy_efault) # uaddr+len wrapped around
 983: cmpl $VM_MAXUSER_ADDRESS,%edx
 984: ja _C_LABEL(copy_efault)
 985:
 986: GET_CURPCB(%edx)
 987: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%edx)
 988:
 989: /* bcopy(%esi, %edi, %eax); */
 990: cld
 991: movl %eax,%ecx
 992: shrl $2,%ecx
 993: rep
 994: movsl
 995: movl %eax,%ecx
 996: andl $3,%ecx
 997: rep
 998: movsb
 999:
 1000: popl PCB_ONFAULT(%edx) # restore the zero pushed at entry
 1001: popl %edi
 1002: popl %esi
 1003: xorl %eax,%eax
 1004: ret
 1005: #endif /* I486_CPU || I586_CPU || I686_CPU */
1006:
 1007: /*
 1008: * int copyin(const void *from, void *to, size_t len);
 1009: * Copy len bytes from the user's address space.
 1010: * see copyin(9)
 1011: *
 1012: * Tail-jump through the copyin_func dispatch slot; the selected
 1013: * implementation sees the caller's original stack frame unchanged.
 1014: */
 1012: /* LINTSTUB: Func: int copyin(const void *uaddr, void *kaddr, size_t len) */
 1013: ENTRY(copyin)
 1014: jmp *_C_LABEL(copyin_func)
1015:
 1016: #if defined(I386_CPU) || defined(I486_CPU) || defined(I586_CPU) || \
 1017: defined(I686_CPU)
 1018: /* LINTSTUB: Func: int i386_copyin(const void *uaddr, void *kaddr, size_t len) */
 1019: ENTRY(i386_copyin)
 1020: pushl %esi
 1021: pushl %edi
 1022: GET_CURPCB(%eax)
 1023: pushl $0 # saved-onfault slot (popped into PCB_ONFAULT on exit)
 1024: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%eax)
 1025:
 1026: movl 16(%esp),%esi # uaddr
 1027: movl 20(%esp),%edi # kaddr
 1028: movl 24(%esp),%eax # len
 1029:
 1030: /*
 1031: * We check that the end of the destination buffer is not past the end
 1032: * of the user's address space. If it's not, then we only need to
 1033: * check that each page is readable, and the CPU will do that for us.
 1034: */
 1035: movl %esi,%edx
 1036: addl %eax,%edx
 1037: jc _C_LABEL(copy_efault) # uaddr+len wrapped around
 1038: cmpl $VM_MAXUSER_ADDRESS,%edx
 1039: ja _C_LABEL(copy_efault)
 1040:
 1041: /* bcopy(%esi, %edi, %eax); */
 1042: cld
 1043: movl %eax,%ecx
 1044: shrl $2,%ecx
 1045: rep
 1046: movsl
 1047: movl %eax,%ecx
 1048: andl $3,%ecx
 1049: rep
 1050: movsb
 1051:
 1052: GET_CURPCB(%edx)
 1053: popl PCB_ONFAULT(%edx) # restore the zero pushed at entry
 1054: popl %edi
 1055: popl %esi
 1056: xorl %eax,%eax
 1057: ret
 1058: #endif /* I386_CPU || I486_CPU || I586_CPU || I686_CPU */
1059:
/*
 * Common error exits for the copy primitives above.  Both assume the
 * copy routine's entry pushes are still on the stack in the order
 * saved-onfault, %edi, %esi.  copy_efault sets the return value and
 * deliberately falls through into copy_fault, which unwinds and
 * returns whatever is in %eax (EFAULT here, or the value installed by
 * the trap handler when arriving via PCB_ONFAULT).
 */
 1060: /* LINTSTUB: Ignore */
 1061: NENTRY(copy_efault)
 1062: movl $EFAULT,%eax
 1063:
 1064: /* LINTSTUB: Ignore */
 1065: NENTRY(copy_fault)
 1066: GET_CURPCB(%edx)
 1067: popl PCB_ONFAULT(%edx)
 1068: popl %edi
 1069: popl %esi
 1070: ret
1071:
/*
 * int copyoutstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, into the
 * user's address space.  Return the number of characters copied (including the
 * NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG; else
 * return 0 or EFAULT.
 * see copyoutstr(9)
 *
 * Two variants: on a 386 (no CPL-0 write-protect enforcement) we must
 * check PG_RW in the PTE for every destination page ourselves, calling
 * trapwrite() to simulate the missing write fault; on 486+ we simply arm
 * pcb_onfault and let the hardware fault for us.
 */
/* LINTSTUB: Func: int copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) */
ENTRY(copyoutstr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi		# esi = from (kernel source)
	movl	16(%esp),%edi		# edi = to (user destination)
	movl	20(%esp),%edx		# edx = maxlen

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	5f			# not a 386 -> hardware-fault variant
#endif /* I486_CPU || I586_CPU || I686_CPU */

	/* Compute number of bytes in first page. */
	movl	%edi,%eax
	andl	$PGOFSET,%eax
	movl	$PAGE_SIZE,%ecx
	subl	%eax,%ecx		# ecx = PAGE_SIZE - (dst % PAGE_SIZE)

	GET_CURPCB(%eax)
	movl	$6f,PCB_ONFAULT(%eax)	# page fault resumes at the trapwrite stub

1:	/*
	 * Once per page, check that we are still within the bounds of user
	 * space, and check for a write fault.
	 */
	cmpl	$VM_MAXUSER_ADDRESS,%edi
	jae	_C_LABEL(copystr_efault)

	/* Compute PTE offset. */
	movl	%edi,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address

	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	2f			# page already writable

6:	/* Simulate a trap. */
	pushl	%edx
	pushl	%edi
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# clear argument from stack
	popl	%edx
	testl	%eax,%eax
	jnz	_C_LABEL(copystr_efault)	# trapwrite couldn't fix it up

2:	/* Copy up to end of this page. */
	subl	%ecx,%edx		# predecrement total count
	jnc	3f			# carry => less than a page left:
	addl	%edx,%ecx		# ecx += (edx - ecx) = edx
	xorl	%edx,%edx

	/*
	 * NOTE(review): this loop uses lodsb/stosb without an explicit cld,
	 * unlike the 486 path below — it relies on DF already being clear;
	 * verify that invariant holds for all callers.
	 */
3:	decl	%ecx
	js	4f			# this page's quota exhausted
	lodsb
	stosb
	testb	%al,%al
	jnz	3b

	/* Success -- 0 byte reached. */
	addl	%ecx,%edx		# add back residual for this page
	xorl	%eax,%eax
	jmp	copystr_return

4:	/* Go to next page, if any. */
	movl	$PAGE_SIZE,%ecx
	testl	%edx,%edx
	jnz	1b

	/* edx is zero -- return ENAMETOOLONG. */
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return
#endif /* I386_CPU */

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
5:	GET_CURPCB(%eax)
	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%eax)
	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
	 * Clamp the count so the copy cannot run past the end of user
	 * space; 20(%esp) is updated so copystr_return computes the
	 * copied length against the clamped maximum.
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%edi,%eax
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)

1:	incl	%edx			# bias: "0 left" means out of room
	cld

1:	decl	%edx
	jz	2f			# out of room before NUL
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx			# undo the +1 bias
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%edi	# stopped at end of user VA?
	jae	_C_LABEL(copystr_efault)
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return
#endif /* I486_CPU || I586_CPU || I686_CPU */
1189:
/*
 * int copyinstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, from the
 * user's address space.  Return the number of characters copied (including the
 * NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG; else
 * return 0 or EFAULT.
 * see copyinstr(9)
 *
 * Reads never need the 386 PG_RW dance, so there is a single variant:
 * arm pcb_onfault and let a page fault on the user source resume at
 * copystr_fault.
 */
/* LINTSTUB: Func: int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) */
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%ecx)

	movl	12(%esp),%esi		# %esi = from (user source)
	movl	16(%esp),%edi		# %edi = to (kernel destination)
	movl	20(%esp),%edx		# %edx = maxlen

	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
	 * Clamp the count so the copy cannot read past the end of user
	 * space; 20(%esp) is updated so copystr_return computes the
	 * copied length against the clamped maximum.
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%esi,%eax
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)

1:	incl	%edx			# bias: "0 left" means out of room
	cld

1:	decl	%edx
	jz	2f			# out of room before NUL
	lodsb
	stosb
	testb	%al,%al
	jnz	1b			# loop until the NUL is copied

	/* Success -- 0 byte reached. */
	decl	%edx			# undo the +1 bias
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%esi	# stopped at end of user VA?
	jae	_C_LABEL(copystr_efault)
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return
1239:
/*
 * Common exit path for the copy{in,out}str() family.
 *
 * copystr_efault: an address-range check failed; load EFAULT and fall
 * through.  copystr_fault is the pcb_onfault target for in-copy page
 * faults (%eax is expected to hold the error there — confirm against
 * trap()).  On entry to copystr_return:
 *	%edx     = residual (uncopied) count
 *	20(%esp) = (possibly clamped) maxlen
 *	24(%esp) = lencopied pointer, may be NULL
 */
/* LINTSTUB: Ignore */
NENTRY(copystr_efault)
	movl	$EFAULT,%eax

/* LINTSTUB: Ignore */
NENTRY(copystr_fault)
copystr_return:
	/* Set *lencopied and return %eax. */
	GET_CURPCB(%ecx)
	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
	movl	20(%esp),%ecx
	subl	%edx,%ecx		# copied = maxlen - residual
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	8f			# NULL lencopied -> don't store
	movl	%ecx,(%edx)

8:	popl	%edi
	popl	%esi
	ret
1260:
/*
 * int copystr(const void *from, void *to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, entirely
 * within kernel space.  The number of characters copied (including the
 * terminating NUL) is stored through lencopied when it is non-NULL.
 * Returns 0 on success, or ENAMETOOLONG if the NUL was not reached
 * within maxlen bytes.
 * see copystr(9)
 */
/* LINTSTUB: Func: int copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done) */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi		# %esi = source
	movl	16(%esp),%edi		# %edi = destination
	movl	20(%esp),%edx		# %edx = bytes remaining
	incl	%edx			# bias so "0 left" means overflow

1:	decl	%edx
	jz	3f			# ran out of room -> ENAMETOOLONG
	movb	(%esi),%al		# copy one byte by hand; no string
	incl	%esi			# insns, so the direction flag is
	movb	%al,(%edi)		# irrelevant here
	incl	%edi
	testb	%al,%al
	jnz	1b			# keep going until the NUL

	/* Copied the terminating NUL -- success. */
	decl	%edx			# undo the +1 bias
	xorl	%eax,%eax
	jmp	2f

3:	/* maxlen exhausted before a NUL was seen. */
	movl	$ENAMETOOLONG,%eax

2:	/* Report the number of bytes copied, if the caller asked. */
	movl	20(%esp),%ecx		# %ecx = maxlen
	subl	%edx,%ecx		# ... minus residual = copied count
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	4f			# NULL lencopied -> skip the store
	movl	%ecx,(%edx)

4:	popl	%edi
	popl	%esi
	ret
1305:
/*
 * long fuword(const void *uaddr);
 * Fetch an int from the user's address space.
 * Returns the fetched word, or -1 on an out-of-range or faulting address
 * (see fusufault/fusuaddrfault).
 * see fuword(9)
 */
/* LINTSTUB: Func: long fuword(const void *base) */
ENTRY(fuword)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	# whole word must fit in user VA
	ja	_C_LABEL(fusuaddrfault)
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
	movl	(%edx),%eax		# the access that may fault
	movl	$0,PCB_ONFAULT(%ecx)	# disarm handler
	ret
1321:
/*
 * int fusword(const void *uaddr);
 * Fetch a short from the user's address space.
 * Returns the zero-extended short, or -1 on an out-of-range or faulting
 * address.
 * see fusword(9)
 */
/* LINTSTUB: Func: int fusword(const void *base) */
ENTRY(fusword)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	# whole short must fit in user VA
	ja	_C_LABEL(fusuaddrfault)
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
	movzwl	(%edx),%eax		# the access that may fault
	movl	$0,PCB_ONFAULT(%ecx)	# disarm handler
	ret
1337:
/*
 * int fuswintr(const void *uaddr);
 * Fetch a short from the user's address space.  Can be called during an
 * interrupt: installs fusubail (which trap() recognizes and bails out of
 * immediately) instead of fusufault, so we never try to page in from
 * interrupt context.
 * see fuswintr(9)
 */
/* LINTSTUB: Func: int fuswintr(const void *base) */
ENTRY(fuswintr)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	_C_LABEL(fusuaddrfault)
	movl	CPUVAR(CURLWP),%ecx	# open-coded curpcb lookup
	movl	L_ADDR(%ecx),%ecx
	movl	$_C_LABEL(fusubail),PCB_ONFAULT(%ecx)
	movzwl	(%edx),%eax		# the access that may fault
	movl	$0,PCB_ONFAULT(%ecx)	# disarm handler
	ret
1355:
/*
 * int fubyte(const void *uaddr);
 * Fetch a byte from the user's address space.
 * Returns the zero-extended byte, or -1 on an out-of-range or faulting
 * address.
 * see fubyte(9)
 */
/* LINTSTUB: Func: int fubyte(const void *base) */
ENTRY(fubyte)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	_C_LABEL(fusuaddrfault)
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
	movzbl	(%edx),%eax		# the access that may fault
	movl	$0,PCB_ONFAULT(%ecx)	# disarm handler
	ret
1371:
/*
 * Handle faults from [fs]u*().  Clean up and return -1.
 * Entered via pcb_onfault; %ecx must still point at the current pcb
 * (every [fs]u* routine loads it there before arming the handler).
 */
/* LINTSTUB: Ignore */
NENTRY(fusufault)
	movl	$0,PCB_ONFAULT(%ecx)	# disarm handler
	movl	$-1,%eax
	ret
1380:
/*
 * Handle faults from [fs]u*().  Clean up and return -1.  This differs from
 * fusufault() in that trap() will recognize it and return immediately rather
 * than trying to page fault.  Used by the *intr() variants, which may run
 * in interrupt context.  %ecx must still point at the current pcb.
 */
/* LINTSTUB: Ignore */
NENTRY(fusubail)
	movl	$0,PCB_ONFAULT(%ecx)	# disarm handler
	movl	$-1,%eax
	ret
1391:
/*
 * Handle earlier faults from [fs]u*(), due to out-of-range addresses.
 * No fault handler has been armed yet, so there is nothing to disarm;
 * just return -1.
 */
/* LINTSTUB: Ignore */
NENTRY(fusuaddrfault)
	movl	$-1,%eax
	ret
1399:
/*
 * int suword(void *uaddr, long x);
 * Store an int in the user's address space.
 * Returns 0 on success, -1 on an out-of-range or faulting address.
 * On a 386 the CPU does not enforce write protection at CPL 0, so the
 * destination PTE's PG_RW bit is checked by hand and trapwrite() is used
 * to simulate the missing write fault.
 * see suword(9)
 */
/* LINTSTUB: Func: int suword(void *base, long c) */
ENTRY(suword)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	# whole word must fit in user VA
	ja	_C_LABEL(fusuaddrfault)

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	2f			# not a 386 -> rely on hardware fault
#endif /* I486_CPU || I586_CPU || I686_CPU */

	GET_CURPCB(%eax)
	movl	$3f,PCB_ONFAULT(%eax)	# PTE probe itself may fault

	movl	%edx,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	1f			# already writable

3:	/* Simulate a trap. */
	pushl	%edx			# save user address across the call
	pushl	%edx
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# clear parameter from the stack
	popl	%edx
	GET_CURPCB(%ecx)
	testl	%eax,%eax
	jnz	_C_LABEL(fusufault)	# trapwrite couldn't fix it up

1:	/* XXX also need to check the following 3 bytes for validity! */
#endif

2:	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)

	movl	8(%esp),%eax
	movl	%eax,(%edx)		# the store that may fault
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)	# disarm handler
	ret
1446:
/*
 * int susword(void *uaddr, short x);
 * Store a short in the user's address space.
 * Returns 0 on success, -1 on an out-of-range or faulting address.
 * See suword() above for the 386 PG_RW/trapwrite() rationale.
 * see susword(9)
 */
/* LINTSTUB: Func: int susword(void *base, short c) */
ENTRY(susword)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	# whole short must fit in user VA
	ja	_C_LABEL(fusuaddrfault)

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	2f			# not a 386 -> rely on hardware fault
#endif /* I486_CPU || I586_CPU || I686_CPU */

	GET_CURPCB(%eax)
	movl	$3f,PCB_ONFAULT(%eax)	# PTE probe itself may fault

	movl	%edx,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	1f			# already writable

3:	/* Simulate a trap. */
	pushl	%edx			# save user address across the call
	pushl	%edx
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# clear parameter from the stack
	popl	%edx
	GET_CURPCB(%ecx)
	testl	%eax,%eax
	jnz	_C_LABEL(fusufault)	# trapwrite couldn't fix it up

1:	/* XXX also need to check the following byte for validity! */
#endif

2:	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)

	movl	8(%esp),%eax
	movw	%ax,(%edx)		# the store that may fault
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)	# disarm handler
	ret
1493:
/*
 * int suswintr(void *uaddr, short x);
 * Store a short in the user's address space.  Can be called during an
 * interrupt: installs fusubail, so trap() bails out immediately instead
 * of paging, and the 386 path gives up outright rather than calling
 * trapwrite().
 * see suswintr(9)
 */
/* LINTSTUB: Func: int suswintr(void *base, short c) */
ENTRY(suswintr)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	_C_LABEL(fusuaddrfault)
	movl	CPUVAR(CURLWP),%ecx	# open-coded curpcb lookup
	movl	L_ADDR(%ecx),%ecx
	movl	$_C_LABEL(fusubail),PCB_ONFAULT(%ecx)

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	2f			# not a 386 -> rely on hardware fault
#endif /* I486_CPU || I586_CPU || I686_CPU */

	movl	%edx,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	1f			# already writable

	/* Simulate a trap.  Can't call trapwrite() from interrupt level. */
	jmp	_C_LABEL(fusubail)

1:	/* XXX also need to check the following byte for validity! */
#endif

2:	movl	8(%esp),%eax
	movw	%ax,(%edx)		# the store that may fault
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)	# disarm handler
	ret
1531:
/*
 * int subyte(void *uaddr, char x);
 * Store a byte in the user's address space.
 * Returns 0 on success, -1 on an out-of-range or faulting address.
 * See suword() above for the 386 PG_RW/trapwrite() rationale.
 * see subyte(9)
 */
/* LINTSTUB: Func: int subyte(void *base, int c) */
ENTRY(subyte)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	_C_LABEL(fusuaddrfault)

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	2f			# not a 386 -> rely on hardware fault
#endif /* I486_CPU || I586_CPU || I686_CPU */

	GET_CURPCB(%eax)
	movl	$3f,PCB_ONFAULT(%eax)	# PTE probe itself may fault

	movl	%edx,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	1f			# already writable

3:	/* Simulate a trap. */
	pushl	%edx			# save user address across the call
	pushl	%edx
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# clear parameter from the stack
	popl	%edx
	GET_CURPCB(%ecx)
	testl	%eax,%eax
	jnz	_C_LABEL(fusufault)	# trapwrite couldn't fix it up

1:
#endif

2:	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)

	movb	8(%esp),%al
	movb	%al,(%edx)		# the store that may fault
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)	# disarm handler
	ret
1578:
/*****************************************************************************/

/*
 * The following is i386-specific nonsense.
 */

/*
 * void lgdt(struct region_descriptor *rdp);
 * Load a new GDT pointer (and do any necessary cleanup).
 * XXX It's somewhat questionable whether reloading all the segment registers
 * is necessary, since the actual descriptor data is not changed except by
 * process creation and exit, both of which clean up via task switches.  OTOH,
 * this only happens at run time when the GDT is resized.
 */
/* LINTSTUB: Func: void lgdt(struct region_descriptor *rdp) */
NENTRY(lgdt)
	/* Reload the descriptor table. */
	movl	4(%esp),%eax
	lgdt	(%eax)
	/* Flush the prefetch queue. */
	jmp	1f
	nop
1:	/* Reload "stale" selectors. */
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs			# %fs = per-CPU data segment
	/* Reload code selector by doing intersegment return. */
	popl	%eax
	pushl	$GSEL(GCODE_SEL, SEL_KPL)
	pushl	%eax
	lret				# far return reloads %cs
1614:
1615: /*****************************************************************************/
1616:
1617: /*
1618: * These functions are primarily used by DDB.
1619: */
1620:
/*
 * int setjmp(label_t *l)
 * Save the callee-saved register context and the return %eip into *l so
 * that a later longjmp(l) can resume here.  Returns 0 when called
 * directly; the resumption via longjmp() appears to return 1.
 */
/* LINTSTUB: Func: int setjmp (label_t *l) */
ENTRY(setjmp)
	movl	4(%esp),%eax		# %eax = label buffer
	movl	(%esp),%ecx		# our return address
	movl	%ecx,20(%eax)		# save resume %eip
	movl	%edi,16(%eax)		# stash the callee-saved registers...
	movl	%esi,12(%eax)
	movl	%ebp,8(%eax)
	movl	%esp,4(%eax)		# ...and the stack pointer
	movl	%ebx,(%eax)
	xorl	%eax,%eax		# direct call returns 0
	ret
1633:
/*
 * void longjmp(label_t *l)
 * Restore the context saved by setjmp(l) and resume execution there;
 * the corresponding setjmp() appears to return 1.  Does not return to
 * its own caller.
 */
/* LINTSTUB: Func: void longjmp (label_t *l) */
ENTRY(longjmp)
	movl	4(%esp),%eax		# %eax = label buffer
	movl	(%eax),%ebx		# reload callee-saved registers...
	movl	8(%eax),%ebp
	movl	12(%eax),%esi
	movl	16(%eax),%edi
	movl	4(%eax),%esp		# ...and switch back to the saved stack
	movl	20(%eax),%ecx		# saved resume %eip
	movl	%ecx,(%esp)		# overwrite our return address with it
	movl	$1,%eax			# make setjmp() appear to return 1
	ret
1647:
/*****************************************************************************/

	.globl	_C_LABEL(sched_whichqs),_C_LABEL(sched_qs)
	.globl	_C_LABEL(uvmexp),_C_LABEL(panic)

#ifdef DIAGNOSTIC
/*
 * Panic target for run-queue consistency failures detected by
 * cpu_switch()/cpu_switchto().  Never returns.
 */
NENTRY(switch_error)
	pushl	$1f
	call	_C_LABEL(panic)
	/* NOTREACHED */
1:	.asciz	"cpu_switch"
#endif /* DIAGNOSTIC */
1660:
/*
 * void cpu_switch(struct lwp *)
 * Find a runnable process and switch to it.  Wait if necessary.  If the new
 * process is the same as the old one, we short-circuit the context save and
 * restore.
 *
 * Returns in %eax: 1 if we resumed the same lwp, 0 otherwise (set just
 * before switch_return).
 *
 * Note that the stack frame layout is known to "struct switchframe"
 * in <machine/frame.h> and to the code in cpu_fork() which initializes
 * it for a new lwp.
 */
ENTRY(cpu_switch)
	pushl	%ebx
	pushl	%esi
	pushl	%edi

#ifdef DEBUG
	cmpl	$IPL_SCHED,CPUVAR(ILEVEL)
	jae	1f
	pushl	$2f
	call	_C_LABEL(panic)
	/* NOTREACHED */
2:	.asciz	"not splsched() in cpu_switch!"
1:
#endif /* DEBUG */

	movl	16(%esp),%esi		# current (arg0; 16 = 3 pushes + ra)

	/*
	 * Clear curlwp so that we don't accumulate system time while idle.
	 * This also insures that schedcpu() will move the old lwp to
	 * the correct queue if it happens to get called from the spllower()
	 * below and changes the priority.  (See corresponding comment in
	 * userret()).
	 */
	movl	$0,CPUVAR(CURLWP)
	/*
	 * First phase: find new lwp.
	 *
	 * Registers:
	 *   %eax - queue head, scratch, then zero
	 *   %ebx - queue number
	 *   %ecx - cached value of whichqs
	 *   %edx - next lwp in queue
	 *   %esi - old lwp
	 *   %edi - new lwp
	 */

	/* Look for new lwp. */
	cli				# splhigh doesn't do a cli
	movl	_C_LABEL(sched_whichqs),%ecx
	bsfl	%ecx,%ebx		# find a full q
	jnz	switch_dequeue		# ZF clear -> some queue is non-empty

	/*
	 * idling: save old context.
	 *
	 * Registers:
	 *   %eax, %ecx - scratch
	 *   %esi - old lwp, then old pcb
	 *   %edi - idle pcb
	 */

	pushl	%esi
	call	_C_LABEL(pmap_deactivate)	# pmap_deactivate(oldproc)
	addl	$4,%esp

	movl	L_ADDR(%esi),%esi	# %esi = old lwp's pcb

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%esi)
	movl	%ebp,PCB_EBP(%esi)

	/* Find idle PCB for this CPU */
#ifndef MULTIPROCESSOR
	movl	$_C_LABEL(lwp0),%ebx
	movl	L_ADDR(%ebx),%edi
	movl	L_MD_TSS_SEL(%ebx),%edx
#else
	movl	CPUVAR(IDLE_PCB),%edi
	movl	CPUVAR(IDLE_TSS_SEL),%edx
#endif
	movl	$0,CPUVAR(CURLWP)	/* In case we fault... */

	/* Restore the idle context (avoid interrupts) */
	cli

	/* Restore stack pointers. */
	movl	PCB_ESP(%edi),%esp
	movl	PCB_EBP(%edi),%ebp


	/* Switch address space. */
	movl	PCB_CR3(%edi),%ecx
	movl	%ecx,%cr3

	/* Switch TSS. Reset "task busy" flag before loading. */
#ifdef MULTIPROCESSOR
	movl	CPUVAR(GDT),%eax
#else
	movl	_C_LABEL(gdt),%eax
#endif
	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)
	ltr	%dx

	/* We're always in the kernel, so we don't need the LDT. */

	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%edi),%ecx
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%edi)

	xorl	%esi,%esi		# no old lwp from here on
	sti
idle_unlock:
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_unlock_idle)
#endif
	/* Interrupts are okay again. */
	pushl	$IPL_NONE		# spl0()
	call	_C_LABEL(Xspllower)	# process pending interrupts
	addl	$4,%esp
	jmp	idle_start
idle_zero:
	sti
	call	_C_LABEL(uvm_pageidlezero)
	cli
	cmpl	$0,_C_LABEL(sched_whichqs)	# work appeared while zeroing?
	jnz	idle_exit
idle_loop:
	/* Try to zero some pages. */
	movl	_C_LABEL(uvm)+UVM_PAGE_IDLE_ZERO,%ecx
	testl	%ecx,%ecx
	jnz	idle_zero
	sti
	hlt				# wait for the next interrupt
NENTRY(mpidle)
idle_start:
	cli
	cmpl	$0,_C_LABEL(sched_whichqs)
	jz	idle_loop
idle_exit:
	movl	$IPL_HIGH,CPUVAR(ILEVEL)	# splhigh
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%ecx
	bsfl	%ecx,%ebx
	jz	idle_unlock		# queues emptied again; retry

switch_dequeue:
	/*
	 * we're running at splhigh(), but it's otherwise okay to take
	 * interrupts here.
	 */
	sti
	leal	_C_LABEL(sched_qs)(,%ebx,8),%eax	# select q

	movl	L_FORW(%eax),%edi	# unlink from front of process q
#ifdef	DIAGNOSTIC
	cmpl	%edi,%eax		# linked to self (i.e. nothing queued)?
	je	_C_LABEL(switch_error)	# not possible
#endif /* DIAGNOSTIC */
	movl	L_FORW(%edi),%edx
	movl	%edx,L_FORW(%eax)
	movl	%eax,L_BACK(%edx)

	cmpl	%edx,%eax		# q empty?
	jne	3f

	btrl	%ebx,%ecx		# yes, clear to indicate empty
	movl	%ecx,_C_LABEL(sched_whichqs)	# update q status

3:	/* We just did it. */
	xorl	%eax,%eax
	CLEAR_RESCHED(%eax)

switch_resume:
	/* Entered from cpu_switchto() as well, with %eax == 0. */
#ifdef	DIAGNOSTIC
	cmpl	%eax,L_WCHAN(%edi)	# Waiting for something?
	jne	_C_LABEL(switch_error)	# Yes; shouldn't be queued.
	cmpb	$LSRUN,L_STAT(%edi)	# In run state?
	jne	_C_LABEL(switch_error)	# No; shouldn't be queued.
#endif /* DIAGNOSTIC */

	/* Isolate lwp.  XXX Is this necessary? */
	movl	%eax,L_BACK(%edi)

	/* Record new lwp. */
	movb	$LSONPROC,L_STAT(%edi)	# l->l_stat = LSONPROC
	SET_CURLWP(%edi,%ecx)

	/* Skip context switch if same lwp. */
	movl	$1, %eax		# return value: resumed same lwp
	cmpl	%edi,%esi
	je	switch_return

	/* If old lwp exited, don't bother. */
	testl	%esi,%esi
	jz	switch_exited

	/*
	 * Second phase: save old context.
	 *
	 * Registers:
	 *   %eax, %ecx - scratch
	 *   %esi - old lwp, then old pcb
	 *   %edi - new lwp
	 */

	pushl	%esi
	call	_C_LABEL(pmap_deactivate)	# pmap_deactivate(oldproc)
	addl	$4,%esp

	movl	L_ADDR(%esi),%esi	# %esi = old lwp's pcb

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%esi)
	movl	%ebp,PCB_EBP(%esi)

switch_exited:
	/*
	 * Third phase: restore saved context.
	 *
	 * Registers:
	 *   %eax, %ebx, %ecx, %edx - scratch
	 *   %esi - new pcb
	 *   %edi - new lwp
	 */

	/* No interrupts while loading new state. */
	cli
	movl	L_ADDR(%edi),%esi	# %esi = new lwp's pcb

	/* Restore stack pointers. */
	movl	PCB_ESP(%esi),%esp
	movl	PCB_EBP(%esi),%ebp

#if 0
	/* Don't bother with the rest if switching to a system process. */
	testl	$P_SYSTEM,L_FLAG(%edi);	XXX NJWLWP lwp's don't have P_SYSTEM!
	jnz	switch_restored
#endif

#ifdef MULTIPROCESSOR
	movl	CPUVAR(GDT),%eax
#else
	/* Load TSS info. */
	movl	_C_LABEL(gdt),%eax
#endif
	movl	L_MD_TSS_SEL(%edi),%edx

	/* Switch TSS. Reset "task busy" flag before loading. */
	/*
	 * NOTE(review): the idle path above uses 4-SEL_KPL here; SEL_KPL is
	 * presumably 0 so the two forms agree — verify against segments.h.
	 */
	andl	$~0x0200,4(%eax,%edx, 1)
	ltr	%dx

	pushl	%edi
	call	_C_LABEL(pmap_activate)	# pmap_activate(p)
	addl	$4,%esp

#if 0
switch_restored:
#endif
	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%esi),%ecx
#ifdef MULTIPROCESSOR
	/*
	 * If our floating point registers are on a different cpu,
	 * clear CR0_TS so we'll trap rather than reuse bogus state.
	 */
	movl	PCB_FPCPU(%esi),%ebx
	cmpl	CPUVAR(SELF),%ebx
	jz	1f
	orl	$CR0_TS,%ecx
1:
#endif
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%esi)

	/* Interrupts are okay again. */
	sti

	/*
	 * Check for restartable atomic sequences (RAS):
	 * if the new lwp was interrupted inside a registered RAS range,
	 * rewind its saved %eip to the start of the sequence.
	 */
	movl	CPUVAR(CURLWP),%edi
	movl	L_PROC(%edi),%esi
	cmpl	$0,P_NRAS(%esi)		# any sequences registered?
	je	1f
	movl	L_MD_REGS(%edi),%ebx	# %ebx = new lwp's trapframe
	movl	TF_EIP(%ebx),%eax
	pushl	%eax
	pushl	%esi
	call	_C_LABEL(ras_lookup)	# ras_lookup(p, eip)
	addl	$8,%esp
	cmpl	$-1,%eax		# -1 = not inside a sequence
	je	1f
	movl	%eax,TF_EIP(%ebx)	# restart the sequence
1:
	xor	%eax,%eax		# return value: switched lwps

switch_return:
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_unlock_idle)
#endif
	pushl	$IPL_NONE		# spl0()
	call	_C_LABEL(Xspllower)	# process pending interrupts
	addl	$4,%esp
	movl	$IPL_HIGH,CPUVAR(ILEVEL)	# splhigh()

	popl	%edi
	popl	%esi
	popl	%ebx
	ret
1978:
/*
 * void cpu_switchto(struct lwp *current, struct lwp *next)
 * Switch to the specified next LWP.  Sets up the same register state
 * that cpu_switch() has at switch_resume (%esi = old lwp, %edi = new
 * lwp, %eax = 0) and jumps into the middle of it.
 */
ENTRY(cpu_switchto)
	pushl	%ebx
	pushl	%esi
	pushl	%edi

#ifdef DEBUG
	cmpl	$IPL_SCHED,CPUVAR(ILEVEL)
	jae	1f
	pushl	$2f
	call	_C_LABEL(panic)
	/* NOTREACHED */
2:	.asciz	"not splsched() in cpu_switchto!"
1:
#endif /* DEBUG */

	movl	16(%esp),%esi		# current
	movl	20(%esp),%edi		# next

	/*
	 * Clear curlwp so that we don't accumulate system time while idle.
	 * This also insures that schedcpu() will move the old process to
	 * the correct queue if it happens to get called from the spllower()
	 * below and changes the priority.  (See corresponding comment in
	 * userret()).
	 *
	 * XXX Is this necessary?  We know we won't go idle.
	 */
	movl	$0,CPUVAR(CURLWP)

	/*
	 * We're running at splhigh(), but it's otherwise okay to take
	 * interrupts here.
	 */
	sti

	/* Jump into the middle of cpu_switch */
	xorl	%eax,%eax		# switch_resume expects %eax == 0
	jmp	switch_resume
2021:
/*
 * void switch_exit(struct lwp *l, void (*exit)(struct lwp *));
 * Switch to the appropriate idle context (lwp0's if uniprocessor; the cpu's
 * if multiprocessor) and deallocate the address space and kernel stack for p.
 * Then jump into cpu_switch(), as if we were in the idle proc all along.
 */
#ifndef MULTIPROCESSOR
	.globl	_C_LABEL(lwp0)
#endif
	.globl	_C_LABEL(uvmspace_free),_C_LABEL(kernel_map)
	.globl	_C_LABEL(uvm_km_free),_C_LABEL(tss_free)
/* LINTSTUB: Func: void switch_exit(struct lwp *l, void (*exit)(struct lwp *)) */
ENTRY(switch_exit)
	movl	4(%esp),%edi		# old process
	movl	8(%esp),%eax		# exit func
	/* Pick up the idle pcb and TSS selector for this CPU. */
#ifndef MULTIPROCESSOR
	movl	$_C_LABEL(lwp0),%ebx
	movl	L_ADDR(%ebx),%esi
	movl	L_MD_TSS_SEL(%ebx),%edx
#else
	movl	CPUVAR(IDLE_PCB),%esi
	movl	CPUVAR(IDLE_TSS_SEL),%edx
#endif
	/* In case we fault... */
	movl	$0,CPUVAR(CURLWP)

	/* Restore the idle context. */
	cli

	/* Restore stack pointers. */
	movl	PCB_ESP(%esi),%esp
	movl	PCB_EBP(%esi),%ebp

	/* Save exit func. */
	pushl	%eax

	/* Load TSS info. */
#ifdef MULTIPROCESSOR
	movl	CPUVAR(GDT),%eax
#else
	/* Load TSS info. */
	movl	_C_LABEL(gdt),%eax
#endif

	/* Switch address space. */
	movl	PCB_CR3(%esi),%ecx
	movl	%ecx,%cr3

	/* Switch TSS. */
	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)	# clear "task busy" bit
	ltr	%dx

	/* We're always in the kernel, so we don't need the LDT. */

	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%esi),%ecx
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%esi)

	/* Interrupts are okay again. */
	sti

	/*
	 * Schedule the dead process's vmspace and stack to be freed.
	 * The exit function runs on the idle stack we just switched to,
	 * so the dead lwp's own kernel stack can safely be reclaimed.
	 */
	movl	0(%esp),%eax		/* %eax = exit func */
	movl	%edi,0(%esp)		/* {lwp_}exit2(l) */
	call	*%eax
	addl	$4,%esp

	/* Jump into cpu_switch() with the right state. */
	xorl	%esi,%esi		# no previous lwp
	movl	%esi,CPUVAR(CURLWP)
	jmp	idle_start
2098:
/*
 * void savectx(struct pcb *pcb);
 * Update pcb, saving current processor state.  Only the stack pointers
 * are saved here; everything else in the pcb is assumed current.
 */
/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	movl	4(%esp),%edx		# edx = pcb

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)

	ret
2112:
/*
 * Old call gate entry for syscall.
 * Call gates don't push %eflags, so fetch it ourselves and overwrite the
 * slot where a trap gate would have put it, then share the common
 * syscall1 path with a 7-byte (lcall) restart size.
 */
/* LINTSTUB: Var: char Xosyscall[1]; */
IDTVEC(osyscall)
	/* Set eflags in trap frame. */
	pushfl
	popl	8(%esp)
	pushl	$7			# size of instruction for restart
	jmp	syscall1
2123:
/*
 * Trap gate entry for syscall (int $0x80).
 * Builds a trapframe, dispatches to the process's syscall handler, then
 * loops handling ASTs until none are pending before returning to user
 * mode.  DIAGNOSTIC builds additionally verify the spl level on entry
 * and exit.
 */
/* LINTSTUB: Var: char Xsyscall[1]; */
IDTVEC(syscall)
	pushl	$2			# size of instruction for restart
syscall1:
	pushl	$T_ASTFLT		# trap # for doing ASTs
	INTRENTRY

#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
	testl	%ebx,%ebx		# spl should be 0 on entry
	jz	1f
	pushl	$5f
	call	_C_LABEL(printf)
	addl	$4,%esp
#ifdef DDB
	int	$3
#endif
1:
#endif /* DIAGNOSTIC */
	movl	CPUVAR(CURLWP),%edx
	movl	%esp,L_MD_REGS(%edx)	# save pointer to frame
	movl	L_PROC(%edx),%edx
	call	*P_MD_SYSCALL(%edx)	# get pointer to syscall() function
2:	/* Check for ASTs on exit to user mode. */
	cli
	CHECK_ASTPENDING(%eax)
	je	1f
	/* Always returning to user mode here. */
	CLEAR_ASTPENDING(%eax)
	sti
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	call	_C_LABEL(trap)
	jmp	2b			# re-check: trap() may post another AST
#ifndef DIAGNOSTIC
1:	INTRFASTEXIT
#else /* DIAGNOSTIC */
1:	cmpl	$IPL_NONE,CPUVAR(ILEVEL)
	jne	3f
	INTRFASTEXIT
3:	sti
	pushl	$4f
	call	_C_LABEL(printf)
	addl	$4,%esp
#ifdef DDB
	int	$3
#endif /* DDB */
	movl	$IPL_NONE,CPUVAR(ILEVEL)
	jmp	2b
4:	.asciz	"WARNING: SPL NOT LOWERED ON SYSCALL EXIT\n"
5:	.asciz	"WARNING: SPL NOT ZERO ON SYSCALL ENTRY\n"
#endif /* DIAGNOSTIC */
2178:
#if NNPX > 0
/*
 * Special interrupt handlers.  Someday intr0-intr15 will be used to count
 * interrupts.  We'll still need a special exception 16 handler.  The busy
 * latch stuff in probintr() can be moved to npxprobe().
 */

/*
 * Interrupt handler installed while probing for an FPU: counts the
 * interrupt, EOIs both PICs, and clears the coprocessor BUSY# latch.
 */
/* LINTSTUB: Func: void probeintr(void) */
NENTRY(probeintr)
	ss				# NOTE(review): %ss override — presumably
					# %ds can't be trusted during the probe;
					# confirm
	incl	_C_LABEL(npx_intrs_while_probing)
	pushl	%eax
	movb	$0x20,%al		# EOI (asm in strings loses cpp features)
	outb	%al,$0xa0		# IO_ICU2
	outb	%al,$0x20		# IO_ICU1
	movb	$0,%al
	outb	%al,$0xf0		# clear BUSY# latch
	popl	%eax
	iret
2198:
/*
 * Trap handler installed while probing for an FPU: counts the trap and
 * clears any pending FPU exceptions.
 */
/* LINTSTUB: Func: void probetrap(void) */
NENTRY(probetrap)
	ss				# %ss override; see probeintr
	incl	_C_LABEL(npx_traps_while_probing)
	fnclex				# clear pending FPU exceptions
	iret
2205:
/*
 * int npx586bug1(int a, int b)
 * Probe for the Pentium FDIV erratum: compute x - (x/y)*y in the FPU
 * and return it rounded to an int (expected 0 on a correct FPU for the
 * magic operands the caller supplies).
 */
/* LINTSTUB: Func: int npx586bug1(int a, int b) */
NENTRY(npx586bug1)
	fildl	4(%esp)			# x
	fildl	8(%esp)			# y
	fld	%st(1)
	fdiv	%st(1),%st		# x/y
	fmulp	%st,%st(1)		# (x/y)*y
	fsubrp	%st,%st(1)		# x-(x/y)*y
	pushl	$0
	fistpl	(%esp)			# round result to int on the stack
	popl	%eax
	ret
#endif /* NNPX > 0 */
CVSweb <webmaster@jp.NetBSD.org>