Annotation of src/sys/arch/i386/i386/locore.S, Revision 1.10
1.10 ! fvdl 1: /* $NetBSD: locore.S,v 1.9 2003/05/13 08:26:32 fvdl Exp $ */
1.1 fvdl 2:
3: /*-
4: * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Charles M. Hannum.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
39: /*-
40: * Copyright (c) 1990 The Regents of the University of California.
41: * All rights reserved.
42: *
43: * This code is derived from software contributed to Berkeley by
44: * William Jolitz.
45: *
46: * Redistribution and use in source and binary forms, with or without
47: * modification, are permitted provided that the following conditions
48: * are met:
49: * 1. Redistributions of source code must retain the above copyright
50: * notice, this list of conditions and the following disclaimer.
51: * 2. Redistributions in binary form must reproduce the above copyright
52: * notice, this list of conditions and the following disclaimer in the
53: * documentation and/or other materials provided with the distribution.
54: * 3. All advertising materials mentioning features or use of this software
55: * must display the following acknowledgement:
56: * This product includes software developed by the University of
57: * California, Berkeley and its contributors.
58: * 4. Neither the name of the University nor the names of its contributors
59: * may be used to endorse or promote products derived from this software
60: * without specific prior written permission.
61: *
62: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72: * SUCH DAMAGE.
73: *
74: * @(#)locore.s 7.3 (Berkeley) 5/13/91
75: */
76:
77: #include "opt_cputype.h"
78: #include "opt_ddb.h"
79: #include "opt_ipkdb.h"
80: #include "opt_vm86.h"
81: #include "opt_user_ldt.h"
82: #include "opt_dummy_nops.h"
83: #include "opt_compat_oldboot.h"
84: #include "opt_multiprocessor.h"
85: #include "opt_lockdebug.h"
86: #include "opt_realmem.h"
87:
88: #include "npx.h"
89: #include "assym.h"
90: #include "apm.h"
91: #include "lapic.h"
92: #include "ioapic.h"
1.8 fvdl 93: #include "ksyms.h"
1.1 fvdl 94:
95: #include <sys/errno.h>
96: #include <sys/syscall.h>
97:
98: #include <machine/cputypes.h>
99: #include <machine/param.h>
100: #include <machine/pte.h>
101: #include <machine/segments.h>
102: #include <machine/specialreg.h>
103: #include <machine/trap.h>
104: #include <machine/bootinfo.h>
105:
106: #if NLAPIC > 0
107: #include <machine/i82489reg.h>
108: #endif
109:
110: /* LINTSTUB: include <sys/types.h> */
111: /* LINTSTUB: include <machine/cpu.h> */
112: /* LINTSTUB: include <sys/systm.h> */
113:
114: #include <machine/asm.h>
115:
/*
 * Per-CPU accessor macros.  CPUVAR() presumably resolves to a
 * segment-relative reference into the current CPU's cpu_info
 * (defined in a machine header not visible here) -- TODO confirm.
 * On MP, SET_CURLWP also back-links the lwp to its cpu_info.
 */
116: #if defined(MULTIPROCESSOR)
117:
1.5 thorpej 118: #define SET_CURLWP(lwp,cpu) \
1.1 fvdl 119: movl CPUVAR(SELF),cpu ; \
1.5 thorpej 120: movl lwp,CPUVAR(CURLWP) ; \
121: movl cpu,L_CPU(lwp)
1.1 fvdl 122:
123: #else
124:
/* UP case: no cpu back-pointer needed; tcpu argument is ignored. */
/* NOTE(review): GET_CURLWP is only defined in the UP branch here;
 * the MP definition presumably lives elsewhere -- verify. */
1.5 thorpej 125: #define SET_CURLWP(lwp,tcpu) movl lwp,CPUVAR(CURLWP)
126: #define GET_CURLWP(reg) movl CPUVAR(CURLWP),reg
1.1 fvdl 127:
128: #endif
129:
/* Current pcb pointer, used by the copy primitives' on-fault protocol. */
130: #define GET_CURPCB(reg) movl CPUVAR(CURPCB),reg
131: #define SET_CURPCB(reg) movl reg,CPUVAR(CURPCB)
132:
133: #define CLEAR_RESCHED(reg) movl reg,CPUVAR(RESCHED)
134:
135: /* XXX temporary kluge; these should not be here */
136: /* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */
137: #include <dev/isa/isareg.h>
138:
139:
140: /* Disallow old names for REALBASEMEM */
141: #ifdef BIOSBASEMEM
142: #error BIOSBASEMEM option deprecated; use REALBASEMEM only if memory size reported by latest boot block is incorrect
143: #endif
144:
145: /* Disallow old names for REALEXTMEM */
146: #ifdef EXTMEM_SIZE
147: #error EXTMEM_SIZE option deprecated; use REALEXTMEM only if memory size reported by latest boot block is incorrect
148: #endif
149: #ifdef BIOSEXTMEM
150: #error BIOSEXTMEM option deprecated; use REALEXTMEM only if memory size reported by latest boot block is incorrect
151: #endif
152:
153: #include <machine/frameasm.h>
154:
155:
156: #ifdef MULTIPROCESSOR
157: #include <machine/i82489reg.h>
158: #endif
159:
160: /*
161: * PTmap is recursive pagemap at top of virtual address space.
162: * Within PTmap, the page directory can be found (third indirection).
163: *
164: * XXX 4 == sizeof pde
165: */
166: .set _C_LABEL(PTmap),(PDSLOT_PTE << PDSHIFT)
1.7 thorpej 167: .set _C_LABEL(PTD),(_C_LABEL(PTmap) + PDSLOT_PTE * PAGE_SIZE)
1.1 fvdl 168: .set _C_LABEL(PTDpde),(_C_LABEL(PTD) + PDSLOT_PTE * 4)
169:
170: /*
171: * APTmap, APTD is the alternate recursive pagemap.
172: * It's used when modifying another process's page tables.
173: *
174: * XXX 4 == sizeof pde
175: */
176: .set _C_LABEL(APTmap),(PDSLOT_APTE << PDSHIFT)
1.7 thorpej 177: .set _C_LABEL(APTD),(_C_LABEL(APTmap) + PDSLOT_APTE * PAGE_SIZE)
/*
 * Both pde slots (PTDpde and APTDpde) are addressed through the PTD
 * mapping: the page directory itself appears as a page table at PTD,
 * so entry i of the directory is at PTD + i*4.
 */
1.1 fvdl 178: .set _C_LABEL(APTDpde),(_C_LABEL(PTD) + PDSLOT_APTE * 4)
179:
180:
181: /*
182: * Initialization
183: */
184: .data
185:
186: .globl _C_LABEL(cpu)
187: .globl _C_LABEL(cpu_feature)
188: .globl _C_LABEL(esym),_C_LABEL(boothowto)
189: .globl _C_LABEL(bootinfo),_C_LABEL(atdevbase)
190: #ifdef COMPAT_OLDBOOT
191: .globl _C_LABEL(bootdev)
192: #endif
193: .globl _C_LABEL(proc0paddr),_C_LABEL(PTDpaddr)
194: .globl _C_LABEL(biosbasemem),_C_LABEL(biosextmem)
195: .globl _C_LABEL(gdt)
196: #ifdef I586_CPU
197: .globl _C_LABEL(idt)
198: #endif
199: .globl _C_LABEL(lapic_tpr)
200:
/*
 * Reserve one page-aligned page whose labels mirror the local APIC
 * register layout (offsets LAPIC_ID, LAPIC_TPRI, ...).  Presumably the
 * page is later remapped onto the APIC's physical registers so that
 * e.g. lapic_tpr accesses the real TPR -- TODO confirm against
 * lapic/pmap setup code.
 */
201: #if NLAPIC > 0
202: #ifdef __ELF__
1.7 thorpej 203: .align PAGE_SIZE
1.1 fvdl 204: #else
205: .align 12
206: #endif
207: .globl _C_LABEL(local_apic), _C_LABEL(lapic_id)
208: _C_LABEL(local_apic):
209: .space LAPIC_ID
210: _C_LABEL(lapic_id):
211: .long 0x00000000
212: .space LAPIC_TPRI-(LAPIC_ID+4)
213: _C_LABEL(lapic_tpr):
214: .space LAPIC_PPRI-LAPIC_TPRI
215: _C_LABEL(lapic_ppr):
216: .space LAPIC_ISR-LAPIC_PPRI
217: _C_LABEL(lapic_isr):
1.7 thorpej 218: .space PAGE_SIZE-LAPIC_ISR
1.1 fvdl 219: #else
220: _C_LABEL(lapic_tpr):
221: .long 0
222: #endif
223:
224:
225: _C_LABEL(cpu): .long 0 # are we 386, 386sx, or 486,
226: # or Pentium, or..
227: _C_LABEL(cpu_feature): .long 0 # feature flags from 'cpuid'
228: # instruction
229: _C_LABEL(esym): .long 0 # ptr to end of syms
230: _C_LABEL(atdevbase): .long 0 # location of start of iomem in virtual
231: _C_LABEL(proc0paddr): .long 0
232: _C_LABEL(PTDpaddr): .long 0 # paddr of PTD, for libkvm
233: #ifndef REALBASEMEM
234: _C_LABEL(biosbasemem): .long 0 # base memory reported by BIOS
235: #else
236: _C_LABEL(biosbasemem): .long REALBASEMEM
237: #endif
238: #ifndef REALEXTMEM
239: _C_LABEL(biosextmem): .long 0 # extended memory reported by BIOS
240: #else
241: _C_LABEL(biosextmem): .long REALEXTMEM
242: #endif
243:
/* 512-byte bootstrap stack; the stack grows down from the tmpstk label. */
244: .space 512
245: tmpstk:
246:
247:
/*
 * Before paging is enabled the kernel runs at its physical load
 * address; RELOC() converts a kernel virtual symbol to that physical
 * address by subtracting KERNBASE.
 */
248: #define _RELOC(x) ((x) - KERNBASE)
249: #define RELOC(x) _RELOC(_C_LABEL(x))
250:
251: .text
252: .globl _C_LABEL(kernel_text)
253: .set _C_LABEL(kernel_text),KERNTEXTOFF
254:
255: .globl start
/*
 * Kernel entry from the boot loader.  Paging is off and we run at the
 * physical load address, so all globals go through RELOC().  The word
 * written at 0x472 is the BIOS "warm boot" flag.
 */
256: start: movw $0x1234,0x472 # warm boot
257:
258: /*
259: * Load parameters from stack
260: * (howto, [bootdev], bootinfo, esym, basemem, extmem).
261: */
262: movl 4(%esp),%eax
263: movl %eax,RELOC(boothowto)
264: #ifdef COMPAT_OLDBOOT
265: movl 8(%esp),%eax
266: movl %eax,RELOC(bootdev)
267: #endif
268: movl 12(%esp),%eax
269:
/*
 * Copy the boot loader's bootinfo list (a count followed by pointers
 * to length-prefixed entries) into the kernel's bootinfo buffer.
 * Registers: %eax = cursor into the pointer array, %ebx = entries
 * remaining, %edi = destination cursor, %ecx = current entry.
 */
270: testl %eax, %eax
271: jz 1f
272: movl (%eax), %ebx /* number of entries */
273: movl $RELOC(bootinfo), %edi
274: movl %ebx, (%edi)
275: addl $4, %edi
276: 2:
277: testl %ebx, %ebx
278: jz 1f
279: addl $4, %eax
280: movl (%eax), %ecx /* address of entry */
281: pushl %eax
282: pushl (%ecx) /* len */
283: pushl %ecx
284: pushl %edi
285: addl (%ecx), %edi /* update dest pointer */
/* Would this entry overflow the buffer?  Bail out to the cleanup path. */
286: cmpl $_RELOC(_C_LABEL(bootinfo) + BOOTINFO_MAXSIZE), %edi
287: jg 2f
288: call _C_LABEL(memcpy)
289: addl $12, %esp
290: popl %eax
291: subl $1, %ebx
292: jmp 2b
293: 2: /* cleanup for overflow case */
/* Discard the 4 words pushed for the aborted memcpy, then shrink the
 * stored entry count by the number of entries not copied (%ebx). */
294: addl $16, %esp
295: movl $RELOC(bootinfo), %edi
296: subl %ebx, (%edi) /* correct number of entries */
297: 1:
298:
/* esym: boot loader passes a physical pointer; convert to virtual. */
299: movl 16(%esp),%eax
300: testl %eax,%eax
301: jz 1f
302: addl $KERNBASE,%eax
303: 1: movl %eax,RELOC(esym)
304:
/* Use boot-loader memory sizes only if not overridden by REALEXTMEM /
 * REALBASEMEM (which pre-initialize these variables to non-zero). */
305: movl RELOC(biosextmem),%eax
306: testl %eax,%eax
307: jnz 1f
308: movl 20(%esp),%eax
309: movl %eax,RELOC(biosextmem)
310: 1:
311: movl RELOC(biosbasemem),%eax
312: testl %eax,%eax
313: jnz 1f
314: movl 24(%esp),%eax
315: movl %eax,RELOC(biosbasemem)
316: 1:
317:
318: /* First, reset the PSL. */
319: pushl $PSL_MBO
320: popfl
321:
322: /* Clear segment registers; always null in proc0. */
323: xorl %eax,%eax
324: movw %ax,%fs
325: movw %ax,%gs
/* Pre-set cpuid level to -1 ("no cpuid"); try586 overwrites it below. */
326: decl %eax
327: movl %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
328:
329: /* Find out our CPU type. */
330:
/*
 * The EFLAGS.AC bit can only be toggled on 486 and later; if the
 * toggle does not stick we have a 386 (or a NexGen, checked next).
 * %ecx preserves the original flags and is restored afterwards.
 */
331: try386: /* Try to toggle alignment check flag; does not exist on 386. */
332: pushfl
333: popl %eax
334: movl %eax,%ecx
335: orl $PSL_AC,%eax
336: pushl %eax
337: popfl
338: pushfl
339: popl %eax
340: xorl %ecx,%eax
341: andl $PSL_AC,%eax
342: pushl %ecx
343: popfl
344:
345: testl %eax,%eax
346: jnz try486
347:
348: /*
349: * Try the test of a NexGen CPU -- ZF will not change on a DIV
350: * instruction on a NexGen, it will on an i386. Documented in
351: * Nx586 Processor Recognition Application Note, NexGen, Inc.
352: */
353: movl $0x5555,%eax
354: xorl %edx,%edx
355: movl $2,%ecx
356: divl %ecx
357: jnz is386
358:
359: isnx586:
360: /*
361: * Don't try cpuid, as Nx586s reportedly don't support the
362: * PSL_ID bit.
363: */
364: movl $CPU_NX586,RELOC(cpu)
365: jmp 2f
366:
367: is386:
368: movl $CPU_386,RELOC(cpu)
369: jmp 2f
370:
/* Same toggle trick with EFLAGS.ID: if it sticks, cpuid is available. */
371: try486: /* Try to toggle identification flag; does not exist on early 486s. */
372: pushfl
373: popl %eax
374: movl %eax,%ecx
375: xorl $PSL_ID,%eax
376: pushl %eax
377: popfl
378: pushfl
379: popl %eax
380: xorl %ecx,%eax
381: andl $PSL_ID,%eax
382: pushl %ecx
383: popfl
384:
385: testl %eax,%eax
386: jnz try586
387: is486: movl $CPU_486,RELOC(cpu)
388: /*
389: * Check Cyrix CPU
390: * Cyrix CPUs do not change the undefined flags following
391: * execution of the divide instruction which divides 5 by 2.
392: *
393: * Note: CPUID is enabled on M2, so it passes another way.
394: */
/* NOTE(review): the flags word pushed here is only popped on the
 * non-Cyrix path; on the trycyrix486 path it is abandoned on the
 * stack, which is harmless because %esp is reloaded with tmpstk at
 * label 2: below. */
395: pushfl
396: movl $0x5555, %eax
397: xorl %edx, %edx
398: movl $2, %ecx
399: clc
400: divl %ecx
401: jnc trycyrix486
402: popfl
403: jmp 2f
404: trycyrix486:
405: movl $CPU_6x86,RELOC(cpu) # set CPU type
406: /*
407: * Check for Cyrix 486 CPU by seeing if the flags change during a
408: * divide. This is documented in the Cx486SLC/e SMM Programmer's
409: * Guide.
410: */
411: xorl %edx,%edx
412: cmpl %edx,%edx # set flags to known state
413: pushfl
414: popl %ecx # store flags in ecx
415: movl $-1,%eax
416: movl $4,%ebx
417: divl %ebx # do a long division
418: pushfl
419: popl %eax
420: xorl %ecx,%eax # are the flags different?
421: testl $0x8d5,%eax # only check C|PF|AF|Z|N|V
422: jne 2f # yes; must be Cyrix 6x86 CPU
423: movl $CPU_486DLC,RELOC(cpu) # set CPU type
424:
/* Cyrix 486DLC cache configuration via the 0x22/0x23 index/data ports. */
425: #ifndef CYRIX_CACHE_WORKS
426: /* Disable caching of the ISA hole only. */
427: invd
428: movb $CCR0,%al # Configuration Register index (CCR0)
429: outb %al,$0x22
430: inb $0x23,%al
431: orb $(CCR0_NC1|CCR0_BARB),%al
432: movb %al,%ah
433: movb $CCR0,%al
434: outb %al,$0x22
435: movb %ah,%al
436: outb %al,$0x23
437: invd
438: #else /* CYRIX_CACHE_WORKS */
439: /* Set cache parameters */
440: invd # Start with guaranteed clean cache
441: movb $CCR0,%al # Configuration Register index (CCR0)
442: outb %al,$0x22
443: inb $0x23,%al
444: andb $~CCR0_NC0,%al
445: #ifndef CYRIX_CACHE_REALLY_WORKS
446: orb $(CCR0_NC1|CCR0_BARB),%al
447: #else
448: orb $CCR0_NC1,%al
449: #endif
450: movb %al,%ah
451: movb $CCR0,%al
452: outb %al,$0x22
453: movb %ah,%al
454: outb %al,$0x23
455: /* clear non-cacheable region 1 */
456: movb $(NCR1+2),%al
457: outb %al,$0x22
458: movb $NCR_SIZE_0K,%al
459: outb %al,$0x23
460: /* clear non-cacheable region 2 */
461: movb $(NCR2+2),%al
462: outb %al,$0x22
463: movb $NCR_SIZE_0K,%al
464: outb %al,$0x23
465: /* clear non-cacheable region 3 */
466: movb $(NCR3+2),%al
467: outb %al,$0x22
468: movb $NCR_SIZE_0K,%al
469: outb %al,$0x23
470: /* clear non-cacheable region 4 */
471: movb $(NCR4+2),%al
472: outb %al,$0x22
473: movb $NCR_SIZE_0K,%al
474: outb %al,$0x23
475: /* enable caching in CR0 */
476: movl %cr0,%eax
477: andl $~(CR0_CD|CR0_NW),%eax
478: movl %eax,%cr0
479: invd
480: #endif /* CYRIX_CACHE_WORKS */
481:
482: jmp 2f
483:
/* cpuid leaf 0: %eax returns the maximum supported cpuid level. */
484: try586: /* Use the `cpuid' instruction. */
485: xorl %eax,%eax
486: cpuid
487: movl %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
488:
489: 2:
490: /*
491: * Finished with old stack; load new %esp now instead of later so we
492: * can trace this code without having to worry about the trace trap
493: * clobbering the memory test or the zeroing of the bss+bootstrap page
494: * tables.
495: *
496: * The boot program should check:
497: * text+data <= &stack_variable - more_space_for_stack
498: * text+data+bss+pad+space_for_page_tables <= end_of_memory
499: * Oops, the gdt is in the carcass of the boot program so clearing
500: * the rest of memory is still not possible.
501: */
502: movl $_RELOC(tmpstk),%esp # bootstrap stack end location
503:
504: /*
505: * Virtual address space of kernel:
506: *
507: * text | data | bss | [syms] | page dir | proc0 kstack
508: * 0 1 2 3
509: */
/* Byte offsets of the bootstrap area, relative to %esi (its base). */
1.7 thorpej 510: #define PROC0PDIR ((0) * PAGE_SIZE)
511: #define PROC0STACK ((1) * PAGE_SIZE)
512: #define SYSMAP ((1+UPAGES) * PAGE_SIZE)
513: #define TABLESIZE ((1+UPAGES) * PAGE_SIZE) /* + nkpde * PAGE_SIZE */
1.1 fvdl 514:
515: /* Find end of kernel image. */
516: movl $RELOC(end),%edi
1.8 fvdl 517: #if (NKSYMS || defined(DDB) || defined(LKM)) && !defined(SYMTAB_SPACE)
1.1 fvdl 518: /* Save the symbols (if loaded). */
519: movl RELOC(esym),%eax
520: testl %eax,%eax
521: jz 1f
522: subl $KERNBASE,%eax
523: movl %eax,%edi
524: 1:
525: #endif
526:
527: /* Calculate where to start the bootstrap tables. */
/* From here on: %edi = physical end of the kernel image,
 * %esi = page-aligned physical base of the bootstrap area. */
528: movl %edi,%esi # edi = esym ? esym : end
529: addl $PGOFSET,%esi # page align up
530: andl $~PGOFSET,%esi
531:
532: /*
533: * Calculate the size of the kernel page table directory, and
534: * how many entries it will have.
535: */
/* NOTE(review): the clamped count stays in %ecx only -- it is not
 * stored back to nkpde, yet the mapping code below re-reads
 * RELOC(nkpde).  Verify nkpde is guaranteed within
 * [NKPTP_MIN, NKPTP_MAX], otherwise the cleared and mapped sizes
 * could disagree. */
536: movl RELOC(nkpde),%ecx # get nkpde
537: cmpl $NKPTP_MIN,%ecx # larger than min?
538: jge 1f
539: movl $NKPTP_MIN,%ecx # set at min
540: jmp 2f
541: 1: cmpl $NKPTP_MAX,%ecx # larger than max?
542: jle 2f
543: movl $NKPTP_MAX,%ecx
544: 2:
545:
546: /* Clear memory for bootstrap tables. */
/* Zero from end-of-image (%edi) through the end of the bootstrap
 * area: bss padding + PD + proc0 stack + nkpde page tables. */
547: shll $PGSHIFT,%ecx
548: addl $TABLESIZE,%ecx
549: addl %esi,%ecx # end of tables
550: subl %edi,%ecx # size of tables
551: shrl $2,%ecx
552: xorl %eax,%eax
553: cld
554: rep
555: stosl
556:
557: /*
558: * fillkpt
559: * eax = pte (page frame | control | status)
560: * ebx = page table address
561: * ecx = number of pages to map
562: */
563: #define fillkpt \
1: movl %eax,(%ebx) ; \
1.7 thorpej 565: addl $PAGE_SIZE,%eax ; /* increment physical address */ \
1.1 fvdl 566: addl $4,%ebx ; /* next pte */ \
567: loop 1b ;
568:
569: /*
570: * Build initial page tables.
571: */
572: /* Calculate end of text segment, rounded to a page. */
573: leal (RELOC(etext)+PGOFSET),%edx
574: andl $~PGOFSET,%edx
575:
/* Start filling PTEs at the slot for KERNTEXTOFF; the low 1MB
 * (below the kernel) is deliberately left unmapped here. */
576: /* Skip over the first 1MB. */
577: movl $_RELOC(KERNTEXTOFF),%eax
578: movl %eax,%ecx
579: shrl $PGSHIFT,%ecx
580: leal (SYSMAP)(%esi,%ecx,4),%ebx
581:
582: /* Map the kernel text read-only. */
583: movl %edx,%ecx
584: subl %eax,%ecx
585: shrl $PGSHIFT,%ecx
586: orl $(PG_V|PG_KR),%eax
587: fillkpt
588:
589: /* Map the data, BSS, and bootstrap tables read-write. */
590: leal (PG_V|PG_KW)(%edx),%eax
591: movl RELOC(nkpde),%ecx
592: shll $PGSHIFT,%ecx
593: addl $TABLESIZE,%ecx
594: addl %esi,%ecx # end of tables
595: subl %edx,%ecx # subtract end of text
596: shrl $PGSHIFT,%ecx
597: fillkpt
598:
599: /* Map ISA I/O memory. */
600: movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set
601: movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s,
602: fillkpt
603:
604: /*
605: * Construct a page table directory.
606: */
/*
 * The same page tables are installed twice: at PDE 0 (identity-ish
 * low mapping, so the physical-address %eip remains valid the moment
 * paging turns on) and at PDSLOT_KERN (the real KERNBASE mapping).
 * The low mapping is torn down at begin: after the jump to high
 * addresses.
 */
607: /* Install PDEs for temporary double map of kernel. */
608: movl RELOC(nkpde),%ecx # for this many pde s,
609: leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
610: leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0,
611: fillkpt
612:
613: /* Map kernel PDEs. */
614: movl RELOC(nkpde),%ecx # for this many pde s,
615: leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # kernel pde offset
616: leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0,
617: fillkpt
618:
619: /* Install a PDE recursively mapping page directory as a page table! */
620: leal (PROC0PDIR+PG_V|PG_KW)(%esi),%eax # pte for ptd
621: movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot
622:
623: /* Save phys. addr of PTD, for libkvm. */
624: movl %esi,RELOC(PTDpaddr)
625:
626: /* Load base of page directory and enable mapping. */
627: movl %esi,%eax # phys address of ptd in proc 0
628: movl %eax,%cr3 # load ptd addr into mmu
629: movl %cr0,%eax # get control word
630: # enable paging & NPX emulation
631: orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax
632: movl %eax,%cr0 # and let's page NOW!
633:
/* The pushed address is the KERNBASE-relative (linked) address of
 * begin, so this ret transfers control into the high mapping. */
634: pushl $begin # jump to high mem
635: ret
636:
637: begin:
/* %esi still holds the physical base of the bootstrap area (PD). */
638: /* Now running relocated at KERNBASE. Remove double mapping. */
639: movl _C_LABEL(nkpde),%ecx # for this many pde s,
640: leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps!
641: addl $(KERNBASE), %ebx # now use relocated address
642: 1: movl $0,(%ebx)
643: addl $4,%ebx # next pde
644: loop 1b
645:
/* atdevbase = first virtual address after the bootstrap tables;
 * the ISA I/O memory PTEs filled above land there. */
646: /* Relocate atdevbase. */
647: movl _C_LABEL(nkpde),%edx
648: shll $PGSHIFT,%edx
649: addl $(TABLESIZE+KERNBASE),%edx
650: addl %esi,%edx
651: movl %edx,_C_LABEL(atdevbase)
652:
653: /* Set up bootstrap stack. */
654: leal (PROC0STACK+KERNBASE)(%esi),%eax
655: movl %eax,_C_LABEL(proc0paddr)
656: leal (USPACE-FRAMESIZE)(%eax),%esp
657: movl %esi,PCB_CR3(%eax) # pcb->pcb_cr3
658: xorl %ebp,%ebp # mark end of frames
659:
/* initgdt(tempgdt): build a temporary gdt in stack space; it stays
 * allocated across the init386 call and is popped together with
 * init386's argument below. */
660: subl $NGDT*8, %esp # space for temporary gdt
661: pushl %esp
662: call _C_LABEL(initgdt)
663: addl $4,%esp
664:
/* init386(first_avail): first physical address past the bootstrap
 * stack and page tables. */
665: movl _C_LABEL(nkpde),%eax
666: shll $PGSHIFT,%eax
667: addl $TABLESIZE,%eax
668: addl %esi,%eax # skip past stack and page tables
669:
670: pushl %eax
671: call _C_LABEL(init386) # wire 386 chip for unix operation
672: addl $4+NGDT*8,%esp # pop temporary gdt
673:
674: #ifdef SAFARI_FIFO_HACK
675: movb $5,%al
676: movw $0x37b,%dx
677: outb %al,%dx
678: movw $0x37f,%dx
679: inb %dx,%al
680: movb %al,%cl
681:
682: orb $1,%cl
683:
684: movb $5,%al
685: movw $0x37b,%dx
686: outb %al,%dx
687: movw $0x37f,%dx
688: movb %cl,%al
689: outb %al,%dx
690: #endif /* SAFARI_FIFO_HACK */
691:
692: call _C_LABEL(main)
693:
694: /*
695: * void proc_trampoline(void);
696: * This is a trampoline function pushed onto the stack of a newly created
697: * process in order to do some additional setup. The trampoline is entered by
698: * cpu_switch()ing to the process, so we abuse the callee-saved registers used
699: * by cpu_switch() to store the information about the stub to call.
700: * NOTE: This function does not have a normal calling sequence!
701: */
702: /* LINTSTUB: Func: void proc_trampoline(void) */
703: NENTRY(proc_trampoline)
704: #ifdef MULTIPROCESSOR
705: call _C_LABEL(proc_trampoline_mp)
706: #endif
707: movl $IPL_NONE,CPUVAR(ILEVEL)
/* %esi = function to call, %ebx = its sole argument (stashed in the
 * callee-saved registers by the code that built this lwp's context).
 * After it returns, INTRFASTEXIT "returns" through the trap frame
 * prepared on the stack -- hence NOTREACHED. */
708: pushl %ebx
709: call *%esi
710: addl $4,%esp
711: INTRFASTEXIT
712: /* NOTREACHED */
713:
714: /*****************************************************************************/
715:
716: /*
717: * Signal trampoline; copied to top of user stack.
718: */
/* Executed in user mode.  esigcode marks the end so the kernel knows
 * how many bytes of this code to copy out. */
719: /* LINTSTUB: Var: char sigcode[1], esigcode[1]; */
720: NENTRY(sigcode)
721: /*
722: * Handler has returned here as if we called it. The sigcontext
723: * is on the stack after the 3 args "we" pushed.
724: */
725: leal 12(%esp),%eax # get pointer to sigcontext
726: movl %eax,4(%esp) # put it in the argument slot
727: # fake return address already there
728: movl $SYS___sigreturn14,%eax
729: int $0x80 # enter kernel with args on stack
/* Only reached if sigreturn itself failed: terminate the process. */
730: movl $SYS_exit,%eax
731: int $0x80 # exit if sigreturn fails
732: .globl _C_LABEL(esigcode)
733: _C_LABEL(esigcode):
734:
735: /*****************************************************************************/
736:
737: /*
738: * The following primitives are used to fill and copy regions of memory.
739: */
740:
741: /*
742: * XXX No section 9 man page for fillw.
743: * fillw seems to be very sparsely used (only in pccons it seems.)
744: * One wonders if it couldn't be done without.
745: * -- Perry Metzger, May 7, 2001
746: */
747: /*
748: * void fillw(short pattern, void *addr, size_t len);
749: * Write len copies of pattern at addr.
750: */
/* len counts 16-bit words, not bytes.  The pattern is replicated into
 * both halves of %eax so the bulk of the fill can run as 32-bit
 * stores (len/2 stosl), with one trailing stosw if len is odd. */
751: /* LINTSTUB: Func: void fillw(short pattern, void *addr, size_t len) */
752: ENTRY(fillw)
753: pushl %edi
754: movl 8(%esp),%eax
755: movl 12(%esp),%edi
756: movw %ax,%cx
757: rorl $16,%eax
758: movw %cx,%ax
759: cld
760: movl 16(%esp),%ecx
761: shrl %ecx # do longwords
762: rep
763: stosl
764: movl 16(%esp),%ecx
765: andl $1,%ecx # do remainder
766: rep
767: stosw
768: popl %edi
769: ret
770:
771: /*
772: * int kcopy(const void *from, void *to, size_t len);
773: * Copy len bytes, abort on fault.
774: */
/*
 * Fault-safe kernel-to-kernel copy: the previous pcb_onfault is saved
 * on the stack and copy_fault is installed, so a page fault during
 * the copy unwinds through copy_fault and returns EFAULT instead of
 * panicking.  Returns 0 on success.
 */
775: /* LINTSTUB: Func: int kcopy(const void *from, void *to, size_t len) */
776: ENTRY(kcopy)
777: pushl %esi
778: pushl %edi
779: GET_CURPCB(%eax) # load curpcb into eax and set on-fault
780: pushl PCB_ONFAULT(%eax)
781: movl $_C_LABEL(copy_fault), PCB_ONFAULT(%eax)
782:
783: movl 16(%esp),%esi
784: movl 20(%esp),%edi
785: movl 24(%esp),%ecx
/* Unsigned (dst - src) < len means the regions overlap with dst
 * above src, so a forward copy would clobber the source: go to the
 * backward-copy path at 1:. */
786: movl %edi,%eax
787: subl %esi,%eax
788: cmpl %ecx,%eax # overlapping?
789: jb 1f
790: cld # nope, copy forward
791: shrl $2,%ecx # copy by 32-bit words
792: rep
793: movsl
794: movl 24(%esp),%ecx
795: andl $3,%ecx # any bytes left?
796: rep
797: movsb
798:
/* Restore the saved pcb_onfault (still on the stack) and return 0. */
799: GET_CURPCB(%edx) # XXX save curpcb?
800: popl PCB_ONFAULT(%edx)
801: popl %edi
802: popl %esi
803: xorl %eax,%eax
804: ret
805:
/* Backward copy: do the (len % 4) tail bytes first, then the aligned
 * longwords, all with the direction flag set; clear it when done. */
806: ALIGN_TEXT
807: 1: addl %ecx,%edi # copy backward
808: addl %ecx,%esi
809: std
810: andl $3,%ecx # any fractional bytes?
811: decl %edi
812: decl %esi
813: rep
814: movsb
815: movl 24(%esp),%ecx # copy remainder by 32-bit words
816: shrl $2,%ecx
817: subl $3,%esi
818: subl $3,%edi
819: rep
820: movsl
821: cld
822:
823: GET_CURPCB(%edx)
824: popl PCB_ONFAULT(%edx)
825: popl %edi
826: popl %esi
827: xorl %eax,%eax
828: ret
829:
830: /*****************************************************************************/
831:
832: /*
833: * The following primitives are used to copy data in and out of the user's
834: * address space.
835: */
836:
837: /*
838: * Default to the lowest-common-denominator. We will improve it
839: * later.
840: */
/* Compile-time default: the first (oldest) configured CPU class wins,
 * since an i386 kernel must use the PTE-checking copyout. */
841: #if defined(I386_CPU)
842: #define DEFAULT_COPYOUT _C_LABEL(i386_copyout)
843: #define DEFAULT_COPYIN _C_LABEL(i386_copyin)
844: #elif defined(I486_CPU)
845: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout)
846: #define DEFAULT_COPYIN _C_LABEL(i386_copyin)
847: #elif defined(I586_CPU)
848: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout) /* XXX */
849: #define DEFAULT_COPYIN _C_LABEL(i386_copyin) /* XXX */
850: #elif defined(I686_CPU)
851: #define DEFAULT_COPYOUT _C_LABEL(i486_copyout) /* XXX */
852: #define DEFAULT_COPYIN _C_LABEL(i386_copyin) /* XXX */
853: #endif
854:
/* Run-time dispatch pointers; presumably rewritten once the real CPU
 * class is known -- confirm against cpu identification code. */
855: .data
856:
857: .globl _C_LABEL(copyout_func)
858: _C_LABEL(copyout_func):
859: .long DEFAULT_COPYOUT
860:
861: .globl _C_LABEL(copyin_func)
862: _C_LABEL(copyin_func):
863: .long DEFAULT_COPYIN
864:
865: .text
866:
867: /*
868: * int copyout(const void *from, void *to, size_t len);
869: * Copy len bytes into the user's address space.
870: * see copyout(9)
871: */
/* Tail-jump through the dispatch pointer; the target sees the
 * caller's original stack frame unchanged. */
872: /* LINTSTUB: Func: int copyout(const void *kaddr, void *uaddr, size_t len) */
873: ENTRY(copyout)
874: jmp *_C_LABEL(copyout_func)
875:
876: #if defined(I386_CPU)
/*
 * copyout for the 80386, which ignores page write protection in
 * supervisor mode, so each destination PTE must be checked by hand.
 * The pushed 0 doubles as the value later popped into PCB_ONFAULT to
 * clear the fault handler on exit.
 */
877: /* LINTSTUB: Func: int i386_copyout(const void *kaddr, void *uaddr, size_t len) */
878: ENTRY(i386_copyout)
879: pushl %esi
880: pushl %edi
881: pushl $0
882:
883: movl 16(%esp),%esi
884: movl 20(%esp),%edi
885: movl 24(%esp),%eax
886:
887: /*
888: * We check that the end of the destination buffer is not past the end
889: * of the user's address space. If it's not, then we only need to
890: * check that each page is writable. The 486 will do this for us; the
891: * 386 will not. (We assume that pages in user space that are not
892: * writable by the user are not writable by the kernel either.)
893: */
/* jc catches wraparound of dst+len; ja catches dst+len beyond the
 * user address space (unsigned compares throughout). */
894: movl %edi,%edx
895: addl %eax,%edx
896: jc _C_LABEL(copy_efault)
897: cmpl $VM_MAXUSER_ADDRESS,%edx
898: ja _C_LABEL(copy_efault)
899:
900: testl %eax,%eax # anything to do?
901: jz 3f
902:
903: /*
904: * We have to check each PTE for (write) permission, since the CPU
905: * doesn't do it for us.
906: */
907:
/* %ecx = number of pages spanned, minus one (loop ends when it goes
 * negative); %edi is repurposed as a virtual page number so the PTE
 * can be indexed via the recursive PTmap window. */
908: /* Compute number of pages. */
909: movl %edi,%ecx
910: andl $PGOFSET,%ecx
911: addl %eax,%ecx
912: decl %ecx
913: shrl $PGSHIFT,%ecx
914:
915: /* Compute PTE offset for start address. */
916: shrl $PGSHIFT,%edi
917:
/* A fault while probing a PTE (unmapped page table) also lands at 2f,
 * where trapwrite tries to fault the page in/make it writable. */
918: GET_CURPCB(%edx)
919: movl $2f,PCB_ONFAULT(%edx)
920:
921: 1: /* Check PTE for each page. */
922: testb $PG_RW,_C_LABEL(PTmap)(,%edi,4)
923: jz 2f
924:
925: 4: incl %edi
926: decl %ecx
927: jns 1b
928:
/* All pages verified: restore the real dst/len and fall into the
 * common copy path. */
929: movl 20(%esp),%edi
930: movl 24(%esp),%eax
931: jmp 3f
932:
933: 2: /* Simulate a trap. */
934: pushl %ecx
935: movl %edi,%eax
936: shll $PGSHIFT,%eax
937: pushl %eax
938: call _C_LABEL(trapwrite) # trapwrite(addr)
939: addl $4,%esp # pop argument
940: popl %ecx
941: testl %eax,%eax # if not ok, return EFAULT
942: jz 4b
943: jmp _C_LABEL(copy_efault)
944:
945: 3: GET_CURPCB(%edx)
946: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%edx)
947:
948: /* bcopy(%esi, %edi, %eax); */
949: cld
950: movl %eax,%ecx
951: shrl $2,%ecx
952: rep
953: movsl
954: movl %eax,%ecx
955: andl $3,%ecx
956: rep
957: movsb
958:
/* Pop the 0 pushed at entry into PCB_ONFAULT, clearing the handler. */
959: popl PCB_ONFAULT(%edx)
960: popl %edi
961: popl %esi
962: xorl %eax,%eax
963: ret
964: #endif /* I386_CPU */
965:
966: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
/*
 * copyout for 486 and later: the CPU honors page write protection in
 * supervisor mode, so only the range check is needed; any protection
 * or mapping fault during the copy unwinds through copy_fault.  The
 * pushed 0 is the value later popped into PCB_ONFAULT to clear it.
 */
967: /* LINTSTUB: Func: int i486_copyout(const void *kaddr, void *uaddr, size_t len) */
968: ENTRY(i486_copyout)
969: pushl %esi
970: pushl %edi
971: pushl $0
972:
973: movl 16(%esp),%esi
974: movl 20(%esp),%edi
975: movl 24(%esp),%eax
976:
977: /*
978: * We check that the end of the destination buffer is not past the end
979: * of the user's address space.
980: */
/* jc catches dst+len wraparound; ja catches overflow past the user
 * address space (both unsigned). */
981: movl %edi,%edx
982: addl %eax,%edx
983: jc _C_LABEL(copy_efault)
984: cmpl $VM_MAXUSER_ADDRESS,%edx
985: ja _C_LABEL(copy_efault)
986:
987: GET_CURPCB(%edx)
988: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%edx)
989:
990: /* bcopy(%esi, %edi, %eax); */
991: cld
992: movl %eax,%ecx
993: shrl $2,%ecx
994: rep
995: movsl
996: movl %eax,%ecx
997: andl $3,%ecx
998: rep
999: movsb
1000:
1001: popl PCB_ONFAULT(%edx)
1002: popl %edi
1003: popl %esi
1004: xorl %eax,%eax
1005: ret
1006: #endif /* I486_CPU || I586_CPU || I686_CPU */
1007:
1008: /*
1009: * int copyin(const void *from, void *to, size_t len);
1010: * Copy len bytes from the user's address space.
1011: * see copyin(9)
1012: */
/* Tail-jump through the run-time dispatch pointer set up above. */
1013: /* LINTSTUB: Func: int copyin(const void *uaddr, void *kaddr, size_t len) */
1014: ENTRY(copyin)
1015: jmp *_C_LABEL(copyin_func)
1016:
1017: #if defined(I386_CPU) || defined(I486_CPU) || defined(I586_CPU) || \
1018: defined(I686_CPU)
/*
 * copyin works the same on all CPU classes: read permission is
 * enforced by the MMU even on a 386, so only the source-range check
 * is needed.  The pushed 0 is later popped into PCB_ONFAULT.
 */
1019: /* LINTSTUB: Func: int i386_copyin(const void *uaddr, void *kaddr, size_t len) */
1020: ENTRY(i386_copyin)
1021: pushl %esi
1022: pushl %edi
1023: GET_CURPCB(%eax)
1024: pushl $0
1025: movl $_C_LABEL(copy_fault),PCB_ONFAULT(%eax)
1026:
1027: movl 16(%esp),%esi
1028: movl 20(%esp),%edi
1029: movl 24(%esp),%eax
1030:
1031: /*
1032: * We check that the end of the destination buffer is not past the end
1033: * of the user's address space. If it's not, then we only need to
1034: * check that each page is readable, and the CPU will do that for us.
1035: */
/* Range check is on the *source* (user) buffer: src+len must neither
 * wrap (jc) nor extend past VM_MAXUSER_ADDRESS (ja, unsigned). */
1036: movl %esi,%edx
1037: addl %eax,%edx
1038: jc _C_LABEL(copy_efault)
1039: cmpl $VM_MAXUSER_ADDRESS,%edx
1040: ja _C_LABEL(copy_efault)
1041:
1042: /* bcopy(%esi, %edi, %eax); */
1043: cld
1044: movl %eax,%ecx
1045: shrl $2,%ecx
1046: rep
1047: movsl
1048: movl %eax,%ecx
1049: andl $3,%ecx
1050: rep
1051: movsb
1052:
1053: GET_CURPCB(%edx)
1054: popl PCB_ONFAULT(%edx)
1055: popl %edi
1056: popl %esi
1057: xorl %eax,%eax
1058: ret
1059: #endif /* I386_CPU || I486_CPU || I586_CPU || I686_CPU */
1060:
/*
 * Common error exits for the copy primitives.  copy_efault sets the
 * return value and deliberately falls through into copy_fault, which
 * is also the PCB_ONFAULT target for faults taken mid-copy (in that
 * case %eax was already set by the trap code).  Both paths expect the
 * copy routines' stack layout: saved onfault value, %edi, %esi.
 */
1061: /* LINTSTUB: Ignore */
1062: NENTRY(copy_efault)
1063: movl $EFAULT,%eax
1064:
1065: /* LINTSTUB: Ignore */
1066: NENTRY(copy_fault)
1067: GET_CURPCB(%edx)
1068: popl PCB_ONFAULT(%edx)
1069: popl %edi
1070: popl %esi
1071: ret
1072:
1073: /*
1074: * int copyoutstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
1075: * Copy a NUL-terminated string, at most maxlen characters long, into the
1076: * user's address space. Return the number of characters copied (including the
1077: * NUL) in *lencopied. If the string is too long, return ENAMETOOLONG; else
1078: * return 0 or EFAULT.
1079: * see copyoutstr(9)
1080: */
1081: /* LINTSTUB: Func: int copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) */
/*
 * Register usage:
 *	%esi - source (kernel) address
 *	%edi - destination (user) address
 *	%edx - bytes remaining
 * All error exits go through copystr_efault/copystr_fault, which clear
 * pcb_onfault, store *lencopied and pop %edi/%esi.
 */
1082: ENTRY(copyoutstr)
1083: 	pushl	%esi
1084: 	pushl	%edi
1085: 
1086: 	movl	12(%esp),%esi		# esi = from
1087: 	movl	16(%esp),%edi		# edi = to
1088: 	movl	20(%esp),%edx		# edx = maxlen
1089: 
1090: #if defined(I386_CPU)
1091: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1092: 	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
1093: 	jne	5f			# not a 386: use the simple path below
1094: #endif /* I486_CPU || I586_CPU || I686_CPU */
1095: 
/*
 * 386 path: the 386 ignores PTE write protection in supervisor mode,
 * so each destination page must be probed with trapwrite() by hand
 * before storing into it.
 */
1096: 	/* Compute number of bytes in first page. */
1097: 	movl	%edi,%eax
1098: 	andl	$PGOFSET,%eax
1.7 thorpej 1099: 	movl	$PAGE_SIZE,%ecx
1100: 	subl	%eax,%ecx		# ecx = PAGE_SIZE - (src % PAGE_SIZE)
1.1 fvdl 1101: 
1102: 	GET_CURPCB(%eax)
1103: 	movl	$6f,PCB_ONFAULT(%eax)	# faults jump to the trap simulation at 6f
1104: 
1105: 1:	/*
1106: 	 * Once per page, check that we are still within the bounds of user
1107: 	 * space, and check for a write fault.
1108: 	 */
1109: 	cmpl	$VM_MAXUSER_ADDRESS,%edi
1110: 	jae	_C_LABEL(copystr_efault)
1111: 
1112: 	/* Compute PTE offset. */
1113: 	movl	%edi,%eax
1114: 	shrl	$PGSHIFT,%eax		# calculate pte address
1115: 
1116: 	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
1117: 	jnz	2f			# page already writable
1118: 
1119: 6:	/* Simulate a trap. */
1120: 	pushl	%edx
1121: 	pushl	%edi
1122: 	call	_C_LABEL(trapwrite)	# trapwrite(addr)
1123: 	addl	$4,%esp			# clear argument from stack
1124: 	popl	%edx
1125: 	testl	%eax,%eax
1126: 	jnz	_C_LABEL(copystr_efault) # trapwrite() could not make it writable
1127: 
1128: 2:	/* Copy up to end of this page. */
1129: 	subl	%ecx,%edx		# predecrement total count
1130: 	jnc	3f
1131: 	addl	%edx,%ecx		# ecx += (edx - ecx) = edx
1132: 	xorl	%edx,%edx
1133: 
1134: 3:	decl	%ecx
1135: 	js	4f			# this page's chunk exhausted
1136: 	lodsb
1137: 	stosb
1138: 	testb	%al,%al
1139: 	jnz	3b
1140: 
1141: 	/* Success -- 0 byte reached. */
1142: 	addl	%ecx,%edx		# add back residual for this page
1143: 	xorl	%eax,%eax
1144: 	jmp	copystr_return
1145: 
1146: 4:	/* Go to next page, if any. */
1.7 thorpej 1147: 	movl	$PAGE_SIZE,%ecx
1.1 fvdl 1148: 	testl	%edx,%edx
1149: 	jnz	1b
1150: 
1151: 	/* edx is zero -- return ENAMETOOLONG. */
1152: 	movl	$ENAMETOOLONG,%eax
1153: 	jmp	copystr_return
1154: #endif /* I386_CPU */
1155: 
1156: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
/*
 * 486+ path: the CPU honors PTE write protection in supervisor mode,
 * so just clamp the count to user space and let any fault be caught
 * via pcb_onfault (copystr_fault).
 */
1157: 5:	GET_CURPCB(%eax)
1158: 	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%eax)
1159: 	/*
1160: 	 * Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
1161: 	 */
1162: 	movl	$VM_MAXUSER_ADDRESS,%eax
1163: 	subl	%edi,%eax
1164: 	cmpl	%edx,%eax
1165: 	jae	1f
1166: 	movl	%eax,%edx
1167: 	movl	%eax,20(%esp)		# clamped maxlen; copystr_return reads it
1168: 
1169: 1:	incl	%edx			# pre-increment for the decl-first loop
1170: 	cld
1171: 
1172: 1:	decl	%edx
1173: 	jz	2f
1174: 	lodsb
1175: 	stosb
1176: 	testb	%al,%al
1177: 	jnz	1b
1178: 
1179: 	/* Success -- 0 byte reached. */
1180: 	decl	%edx
1181: 	xorl	%eax,%eax
1182: 	jmp	copystr_return
1183: 
1184: 2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
1185: 	cmpl	$VM_MAXUSER_ADDRESS,%edi
1186: 	jae	_C_LABEL(copystr_efault) # stopped at the end of user space
1187: 	movl	$ENAMETOOLONG,%eax
1188: 	jmp	copystr_return
1189: #endif /* I486_CPU || I586_CPU || I686_CPU */
1190:
1191: /*
1192: * int copyinstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
1193: * Copy a NUL-terminated string, at most maxlen characters long, from the
1194: * user's address space. Return the number of characters copied (including the
1195: * NUL) in *lencopied. If the string is too long, return ENAMETOOLONG; else
1196: * return 0 or EFAULT.
1197: * see copyinstr(9)
1198: */
1199: /* LINTSTUB: Func: int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) */
/*
 * Register usage:
 *	%esi - source (user) address
 *	%edi - destination (kernel) address
 *	%edx - bytes remaining
 * Read faults are caught via pcb_onfault -> copystr_fault.  No per-page
 * probing is needed: reads honor protection on every CPU class.
 */
1200: ENTRY(copyinstr)
1201: 	pushl	%esi
1202: 	pushl	%edi
1203: 	GET_CURPCB(%ecx)
1204: 	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%ecx)
1205: 
1206: 	movl	12(%esp),%esi		# %esi = from
1207: 	movl	16(%esp),%edi		# %edi = to
1208: 	movl	20(%esp),%edx		# %edx = maxlen
1209: 
1210: 	/*
1211: 	 * Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
1212: 	 */
1213: 	movl	$VM_MAXUSER_ADDRESS,%eax
1214: 	subl	%esi,%eax
1215: 	cmpl	%edx,%eax
1216: 	jae	1f
1217: 	movl	%eax,%edx
1218: 	movl	%eax,20(%esp)		# clamped maxlen; copystr_return reads it
1219: 
1220: 1:	incl	%edx			# pre-increment for the decl-first loop
1221: 	cld
1222: 
1223: 1:	decl	%edx
1224: 	jz	2f
1225: 	lodsb
1226: 	stosb
1227: 	testb	%al,%al
1228: 	jnz	1b
1229: 
1230: 	/* Success -- 0 byte reached. */
1231: 	decl	%edx
1232: 	xorl	%eax,%eax
1233: 	jmp	copystr_return
1234: 
1235: 2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
1236: 	cmpl	$VM_MAXUSER_ADDRESS,%esi
1237: 	jae	_C_LABEL(copystr_efault) # stopped at the end of user space
1238: 	movl	$ENAMETOOLONG,%eax
1239: 	jmp	copystr_return
1240:
1241: /* LINTSTUB: Ignore */
/*
 * copystr_efault: error exit for the *copy*str routines when an address
 * range check fails.  Loads EFAULT and FALLS THROUGH into copystr_fault.
 */
1242: NENTRY(copystr_efault)
1243: 	movl	$EFAULT,%eax
1244: 
1245: /* LINTSTUB: Ignore */
/*
 * copystr_fault / copystr_return: common exit for the *copy*str routines,
 * reached either via pcb_onfault (with the error already in %eax) or by
 * direct jump from the success/ENAMETOOLONG paths.  %edx holds the residual
 * byte count; chars copied = (possibly clamped) maxlen - residual.
 */
1246: NENTRY(copystr_fault)
1247: copystr_return:
1248: 	/* Set *lencopied and return %eax. */
1249: 	GET_CURPCB(%ecx)
1250: 	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
1251: 	movl	20(%esp),%ecx		# (possibly clamped) maxlen
1252: 	subl	%edx,%ecx		# ecx = characters actually copied
1253: 	movl	24(%esp),%edx
1254: 	testl	%edx,%edx
1255: 	jz	8f			# NULL lencopied pointer: skip store
1256: 	movl	%ecx,(%edx)
1257: 
1258: 8:	popl	%edi
1259: 	popl	%esi
1260: 	ret
1261:
1262: /*
1263: * int copystr(const void *from, void *to, size_t maxlen, size_t *lencopied);
1264: * Copy a NUL-terminated string, at most maxlen characters long. Return the
1265: * number of characters copied (including the NUL) in *lencopied. If the
1266: * string is too long, return ENAMETOOLONG; else return 0.
1267: * see copystr(9)
1268: */
1269: /* LINTSTUB: Func: int copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done) */
/*
 * Kernel-to-kernel variant: no fault handler and no address clamping,
 * so the only failure mode is ENAMETOOLONG.
 */
1270: ENTRY(copystr)
1271: 	pushl	%esi
1272: 	pushl	%edi
1273: 
1274: 	movl	12(%esp),%esi		# esi = from
1275: 	movl	16(%esp),%edi		# edi = to
1276: 	movl	20(%esp),%edx		# edx = maxlen
1277: 	incl	%edx			# pre-increment for the decl-first loop
1278: 	cld
1279: 
1280: 1:	decl	%edx
1281: 	jz	4f
1282: 	lodsb
1283: 	stosb
1284: 	testb	%al,%al
1285: 	jnz	1b
1286: 
1287: 	/* Success -- 0 byte reached. */
1288: 	decl	%edx
1289: 	xorl	%eax,%eax
1290: 	jmp	6f
1291: 
1292: 4:	/* edx is zero -- return ENAMETOOLONG. */
1293: 	movl	$ENAMETOOLONG,%eax
1294: 
1295: 6:	/* Set *lencopied and return %eax. */
1296: 	movl	20(%esp),%ecx
1297: 	subl	%edx,%ecx		# ecx = characters actually copied
1298: 	movl	24(%esp),%edx
1299: 	testl	%edx,%edx
1300: 	jz	7f			# NULL lencopied pointer: skip store
1301: 	movl	%ecx,(%edx)
1302: 
1303: 7:	popl	%edi
1304: 	popl	%esi
1305: 	ret
1306:
1307: /*
1308: * long fuword(const void *uaddr);
1309: * Fetch an int from the user's address space.
1310: * see fuword(9)
1311: */
1312: /* LINTSTUB: Func: long fuword(const void *base) */
/* Returns the fetched word, or -1 on fault (via fusufault). */
1313: ENTRY(fuword)
1314: 	movl	4(%esp),%edx
1315: 	cmpl	$VM_MAXUSER_ADDRESS-4,%edx
1316: 	ja	_C_LABEL(fusuaddrfault)	# range check before touching user VA
1317: 	GET_CURPCB(%ecx)
1318: 	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1319: 	movl	(%edx),%eax		# may fault; recovered via fusufault
1320: 	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
1321: 	ret
1322:
1323: /*
1324: * int fusword(const void *uaddr);
1325: * Fetch a short from the user's address space.
1326: * see fusword(9)
1327: */
1328: /* LINTSTUB: Func: int fusword(const void *base) */
/* Returns the zero-extended short, or -1 on fault (via fusufault). */
1329: ENTRY(fusword)
1330: 	movl	4(%esp),%edx
1331: 	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
1332: 	ja	_C_LABEL(fusuaddrfault)	# range check before touching user VA
1333: 	GET_CURPCB(%ecx)
1334: 	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1335: 	movzwl	(%edx),%eax		# may fault; recovered via fusufault
1336: 	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
1337: 	ret
1338:
1339: /*
1340: * int fuswintr(const void *uaddr);
1341: * Fetch a short from the user's address space. Can be called during an
1342: * interrupt.
1343: * see fuswintr(9)
1344: */
1345: /* LINTSTUB: Func: int fuswintr(const void *base) */
/*
 * Like fusword(), but uses fusubail as the onfault handler so that
 * trap() bails out immediately instead of attempting to page fault
 * (we may be in interrupt context here).
 */
1346: ENTRY(fuswintr)
1347: 	movl	4(%esp),%edx
1348: 	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
1349: 	ja	_C_LABEL(fusuaddrfault)
1.5 thorpej 1350: 	movl	CPUVAR(CURLWP),%ecx
1351: 	movl	L_ADDR(%ecx),%ecx	# %ecx = curlwp's pcb
1.1 fvdl 1352: 	movl	$_C_LABEL(fusubail),PCB_ONFAULT(%ecx)
1353: 	movzwl	(%edx),%eax		# may fault; recovered via fusubail
1354: 	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
1355: 	ret
1356:
1357: /*
1358: * int fubyte(const void *uaddr);
1359: * Fetch a byte from the user's address space.
1360: * see fubyte(9)
1361: */
1362: /* LINTSTUB: Func: int fubyte(const void *base) */
/* Returns the zero-extended byte, or -1 on fault (via fusufault). */
1363: ENTRY(fubyte)
1364: 	movl	4(%esp),%edx
1365: 	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
1366: 	ja	_C_LABEL(fusuaddrfault)	# range check before touching user VA
1367: 	GET_CURPCB(%ecx)
1368: 	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1369: 	movzbl	(%edx),%eax		# may fault; recovered via fusufault
1370: 	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
1371: 	ret
1372:
1373: /*
1374: * Handle faults from [fs]u*(). Clean up and return -1.
1375: */
1376: /* LINTSTUB: Ignore */
/* Entered via pcb_onfault; callers left the pcb pointer in %ecx. */
1377: NENTRY(fusufault)
1378: 	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
1379: 	movl	$-1,%eax
1380: 	ret
1381:
1382: /*
1383: * Handle faults from [fs]u*(). Clean up and return -1. This differs from
1384: * fusufault() in that trap() will recognize it and return immediately rather
1385: * than trying to page fault.
1386: */
1387: /* LINTSTUB: Ignore */
/* Entered via pcb_onfault from the *intr variants; pcb pointer is in %ecx. */
1388: NENTRY(fusubail)
1389: 	movl	$0,PCB_ONFAULT(%ecx)	# disarm the fault handler
1390: 	movl	$-1,%eax
1391: 	ret
1392:
1393: /*
1394: * Handle earlier faults from [fs]u*(), due to out-of-range addresses.
1395: */
1396: /* LINTSTUB: Ignore */
/* No fault handler was armed yet, so there is nothing to clean up. */
1397: NENTRY(fusuaddrfault)
1398: 	movl	$-1,%eax
1399: 	ret
1400:
1401: /*
1402: * int suword(void *uaddr, long x);
1403: * Store an int in the user's address space.
1404: * see suword(9)
1405: */
1406: /* LINTSTUB: Func: int suword(void *base, long c) */
/* Returns 0 on success, -1 on fault or out-of-range address. */
1407: ENTRY(suword)
1408: 	movl	4(%esp),%edx
1409: 	cmpl	$VM_MAXUSER_ADDRESS-4,%edx
1410: 	ja	_C_LABEL(fusuaddrfault)	# range check before touching user VA
1411: 
1412: #if defined(I386_CPU)
1413: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1414: 	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
1415: 	jne	2f
1416: #endif /* I486_CPU || I586_CPU || I686_CPU */
1417: 
/*
 * 386 path: supervisor-mode writes ignore PTE protection, so probe the
 * page by hand (via trapwrite()) before storing.
 */
1418: 	GET_CURPCB(%eax)
1419: 	movl	$3f,PCB_ONFAULT(%eax)	# faults jump to the trap simulation at 3f
1420: 
1421: 	movl	%edx,%eax
1422: 	shrl	$PGSHIFT,%eax		# calculate pte address
1423: 	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
1424: 	jnz	1f			# page already writable
1425: 
1426: 3:	/* Simulate a trap. */
1427: 	pushl	%edx			# preserve %edx across the call
1428: 	pushl	%edx			# trapwrite() argument
1429: 	call	_C_LABEL(trapwrite)	# trapwrite(addr)
1430: 	addl	$4,%esp			# clear parameter from the stack
1431: 	popl	%edx
1432: 	GET_CURPCB(%ecx)		# fusufault expects the pcb in %ecx
1433: 	testl	%eax,%eax
1434: 	jnz	_C_LABEL(fusufault)	# trapwrite() could not make it writable
1435: 
1436: 1:	/* XXX also need to check the following 3 bytes for validity! */
1437: #endif
1438: 
1439: 2:	GET_CURPCB(%ecx)
1440: 	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1441: 
1442: 	movl	8(%esp),%eax
1443: 	movl	%eax,(%edx)		# may fault; recovered via fusufault
1444: 	xorl	%eax,%eax
1445: 	movl	%eax,PCB_ONFAULT(%ecx)	# disarm the fault handler
1446: 	ret
1447:
1448: /*
1449: * int susword(void *uaddr, short x);
1450: * Store a short in the user's address space.
1451: * see susword(9)
1452: */
1453: /* LINTSTUB: Func: int susword(void *base, short c) */
/* Returns 0 on success, -1 on fault or out-of-range address. */
1454: ENTRY(susword)
1455: 	movl	4(%esp),%edx
1456: 	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
1457: 	ja	_C_LABEL(fusuaddrfault)	# range check before touching user VA
1458: 
1459: #if defined(I386_CPU)
1460: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1461: 	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
1462: 	jne	2f
1463: #endif /* I486_CPU || I586_CPU || I686_CPU */
1464: 
/* 386 path: probe the page with trapwrite() before storing (see suword). */
1465: 	GET_CURPCB(%eax)
1466: 	movl	$3f,PCB_ONFAULT(%eax)	# faults jump to the trap simulation at 3f
1467: 
1468: 	movl	%edx,%eax
1469: 	shrl	$PGSHIFT,%eax		# calculate pte address
1470: 	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
1471: 	jnz	1f			# page already writable
1472: 
1473: 3:	/* Simulate a trap. */
1474: 	pushl	%edx			# preserve %edx across the call
1475: 	pushl	%edx			# trapwrite() argument
1476: 	call	_C_LABEL(trapwrite)	# trapwrite(addr)
1477: 	addl	$4,%esp			# clear parameter from the stack
1478: 	popl	%edx
1479: 	GET_CURPCB(%ecx)		# fusufault expects the pcb in %ecx
1480: 	testl	%eax,%eax
1481: 	jnz	_C_LABEL(fusufault)	# trapwrite() could not make it writable
1482: 
1483: 1:	/* XXX also need to check the following byte for validity! */
1484: #endif
1485: 
1486: 2:	GET_CURPCB(%ecx)
1487: 	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1488: 
1489: 	movl	8(%esp),%eax
1490: 	movw	%ax,(%edx)		# may fault; recovered via fusufault
1491: 	xorl	%eax,%eax
1492: 	movl	%eax,PCB_ONFAULT(%ecx)	# disarm the fault handler
1493: 	ret
1494:
1495: /*
1496: * int suswintr(void *uaddr, short x);
1497: * Store a short in the user's address space. Can be called during an
1498: * interrupt.
1499: * see suswintr(9)
1500: */
1501: /* LINTSTUB: Func: int suswintr(void *base, short c) */
/*
 * Like susword(), but uses fusubail as the onfault handler so trap()
 * returns immediately instead of paging (we may be in interrupt context);
 * consequently the 386 path cannot call trapwrite() and just bails.
 */
1502: ENTRY(suswintr)
1503: 	movl	4(%esp),%edx
1504: 	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
1505: 	ja	_C_LABEL(fusuaddrfault)
1.5 thorpej 1506: 	movl	CPUVAR(CURLWP),%ecx
1507: 	movl	L_ADDR(%ecx),%ecx	# %ecx = curlwp's pcb
1.1 fvdl 1508: 	movl	$_C_LABEL(fusubail),PCB_ONFAULT(%ecx)
1509: 
1510: #if defined(I386_CPU)
1511: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1512: 	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
1513: 	jne	2f
1514: #endif /* I486_CPU || I586_CPU || I686_CPU */
1515: 
1516: 	movl	%edx,%eax
1517: 	shrl	$PGSHIFT,%eax		# calculate pte address
1518: 	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
1519: 	jnz	1f			# page already writable
1520: 
1521: 	/* Simulate a trap. */
1522: 	jmp	_C_LABEL(fusubail)	# can't call trapwrite() from interrupt
1523: 
1524: 1:	/* XXX also need to check the following byte for validity! */
1525: #endif
1526: 
1527: 2:	movl	8(%esp),%eax
1528: 	movw	%ax,(%edx)		# may fault; recovered via fusubail
1529: 	xorl	%eax,%eax
1530: 	movl	%eax,PCB_ONFAULT(%ecx)	# disarm the fault handler
1531: 	ret
1532:
1533: /*
1534: * int subyte(void *uaddr, char x);
1535: * Store a byte in the user's address space.
1536: * see subyte(9)
1537: */
1538: /* LINTSTUB: Func: int subyte(void *base, int c) */
/* Returns 0 on success, -1 on fault or out-of-range address. */
1539: ENTRY(subyte)
1540: 	movl	4(%esp),%edx
1541: 	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
1542: 	ja	_C_LABEL(fusuaddrfault)	# range check before touching user VA
1543: 
1544: #if defined(I386_CPU)
1545: #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1546: 	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
1547: 	jne	2f
1548: #endif /* I486_CPU || I586_CPU || I686_CPU */
1549: 
/* 386 path: probe the page with trapwrite() before storing (see suword). */
1550: 	GET_CURPCB(%eax)
1551: 	movl	$3f,PCB_ONFAULT(%eax)	# faults jump to the trap simulation at 3f
1552: 
1553: 	movl	%edx,%eax
1554: 	shrl	$PGSHIFT,%eax		# calculate pte address
1555: 	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
1556: 	jnz	1f			# page already writable
1557: 
1558: 3:	/* Simulate a trap. */
1559: 	pushl	%edx			# preserve %edx across the call
1560: 	pushl	%edx			# trapwrite() argument
1561: 	call	_C_LABEL(trapwrite)	# trapwrite(addr)
1562: 	addl	$4,%esp			# clear parameter from the stack
1563: 	popl	%edx
1564: 	GET_CURPCB(%ecx)		# fusufault expects the pcb in %ecx
1565: 	testl	%eax,%eax
1566: 	jnz	_C_LABEL(fusufault)	# trapwrite() could not make it writable
1567: 
1568: 1:
1569: #endif
1570: 
1571: 2:	GET_CURPCB(%ecx)
1572: 	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
1573: 
1574: 	movb	8(%esp),%al
1575: 	movb	%al,(%edx)		# may fault; recovered via fusufault
1576: 	xorl	%eax,%eax
1577: 	movl	%eax,PCB_ONFAULT(%ecx)	# disarm the fault handler
1578: 	ret
1579:
1580: /*****************************************************************************/
1581: 
1582: /*
1583: * The following is i386-specific nonsense.
1584: */
1585: 
1586: /*
1587: * void lgdt(struct region_descriptor *rdp);
1588: * Load a new GDT pointer (and do any necessary cleanup).
1589: * XXX It's somewhat questionable whether reloading all the segment registers
1590: * is necessary, since the actual descriptor data is not changed except by
1591: * process creation and exit, both of which clean up via task switches. OTOH,
1592: * this only happens at run time when the GDT is resized.
1593: */
1594: /* LINTSTUB: Func: void lgdt(struct region_descriptor *rdp) */
1595: NENTRY(lgdt)
1596: 	/* Reload the descriptor table. */
1597: 	movl	4(%esp),%eax
1598: 	lgdt	(%eax)
1599: 	/* Flush the prefetch queue. */
1600: 	jmp	1f
1601: 	nop
1602: 1:	/* Reload "stale" selectors. */
1603: 	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
1604: 	movw	%ax,%ds
1605: 	movw	%ax,%es
1606: 	movw	%ax,%gs
1607: 	movw	%ax,%ss
1608: 	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
1609: 	movw	%ax,%fs			# %fs points at the per-CPU data segment
1610: 	/* Reload code selector by doing intersegment return. */
1611: 	popl	%eax			# %eax = our return address
1612: 	pushl	$GSEL(GCODE_SEL, SEL_KPL)
1613: 	pushl	%eax
1614: 	lret				# far return reloads %cs as well
1615:
1616: /*****************************************************************************/
1617: 
1618: /*
1619: * These functions are primarily used by DDB.
1620: */
1621: 
1622: /* LINTSTUB: Func: int setjmp (label_t *l) */
/*
 * Save callee-saved registers, stack pointers and return address into
 * the label_t; returns 0 here (longjmp returns 1 at the same site).
 */
1623: ENTRY(setjmp)
1624: 	movl	4(%esp),%eax
1625: 	movl	%ebx,(%eax)		# save ebx
1626: 	movl	%esp,4(%eax)		# save esp
1627: 	movl	%ebp,8(%eax)		# save ebp
1628: 	movl	%esi,12(%eax)		# save esi
1629: 	movl	%edi,16(%eax)		# save edi
1630: 	movl	(%esp),%edx		# get rta
1631: 	movl	%edx,20(%eax)		# save eip
1632: 	xorl	%eax,%eax		# return (0);
1633: 	ret
1634:
1635: /* LINTSTUB: Func: void longjmp (label_t *l) */
/*
 * Restore the context saved by setjmp() and resume at its call site,
 * making that setjmp() appear to return 1.
 */
1636: ENTRY(longjmp)
1637: 	movl	4(%esp),%eax
1638: 	movl	(%eax),%ebx		# restore ebx
1639: 	movl	4(%eax),%esp		# restore esp
1640: 	movl	8(%eax),%ebp		# restore ebp
1641: 	movl	12(%eax),%esi		# restore esi
1642: 	movl	16(%eax),%edi		# restore edi
1643: 	movl	20(%eax),%edx		# get rta
1644: 	movl	%edx,(%esp)		# put in return frame
1645: 	xorl	%eax,%eax		# return (1);
1646: 	incl	%eax
1647: 	ret
1648:
1649: /*****************************************************************************/
1650: 
1651: 	.globl	_C_LABEL(sched_whichqs),_C_LABEL(sched_qs)
1652: 	.globl	_C_LABEL(uvmexp),_C_LABEL(panic)
1653: 
1654: #ifdef DIAGNOSTIC
/* Panic target for run-queue consistency checks in cpu_switch(). */
1655: NENTRY(switch_error)
1656: 	pushl	$1f
1657: 	call	_C_LABEL(panic)
1658: 	/* NOTREACHED */
1659: 1:	.asciz	"cpu_switch"
1660: #endif /* DIAGNOSTIC */
1661:
1662: /*
1.5 thorpej 1663: * void cpu_switch(struct lwp *)
1.1 fvdl 1664: * Find a runnable process and switch to it. Wait if necessary. If the new
1665: * process is the same as the old one, we short-circuit the context save and
1666: * restore.
1667: *
1668: * Note that the stack frame layout is known to "struct switchframe"
1669: * in <machine/frame.h> and to the code in cpu_fork() which initializes
1.5 thorpej 1670: * it for a new lwp.
1.1 fvdl 1671: */
/*
 * Return value (in %eax via %ebx): 0 if we resumed the same lwp,
 * 1 if a real switch took place.
 */
1672: ENTRY(cpu_switch)
1673: 	pushl	%ebx
1674: 	pushl	%esi
1675: 	pushl	%edi
1676: 
1677: #ifdef DEBUG
1678: 	cmpl	$IPL_SCHED,CPUVAR(ILEVEL)
1679: 	jae	1f
1.5 thorpej 1680: 	pushl	$2f
1.1 fvdl 1681: 	call	_C_LABEL(panic)
1682: 	/* NOTREACHED */
1.5 thorpej 1683: 2:	.asciz	"not splsched() in cpu_switch!"
1.1 fvdl 1684: 1:
1685: #endif /* DEBUG */
1686: 
1.5 thorpej 1687: 	movl	16(%esp),%esi		# current
1.1 fvdl 1688: 
1689: 	/*
1.5 thorpej 1690: 	 * Clear curlwp so that we don't accumulate system time while idle.
1691: 	 * This also insures that schedcpu() will move the old lwp to
1.1 fvdl 1692: 	 * the correct queue if it happens to get called from the spllower()
1693: 	 * below and changes the priority.  (See corresponding comment in
1694: 	 * userret()).
1695: 	 */
1.5 thorpej 1696: 	movl	$0,CPUVAR(CURLWP)
1.1 fvdl 1697: 	/*
1.5 thorpej 1698: 	 * First phase: find new lwp.
1.1 fvdl 1699: 	 *
1700: 	 * Registers:
1701: 	 *   %eax - queue head, scratch, then zero
1702: 	 *   %ebx - queue number
1703: 	 *   %ecx - cached value of whichqs
1.5 thorpej 1704: 	 *   %edx - next lwp in queue
1705: 	 *   %esi - old lwp
1706: 	 *   %edi - new lwp
1.1 fvdl 1707: 	 */
1708: 
1.5 thorpej 1709: 	/* Look for new lwp. */
1.1 fvdl 1710: 	cli				# splhigh doesn't do a cli
1711: 	movl	_C_LABEL(sched_whichqs),%ecx
1712: 	bsfl	%ecx,%ebx		# find a full q
1713: 	jnz	switch_dequeue		# bsf clears ZF when a bit was found
1714: 
1715: 	/*
1716: 	 * idling:	save old context.
1717: 	 *
1718: 	 * Registers:
1719: 	 *   %eax, %ecx - scratch
1.5 thorpej 1720: 	 *   %esi - old lwp, then old pcb
1.1 fvdl 1721: 	 *   %edi - idle pcb
1722: 	 */
1723: 
1724: 	pushl	%esi
1725: 	call	_C_LABEL(pmap_deactivate)	# pmap_deactivate(oldproc)
1726: 	addl	$4,%esp
1727: 
1.5 thorpej 1728: 	movl	L_ADDR(%esi),%esi	# %esi = old lwp's pcb
1.1 fvdl 1729: 
1730: 	/* Save stack pointers. */
1731: 	movl	%esp,PCB_ESP(%esi)
1732: 	movl	%ebp,PCB_EBP(%esi)
1733: 
1734: 	/* Find idle PCB for this CPU */
1735: #ifndef MULTIPROCESSOR
1.5 thorpej 1736: 	movl	$_C_LABEL(lwp0),%ebx
1737: 	movl	L_ADDR(%ebx),%edi
1738: 	movl	L_MD_TSS_SEL(%ebx),%edx
1.1 fvdl 1739: #else
1740: 	movl	CPUVAR(IDLE_PCB),%edi
1741: 	movl	CPUVAR(IDLE_TSS_SEL),%edx
1742: #endif
1.5 thorpej 1743: 	movl	$0,CPUVAR(CURLWP)		/* In case we fault... */
1.1 fvdl 1744: 
1745: 	/* Restore the idle context (avoid interrupts) */
1746: 	cli
1747: 
1748: 	/* Restore stack pointers. */
1749: 	movl	PCB_ESP(%edi),%esp
1750: 	movl	PCB_EBP(%edi),%ebp
1751: 
1752: 
1753: 	/* Switch address space. */
1754: 	movl	PCB_CR3(%edi),%ecx
1755: 	movl	%ecx,%cr3
1756: 
1757: 	/* Switch TSS. Reset "task busy" flag before loading. */
1758: #ifdef MULTIPROCESSOR
1759: 	movl	CPUVAR(GDT),%eax
1760: #else
1761: 	movl	_C_LABEL(gdt),%eax
1762: #endif
1763: 	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)	# clear busy bit in descriptor
1764: 	ltr	%dx
1765: 
1766: 	/* We're always in the kernel, so we don't need the LDT. */
1767: 
1768: 	/* Restore cr0 (including FPU state). */
1769: 	movl	PCB_CR0(%edi),%ecx
1770: 	movl	%ecx,%cr0
1771: 
1772: 	/* Record new pcb. */
1773: 	SET_CURPCB(%edi)
1774: 
1775: 	xorl	%esi,%esi		# no old lwp any more
1776: 	sti
1777: idle_unlock:
1778: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1779: 	call	_C_LABEL(sched_unlock_idle)
1780: #endif
1781: 	/* Interrupts are okay again. */
1.2 fvdl 1782: 	pushl	$IPL_NONE		# spl0()
1.1 fvdl 1783: 	call	_C_LABEL(Xspllower)	# process pending interrupts
1.2 fvdl 1784: 	addl	$4,%esp
1.1 fvdl 1785: 	jmp	idle_start
1786: idle_zero:
1787: 	sti
1788: 	call	_C_LABEL(uvm_pageidlezero)
1789: 	cli
1790: 	cmpl	$0,_C_LABEL(sched_whichqs)
1791: 	jnz	idle_exit
1792: idle_loop:
1793: 	/* Try to zero some pages. */
1794: 	movl	_C_LABEL(uvm)+UVM_PAGE_IDLE_ZERO,%ecx
1795: 	testl	%ecx,%ecx
1796: 	jnz	idle_zero
1797: 	sti
1798: 	hlt				# sleep until the next interrupt
1799: NENTRY(mpidle)
1800: idle_start:
1801: 	cli
1802: 	cmpl	$0,_C_LABEL(sched_whichqs)
1803: 	jz	idle_loop
1804: idle_exit:
1805: 	movl	$IPL_HIGH,CPUVAR(ILEVEL)		# splhigh
1806: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1807: 	call	_C_LABEL(sched_lock_idle)
1808: #endif
1809: 	movl	_C_LABEL(sched_whichqs),%ecx
1810: 	bsfl	%ecx,%ebx
1811: 	jz	idle_unlock		# queues drained again: back to idle
1812: 
1813: switch_dequeue:
1814: 	/*
1815: 	 * we're running at splhigh(), but it's otherwise okay to take
1816: 	 * interrupts here.
1817: 	 */
1818: 	sti
1819: 	leal	_C_LABEL(sched_qs)(,%ebx,8),%eax	# select q
1820: 
1.5 thorpej 1821: 	movl	L_FORW(%eax),%edi	# unlink from front of process q
1.1 fvdl 1822: #ifdef	DIAGNOSTIC
1823: 	cmpl	%edi,%eax		# linked to self (i.e. nothing queued)?
1824: 	je	_C_LABEL(switch_error)	# not possible
1825: #endif /* DIAGNOSTIC */
1.5 thorpej 1826: 	movl	L_FORW(%edi),%edx
1827: 	movl	%edx,L_FORW(%eax)
1828: 	movl	%eax,L_BACK(%edx)
1.1 fvdl 1829: 
1830: 	cmpl	%edx,%eax		# q empty?
1831: 	jne	3f
1832: 
1833: 	btrl	%ebx,%ecx		# yes, clear to indicate empty
1834: 	movl	%ecx,_C_LABEL(sched_whichqs) # update q status
1835: 
1836: 3:	/* We just did it. */
1837: 	xorl	%eax,%eax
1838: 	CLEAR_RESCHED(%eax)
1839: 
1.5 thorpej 1840: switch_resume:
/* Entered from cpu_switchto() as well; %eax must be zero here. */
1.1 fvdl 1841: #ifdef	DIAGNOSTIC
1.5 thorpej 1842: 	cmpl	%eax,L_WCHAN(%edi)	# Waiting for something?
1.1 fvdl 1843: 	jne	_C_LABEL(switch_error)	# Yes; shouldn't be queued.
1.5 thorpej 1844: 	cmpb	$LSRUN,L_STAT(%edi)	# In run state?
1.1 fvdl 1845: 	jne	_C_LABEL(switch_error)	# No; shouldn't be queued.
1846: #endif /* DIAGNOSTIC */
1847: 
1.5 thorpej 1848: 	/* Isolate lwp.  XXX Is this necessary? */
1849: 	movl	%eax,L_BACK(%edi)
1.1 fvdl 1850: 
1.5 thorpej 1851: 	/* Record new lwp. */
1852: 	movb	$LSONPROC,L_STAT(%edi)	# l->l_stat = LSONPROC
1853: 	SET_CURLWP(%edi,%ecx)
1.1 fvdl 1854: 
1.5 thorpej 1855: 	/* Skip context switch if same lwp. */
1.10 ! fvdl 1856: 	xorl	%ebx,%ebx		# %ebx = return value: 0 = no switch
1.1 fvdl 1857: 	cmpl	%edi,%esi
1858: 	je	switch_return
1859: 
1.5 thorpej 1860: 	/* If old lwp exited, don't bother. */
1.1 fvdl 1861: 	testl	%esi,%esi
1862: 	jz	switch_exited
1863: 
1864: 	/*
1865: 	 * Second phase: save old context.
1866: 	 *
1867: 	 * Registers:
1868: 	 *   %eax, %ecx - scratch
1.5 thorpej 1869: 	 *   %esi - old lwp, then old pcb
1870: 	 *   %edi - new lwp
1.1 fvdl 1871: 	 */
1872: 
1873: 	pushl	%esi
1874: 	call	_C_LABEL(pmap_deactivate)	# pmap_deactivate(oldproc)
1875: 	addl	$4,%esp
1876: 
1.5 thorpej 1877: 	movl	L_ADDR(%esi),%esi	# %esi = old lwp's pcb
1.1 fvdl 1878: 
1879: 	/* Save stack pointers. */
1880: 	movl	%esp,PCB_ESP(%esi)
1881: 	movl	%ebp,PCB_EBP(%esi)
1882: 
1883: switch_exited:
1884: 	/*
1885: 	 * Third phase: restore saved context.
1886: 	 *
1887: 	 * Registers:
1888: 	 *   %eax, %ebx, %ecx, %edx - scratch
1889: 	 *   %esi - new pcb
1.5 thorpej 1890: 	 *   %edi - new lwp
1.1 fvdl 1891: 	 */
1892: 
1893: 	/* No interrupts while loading new state. */
1894: 	cli
1.5 thorpej 1895: 	movl	L_ADDR(%edi),%esi	# %esi = new lwp's pcb
1.1 fvdl 1896: 
1897: 	/* Restore stack pointers. */
1898: 	movl	PCB_ESP(%esi),%esp
1899: 	movl	PCB_EBP(%esi),%ebp
1900: 
1901: #if 0
1902: 	/* Don't bother with the rest if switching to a system process. */
1.5 thorpej 1903: 	testl	$P_SYSTEM,L_FLAG(%edi);	XXX NJWLWP lwp's don't have P_SYSTEM!
1.1 fvdl 1904: 	jnz	switch_restored
1905: #endif
1906: 
1907: #ifdef MULTIPROCESSOR
1908: 	movl	CPUVAR(GDT),%eax
1909: #else
1910: 	/* Load TSS info. */
1911: 	movl	_C_LABEL(gdt),%eax
1912: #endif
1.5 thorpej 1913: 	movl	L_MD_TSS_SEL(%edi),%edx
1.1 fvdl 1914: 
1915: 	/* Switch TSS. Reset "task busy" flag before loading. */
1916: 	andl	$~0x0200,4(%eax,%edx, 1)	# clear busy bit in descriptor
1917: 	ltr	%dx
1918: 
1919: 	pushl	%edi
1920: 	call	_C_LABEL(pmap_activate)		# pmap_activate(p)
1921: 	addl	$4,%esp
1922: 
1923: #if 0
1924: switch_restored:
1925: #endif
1926: 	/* Restore cr0 (including FPU state). */
1927: 	movl	PCB_CR0(%esi),%ecx
1928: #ifdef MULTIPROCESSOR
1929: 	/*
1930: 	 * If our floating point registers are on a different cpu,
1931: 	 * clear CR0_TS so we'll trap rather than reuse bogus state.
1932: 	 */
1933: 	movl	PCB_FPCPU(%esi),%ebx
1934: 	cmpl	CPUVAR(SELF),%ebx
1935: 	jz	1f
1936: 	orl	$CR0_TS,%ecx
1937: 1:
1938: #endif
1939: 	movl	%ecx,%cr0
1940: 
1941: 	/* Record new pcb. */
1942: 	SET_CURPCB(%esi)
1943: 
1944: 	/* Interrupts are okay again. */
1945: 	sti
1946: 
1947: 	/*
1948: 	 *  Check for restartable atomic sequences (RAS)
1949: 	 */
1.5 thorpej 1950: 	movl	CPUVAR(CURLWP),%edi
1951: 	movl	L_PROC(%edi),%esi
1952: 	cmpl	$0,P_NRAS(%esi)
1.1 fvdl 1953: 	je	1f
1.5 thorpej 1954: 	movl	L_MD_REGS(%edi),%ebx
1.4 gmcgarry 1955: 	movl	TF_EIP(%ebx),%eax
1.1 fvdl 1956: 	pushl	%eax
1.5 thorpej 1957: 	pushl	%esi
1.1 fvdl 1958: 	call	_C_LABEL(ras_lookup)
1959: 	addl	$8,%esp
1960: 	cmpl	$-1,%eax
1961: 	je	1f
1.4 gmcgarry 1962: 	movl	%eax,TF_EIP(%ebx)	# restart the atomic sequence
1.1 fvdl 1963: 1:
1.10 ! fvdl 1964: 	movl	$1,%ebx			# we really switched: return 1
1.1 fvdl 1965: 
1966: switch_return:
1967: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1968: 	call	_C_LABEL(sched_unlock_idle)
1969: #endif
1.2 fvdl 1970: 	pushl	$IPL_NONE		# spl0()
1.1 fvdl 1971: 	call	_C_LABEL(Xspllower)	# process pending interrupts
1.2 fvdl 1972: 	addl	$4,%esp
1.1 fvdl 1973: 	movl	$IPL_HIGH,CPUVAR(ILEVEL)	# splhigh()
1.9 fvdl 1974: 
1975: 	movl	%ebx,%eax		# return "did we switch?" flag
1.1 fvdl 1976: 
1977: 	popl	%edi
1978: 	popl	%esi
1979: 	popl	%ebx
1980: 	ret
1981:
1982: /*
1.5 thorpej 1983: * void cpu_switchto(struct lwp *current, struct lwp *next)
1984: * Switch to the specified next LWP.
1985: */
1986: ENTRY(cpu_switchto)
1987: 	pushl	%ebx
1988: 	pushl	%esi
1989: 	pushl	%edi
1990: 
1991: #ifdef DEBUG
1992: 	cmpl	$IPL_SCHED,CPUVAR(ILEVEL)
1993: 	jae	1f
1994: 	pushl	$2f
1995: 	call	_C_LABEL(panic)
1996: 	/* NOTREACHED */
1997: 2:	.asciz	"not splsched() in cpu_switchto!"
1998: 1:
1999: #endif /* DEBUG */
2000: 
2001: 	movl	16(%esp),%esi		# current
2002: 	movl	20(%esp),%edi		# next
2003: 
2004: 	/*
2005: 	 * Clear curlwp so that we don't accumulate system time while idle.
2006: 	 * This also insures that schedcpu() will move the old process to
2007: 	 * the correct queue if it happens to get called from the spllower()
2008: 	 * below and changes the priority.  (See corresponding comment in
2009: 	 * userret()).
2010: 	 *
2011: 	 * XXX Is this necessary?  We know we won't go idle.
2012: 	 */
2013: 	movl	$0,CPUVAR(CURLWP)
2014: 
2015: 	/*
2016: 	 * We're running at splhigh(), but it's otherwise okay to take
2017: 	 * interrupts here.
2018: 	 */
2019: 	sti
2020: 
2021: 	/* Jump into the middle of cpu_switch */
2022: 	xorl	%eax,%eax		# switch_resume expects %eax == 0
2023: 	jmp	switch_resume
2024:
2025: /*
2026: * void switch_exit(struct lwp *l, void (*exit)(struct lwp *));
2027: * Switch to the appropriate idle context (lwp0's if uniprocessor; the cpu's
1.1 fvdl 2028: * if multiprocessor) and deallocate the address space and kernel stack for p.
2029: * Then jump into cpu_switch(), as if we were in the idle proc all along.
2030: */
2031: #ifndef MULTIPROCESSOR
1.5 thorpej 2032: 	.globl	_C_LABEL(lwp0)
1.1 fvdl 2033: #endif
2034: 	.globl	_C_LABEL(uvmspace_free),_C_LABEL(kernel_map)
2035: 	.globl	_C_LABEL(uvm_km_free),_C_LABEL(tss_free)
1.6 drochner 2036: /* LINTSTUB: Func: void switch_exit(struct lwp *l, void (*exit)(struct lwp *)) */
/*
 * Note: we must move off the dying lwp's kernel stack (onto the idle
 * pcb's stack) BEFORE calling the exit function that frees it.
 */
1.1 fvdl 2037: ENTRY(switch_exit)
2038: 	movl	4(%esp),%edi		# old process
1.5 thorpej 2039: 	movl	8(%esp),%eax		# exit func
1.1 fvdl 2040: #ifndef MULTIPROCESSOR
1.5 thorpej 2041: 	movl	$_C_LABEL(lwp0),%ebx
2042: 	movl	L_ADDR(%ebx),%esi
2043: 	movl	L_MD_TSS_SEL(%ebx),%edx
1.1 fvdl 2044: #else
2045: 	movl	CPUVAR(IDLE_PCB),%esi
2046: 	movl	CPUVAR(IDLE_TSS_SEL),%edx
2047: #endif
2048: 	/* In case we fault... */
1.5 thorpej 2049: 	movl	$0,CPUVAR(CURLWP)
1.1 fvdl 2050: 
2051: 	/* Restore the idle context. */
2052: 	cli
2053: 
2054: 	/* Restore stack pointers. */
2055: 	movl	PCB_ESP(%esi),%esp
2056: 	movl	PCB_EBP(%esi),%ebp
2057: 
1.5 thorpej 2058: 	/* Save exit func. */
2059: 	pushl	%eax			# now on the idle stack
2060: 
1.1 fvdl 2061: 	/* Load TSS info. */
2062: #ifdef MULTIPROCESSOR
2063: 	movl	CPUVAR(GDT),%eax
2064: #else
2065: 	/* Load TSS info. */
2066: 	movl	_C_LABEL(gdt),%eax
2067: #endif
2068: 
2069: 	/* Switch address space. */
2070: 	movl	PCB_CR3(%esi),%ecx
2071: 	movl	%ecx,%cr3
2072: 
2073: 	/* Switch TSS. */
2074: 	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)	# clear busy bit in descriptor
2075: 	ltr	%dx
2076: 
2077: 	/* We're always in the kernel, so we don't need the LDT. */
2078: 
2079: 	/* Restore cr0 (including FPU state). */
2080: 	movl	PCB_CR0(%esi),%ecx
2081: 	movl	%ecx,%cr0
2082: 
2083: 	/* Record new pcb. */
2084: 	SET_CURPCB(%esi)
2085: 
2086: 	/* Interrupts are okay again. */
2087: 	sti
2088: 
2089: 	/*
2090: 	 * Schedule the dead process's vmspace and stack to be freed.
2091: 	 */
1.5 thorpej 2092: 	movl	0(%esp),%eax		/* %eax = exit func */
2093: 	movl	%edi,0(%esp)		/* {lwp_}exit2(l) */
2094: 	call	*%eax
1.1 fvdl 2095: 	addl	$4,%esp
2096: 
2097: 	/* Jump into cpu_switch() with the right state. */
2098: 	xorl	%esi,%esi		# no old lwp
1.5 thorpej 2099: 	movl	%esi,CPUVAR(CURLWP)
1.1 fvdl 2100: 	jmp	idle_start
2101:
2102: /*
2103: * void savectx(struct pcb *pcb);
2104: * Update pcb, saving current processor state.
2105: */
2106: /* LINTSTUB: Func: void savectx(struct pcb *pcb) */
/* Only the stack pointers are saved; used e.g. for dumps. */
2107: ENTRY(savectx)
2108: 	movl	4(%esp),%edx		# edx = p->p_addr
2109: 
2110: 	/* Save stack pointers. */
2111: 	movl	%esp,PCB_ESP(%edx)
2112: 	movl	%ebp,PCB_EBP(%edx)
2113: 
2114: 	ret
2115:
2116: /*
2117: * Old call gate entry for syscall
2118: */
2119: /* LINTSTUB: Var: char Xosyscall[1]; */
/*
 * Call gates do not push eflags, so synthesize it in the trap frame
 * slot, then share the common path with the trap-gate entry.
 */
2120: IDTVEC(osyscall)
2121: 	/* Set eflags in trap frame. */
2122: 	pushfl
2123: 	popl	8(%esp)
2124: 	pushl	$7		# size of instruction for restart
2125: 	jmp	syscall1
2126:
2127: /*
2128: * Trap gate entry for syscall
2129: */
2130: /* LINTSTUB: Var: char Xsyscall[1]; */
/*
 * Build the trap frame, dispatch to the process's syscall handler,
 * then loop delivering ASTs until none are pending before returning
 * to user mode.
 */
2131: IDTVEC(syscall)
2132: 	pushl	$2		# size of instruction for restart
2133: syscall1:
2134: 	pushl	$T_ASTFLT	# trap # for doing ASTs
2135: 	INTRENTRY
2136: 
2137: #ifdef DIAGNOSTIC
2138: 	movl	CPUVAR(ILEVEL),%ebx
2139: 	testl	%ebx,%ebx
2140: 	jz	1f
2141: 	pushl	$5f
2142: 	call	_C_LABEL(printf)
2143: 	addl	$4,%esp
2144: #ifdef DDB
2145: 	int	$3
2146: #endif
2147: 1:
2148: #endif /* DIAGNOSTIC */
1.5 thorpej 2149: 	movl	CPUVAR(CURLWP),%edx
2150: 	movl	%esp,L_MD_REGS(%edx)	# save pointer to frame
2151: 	movl	L_PROC(%edx),%edx
1.1 fvdl 2152: 	call	*P_MD_SYSCALL(%edx)	# get pointer to syscall() function
2153: 2:	/* Check for ASTs on exit to user mode. */
2154: 	cli
1.5 thorpej 2155: 	CHECK_ASTPENDING(%eax)
1.1 fvdl 2156: 	je	1f
2157: 	/* Always returning to user mode here. */
1.5 thorpej 2158: 	CLEAR_ASTPENDING(%eax)
1.1 fvdl 2159: 	sti
2160: 	/* Pushed T_ASTFLT into tf_trapno on entry. */
2161: 	call	_C_LABEL(trap)
2162: 	jmp	2b			# re-check: trap() may post another AST
2163: #ifndef DIAGNOSTIC
2164: 1:	INTRFASTEXIT
2165: #else /* DIAGNOSTIC */
2166: 1:	cmpl	$IPL_NONE,CPUVAR(ILEVEL)
2167: 	jne	3f
2168: 	INTRFASTEXIT
2169: 3:	sti
2170: 	pushl	$4f
2171: 	call	_C_LABEL(printf)
2172: 	addl	$4,%esp
2173: #ifdef DDB
2174: 	int	$3
2175: #endif /* DDB */
2176: 	movl	$IPL_NONE,CPUVAR(ILEVEL)
2177: 	jmp	2b
2178: 4:	.asciz	"WARNING: SPL NOT LOWERED ON SYSCALL EXIT\n"
2179: 5:	.asciz	"WARNING: SPL NOT ZERO ON SYSCALL ENTRY\n"
2180: #endif /* DIAGNOSTIC */
2181:
2182: #if NNPX > 0
2183: /*
2184: * Special interrupt handlers.  Someday intr0-intr15 will be used to count
2185: * interrupts.  We'll still need a special exception 16 handler.  The busy
2186: * latch stuff in probintr() can be moved to npxprobe().
2187: */
2188: 
2189: /* LINTSTUB: Func: void probeintr(void) */
/*
 * Interrupt handler installed while probing for an FPU: count the
 * interrupt, EOI both PICs, and clear the coprocessor BUSY# latch.
 * The ss prefixes force stack-segment relative data access while the
 * probe runs with a possibly unusual %ds.
 */
2190: NENTRY(probeintr)
2191: 	ss
2192: 	incl	_C_LABEL(npx_intrs_while_probing)
2193: 	pushl	%eax
2194: 	movb	$0x20,%al	# EOI (asm in strings loses cpp features)
2195: 	outb	%al,$0xa0	# IO_ICU2
2196: 	outb	%al,$0x20	# IO_ICU1
2197: 	movb	$0,%al
2198: 	outb	%al,$0xf0	# clear BUSY# latch
2199: 	popl	%eax
2200: 	iret
2201:
2202: /* LINTSTUB: Func: void probetrap(void) */
/* Exception handler used during the FPU probe: count and clear the trap. */
2203: NENTRY(probetrap)
2204: 	ss
2205: 	incl	_C_LABEL(npx_traps_while_probing)
2206: 	fnclex
2207: 	iret
2208:
2209: /* LINTSTUB: Func: int npx586bug1(int a, int b) */
/*
 * Test for the Pentium FDIV bug: computes x - (x/y)*y, which is 0 on
 * a correct FPU; a non-zero result indicates the flawed divider.
 */
2210: NENTRY(npx586bug1)
2211: 	fildl	4(%esp)		# x
2212: 	fildl	8(%esp)		# y
2213: 	fld	%st(1)
2214: 	fdiv	%st(1),%st	# x/y
2215: 	fmulp	%st,%st(1)	# (x/y)*y
2216: 	fsubrp	%st,%st(1)	# x-(x/y)*y
2217: 	pushl	$0
2218: 	fistpl	(%esp)		# round result to int and return it
2219: 	popl	%eax
2220: 	ret
2221: #endif /* NNPX > 0 */
CVSweb <webmaster@jp.NetBSD.org>