Annotation of src/sys/arch/sparc/sparc/locore.s, Revision 1.200
1.200 ! pk 1: /* $NetBSD: locore.s,v 1.199 2004/04/17 10:06:29 pk Exp $ */
1.70 mrg 2:
1.1 deraadt 3: /*
1.52 pk 4: * Copyright (c) 1996 Paul Kranenburg
5: * Copyright (c) 1996
1.55 abrown 6: * The President and Fellows of Harvard College. All rights reserved.
1.1 deraadt 7: * Copyright (c) 1992, 1993
8: * The Regents of the University of California. All rights reserved.
9: *
10: * This software was developed by the Computer Systems Engineering group
11: * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
12: * contributed to Berkeley.
13: *
14: * All advertising materials mentioning features or use of this software
15: * must display the following acknowledgement:
16: * This product includes software developed by the University of
17: * California, Lawrence Berkeley Laboratory.
1.52 pk 18: * This product includes software developed by Harvard University.
1.1 deraadt 19: *
20: * Redistribution and use in source and binary forms, with or without
21: * modification, are permitted provided that the following conditions
22: * are met:
23: * 1. Redistributions of source code must retain the above copyright
24: * notice, this list of conditions and the following disclaimer.
25: * 2. Redistributions in binary form must reproduce the above copyright
26: * notice, this list of conditions and the following disclaimer in the
27: * documentation and/or other materials provided with the distribution.
28: * 3. All advertising materials mentioning features or use of this software
29: * must display the following acknowledgement:
30: * This product includes software developed by the University of
31: * California, Berkeley and its contributors.
1.52 pk 32: * This product includes software developed by Harvard University.
33: * This product includes software developed by Paul Kranenburg.
1.1 deraadt 34: * 4. Neither the name of the University nor the names of its contributors
35: * may be used to endorse or promote products derived from this software
36: * without specific prior written permission.
37: *
38: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48: * SUCH DAMAGE.
49: *
1.10 deraadt 50: * @(#)locore.s 8.4 (Berkeley) 12/10/93
1.1 deraadt 51: */
52:
1.85 jonathan 53: #include "opt_ddb.h"
1.140 pk 54: #include "opt_kgdb.h"
1.84 thorpej 55: #include "opt_compat_svr4.h"
1.116 christos 56: #include "opt_compat_sunos.h"
1.97 pk 57: #include "opt_multiprocessor.h"
1.134 pk 58: #include "opt_lockdebug.h"
1.80 mrg 59:
1.47 mycroft 60: #include "assym.h"
1.52 pk 61: #include <machine/param.h>
1.111 pk 62: #include <machine/asm.h>
1.1 deraadt 63: #include <sparc/sparc/intreg.h>
64: #include <sparc/sparc/timerreg.h>
1.52 pk 65: #include <sparc/sparc/vaddrs.h>
1.1 deraadt 66: #ifdef notyet
67: #include <sparc/dev/zsreg.h>
68: #endif
69: #include <machine/ctlreg.h>
1.173 pk 70: #include <machine/intr.h>
1.1 deraadt 71: #include <machine/psl.h>
72: #include <machine/signal.h>
73: #include <machine/trap.h>
1.92 pk 74: #include <sys/syscall.h>
1.1 deraadt 75:
76: /*
77: * GNU assembler does not understand `.empty' directive; Sun assembler
78: * gripes about labels without it. To allow cross-compilation using
79: * the Sun assembler, and because .empty directives are useful documentation,
80: * we use this trick.
81: */
82: #ifdef SUN_AS
83: #define EMPTY .empty
84: #else
85: #define EMPTY /* .empty */
86: #endif
87:
88: /* use as needed to align things on longword boundaries */
1.52 pk 89: #define _ALIGN .align 4
1.1 deraadt 90:
91: /*
92: * CCFSZ (C Compiler Frame SiZe) is the size of a stack frame required if
93: * a function is to call C code. It should be just 64, but Sun defined
94: * their frame with space to hold arguments 0 through 5 (plus some junk),
1.63 pk 95: * and varargs routines (such as printf) demand this, and gcc uses this
1.1 deraadt 96: * area at times anyway.
97: */
98: #define CCFSZ 96
99:
1.195 pk 100: /* We rely on the fact that %lo(CPUINFO_VA) is zero */
101: .if CPUINFO_VA & 0x1fff
102: BARF
103: .endif
104:
1.1 deraadt 105: /*
106: * A handy macro for maintaining instrumentation counters.
107: * Note that this clobbers %o0 and %o1. Normal usage is
108: * something like:
109: * foointr:
110: * TRAP_SETUP(...) ! makes %o registers safe
1.111 pk 111: * INCR(cnt+V_FOO) ! count a foo
1.1 deraadt 112: */
113: #define INCR(what) \
114: sethi %hi(what), %o0; \
115: ld [%o0 + %lo(what)], %o1; \
116: inc %o1; \
117: st %o1, [%o0 + %lo(what)]
118:
119: /*
120: * Another handy macro: load one register window, given `base' address.
121: * This can be either a simple register (e.g., %sp) or include an initial
122: * offset (e.g., %g6 + PCB_RW).
123: */
124: #define LOADWIN(addr) \
125: ldd [addr], %l0; \
126: ldd [addr + 8], %l2; \
127: ldd [addr + 16], %l4; \
128: ldd [addr + 24], %l6; \
129: ldd [addr + 32], %i0; \
130: ldd [addr + 40], %i2; \
131: ldd [addr + 48], %i4; \
132: ldd [addr + 56], %i6
133:
134: /*
135: * To return from trap we need the two-instruction sequence
136: * `jmp %l1; rett %l2', which is defined here for convenience.
137: */
138: #define RETT jmp %l1; rett %l2
139:
140: .data
141: /*
142: * The interrupt stack.
143: *
144: * This is the very first thing in the data segment, and therefore has
145: * the lowest kernel stack address. We count on this in the interrupt
146: * trap-frame setup code, since we may need to switch from the kernel
147: * stack to the interrupt stack (iff we are not already on the interrupt
148: * stack). One sethi+cmp is all we need since this is so carefully
149: * arranged.
1.98 pk 150: *
151: * In SMP kernels, each CPU has its own interrupt stack and the computation
152: * to determine whether we're already on the interrupt stack is slightly
153: * more time consuming (see INTR_SETUP() below).
1.1 deraadt 154: */
1.111 pk 155: .globl _C_LABEL(intstack)
156: .globl _C_LABEL(eintstack)
157: _C_LABEL(intstack):
1.98 pk 158: .skip INT_STACK_SIZE ! 16k = 128 128-byte stack frames
1.111 pk 159: _C_LABEL(eintstack):
1.1 deraadt 160:
1.101 pk 161: _EINTSTACKP = CPUINFO_VA + CPUINFO_EINTSTACK
162:
1.1 deraadt 163: /*
1.131 thorpej 164: * CPUINFO_VA is a CPU-local virtual address; cpi->ci_self is a global
165: * virtual address for the same structure. It must be stored in p->p_cpu
166: * upon context switch.
167: */
1.179 pk 168: _CISELFP = CPUINFO_VA + CPUINFO_SELF
169: _CIFLAGS = CPUINFO_VA + CPUINFO_FLAGS
170:
1.197 wiz 171: /* Per-CPU AST and reschedule requests */
1.179 pk 172: _WANT_AST = CPUINFO_VA + CPUINFO_WANT_AST
173: _WANT_RESCHED = CPUINFO_VA + CPUINFO_WANT_RESCHED
1.131 thorpej 174:
175: /*
1.1 deraadt 176: * When a process exits and its u. area goes away, we set cpcb to point
177: * to this `u.', leaving us with something to use for an interrupt stack,
178: * and letting all the register save code have a pcb_uw to examine.
179: * This is also carefully arranged (to come just before u0, so that
180: * process 0's kernel stack can quietly overrun into it during bootup, if
181: * we feel like doing that).
182: */
1.111 pk 183: .globl _C_LABEL(idle_u)
184: _C_LABEL(idle_u):
1.13 deraadt 185: .skip USPACE
1.99 pk 186: /*
187: * On SMP kernels, there's an idle u-area for each CPU and we must
188: * read its location from cpuinfo.
189: */
1.111 pk 190: IDLE_UP = CPUINFO_VA + CPUINFO_IDLE_U
1.1 deraadt 191:
192: /*
193: * Process 0's u.
194: *
195: * This must be aligned on an 8 byte boundary.
196: */
1.111 pk 197: .globl _C_LABEL(u0)
198: _C_LABEL(u0): .skip USPACE
1.1 deraadt 199: estack0:
200:
201: #ifdef KGDB
202: /*
203: * Another item that must be aligned, easiest to put it here.
204: */
205: KGDB_STACK_SIZE = 2048
1.111 pk 206: .globl _C_LABEL(kgdb_stack)
207: _C_LABEL(kgdb_stack):
1.1 deraadt 208: .skip KGDB_STACK_SIZE ! hope this is enough
209: #endif
210:
211: /*
1.111 pk 212: * cpcb points to the current pcb (and hence u. area).
1.1 deraadt 213: * Initially this is the special one.
214: */
1.111 pk 215: cpcb = CPUINFO_VA + CPUINFO_CURPCB
1.1 deraadt 216:
1.185 thorpej 217: /* curlwp points to the current LWP that has the CPU */
218: curlwp = CPUINFO_VA + CPUINFO_CURLWP
1.104 pk 219:
1.52 pk 220: /*
1.197 wiz 221: * cputyp is the current CPU type, used to distinguish between
1.13 deraadt 222: * the many variations of different sun4* machines. It contains
223: * the value CPU_SUN4, CPU_SUN4C, or CPU_SUN4M.
1.9 deraadt 224: */
1.111 pk 225: .globl _C_LABEL(cputyp)
226: _C_LABEL(cputyp):
1.9 deraadt 227: .word 1
1.52 pk 228:
1.18 deraadt 229: #if defined(SUN4C) || defined(SUN4M)
1.111 pk 230: cputypval:
1.18 deraadt 231: .asciz "sun4c"
232: .ascii " "
1.111 pk 233: cputypvar:
1.37 pk 234: .asciz "compatible"
1.52 pk 235: _ALIGN
1.18 deraadt 236: #endif
237:
1.13 deraadt 238: /*
 239: * These variables are pointed to by the cpp symbols PGSHIFT, NBPG,
240: * and PGOFSET.
241: */
1.111 pk 242: .globl _C_LABEL(pgshift), _C_LABEL(nbpg), _C_LABEL(pgofset)
243: _C_LABEL(pgshift):
1.52 pk 244: .word 0
1.111 pk 245: _C_LABEL(nbpg):
1.52 pk 246: .word 0
1.111 pk 247: _C_LABEL(pgofset):
1.52 pk 248: .word 0
249:
1.111 pk 250: .globl _C_LABEL(trapbase)
251: _C_LABEL(trapbase):
1.52 pk 252: .word 0
1.9 deraadt 253:
1.75 pk 254: #if 0
1.9 deraadt 255: #if defined(SUN4M)
256: _mapme:
257: .asciz "0 0 f8000000 15c6a0 map-pages"
258: #endif
1.75 pk 259: #endif
1.9 deraadt 260:
1.158 thorpej 261: #if !defined(SUN4D)
262: sun4d_notsup:
263: .asciz "cr .( NetBSD/sparc: this kernel does not support the sun4d) cr"
264: #endif
1.9 deraadt 265: #if !defined(SUN4M)
266: sun4m_notsup:
1.20 deraadt 267: .asciz "cr .( NetBSD/sparc: this kernel does not support the sun4m) cr"
1.9 deraadt 268: #endif
1.13 deraadt 269: #if !defined(SUN4C)
1.9 deraadt 270: sun4c_notsup:
1.20 deraadt 271: .asciz "cr .( NetBSD/sparc: this kernel does not support the sun4c) cr"
1.13 deraadt 272: #endif
273: #if !defined(SUN4)
274: sun4_notsup:
1.20 deraadt 275: ! the extra characters at the end are to ensure the zs fifo drains
276: ! before we halt. Sick, eh?
277: .asciz "NetBSD/sparc: this kernel does not support the sun4\n\r \b"
1.9 deraadt 278: #endif
1.52 pk 279: _ALIGN
1.9 deraadt 280:
1.1 deraadt 281: .text
282:
283: /*
1.26 deraadt 284: * The first thing in the real text segment is the trap vector table,
285: * which must be aligned on a 4096 byte boundary. The text segment
286: * starts beyond page 0 of KERNBASE so that there is a red zone
287: * between user and kernel space. Since the boot ROM loads us at
1.119 christos 288: * PROM_LOADADDR, it is far easier to start at KERNBASE+PROM_LOADADDR than to
1.26 deraadt 289: * buck the trend. This is two or four pages in (depending on if
290: * pagesize is 8192 or 4096). We place two items in this area:
1.75 pk 291: * the message buffer (phys addr 0) and the cpu_softc structure for
292: * the first processor in the system (phys addr 0x2000).
293: * Because the message buffer is in our "red zone" between user and
1.26 deraadt 294: * kernel space we remap it in configure() to another location and
295: * invalidate the mapping at KERNBASE.
296: */
297:
1.1 deraadt 298: /*
299: * Each trap has room for four instructions, of which one perforce must
300: * be a branch. On entry the hardware has copied pc and npc to %l1 and
301: * %l2 respectively. We use two more to read the psr into %l0, and to
302: * put the trap type value into %l3 (with a few exceptions below).
303: * We could read the trap type field of %tbr later in the code instead,
304: * but there is no need, and that would require more instructions
305: * (read+mask, vs 1 `mov' here).
306: *
307: * I used to generate these numbers by address arithmetic, but gas's
308: * expression evaluator has about as much sense as your average slug
309: * (oddly enough, the code looks about as slimy too). Thus, all the
310: * trap numbers are given as arguments to the trap macros. This means
311: * there is one line per trap. Sigh.
312: *
313: * Note that only the local registers may be used, since the trap
314: * window is potentially the last window. Its `in' registers are
315: * the previous window's outs (as usual), but more important, its
316: * `out' registers may be in use as the `topmost' window's `in' registers.
317: * The global registers are of course verboten (well, until we save
318: * them away).
319: *
320: * Hardware interrupt vectors can be `linked'---the linkage is to regular
321: * C code---or rewired to fast in-window handlers. The latter are good
322: * for unbuffered hardware like the Zilog serial chip and the AMD audio
323: * chip, where many interrupts can be handled trivially with pseudo-DMA or
324: * similar. Only one `fast' interrupt can be used per level, however, and
325: * direct and `fast' interrupts are incompatible. Routines in intr.c
326: * handle setting these, with optional paranoia.
327: */
328:
 329: /* regular vectored traps */
/*
 * Each of these macros expands to at most the four instructions that fit
 * in one trap-table slot (see the comment above: pc/npc are already in
 * %l1/%l2).  The `mov %psr, %l0' sits in the delay slot of the branch,
 * so %l0 holds the PSR by the time the handler is reached; %l3 (and %l4
 * for soft interrupts) carries the trap type / level.
 */
 330: #define VTRAP(type, label) \
 331: 	mov (type), %l3; b label; mov %psr, %l0; nop
 332:
 333: /* hardware interrupts (can be linked or made `fast') */
1.52 pk 334: #define HARDINT44C(lev) \
1.111 pk 335: 	mov (lev), %l3; b _C_LABEL(sparc_interrupt44c); mov %psr, %l0; nop
1.52 pk 336:
 337: /* hardware interrupts (can be linked or made `fast') */
 338: #define HARDINT4M(lev) \
1.111 pk 339: 	mov (lev), %l3; b _C_LABEL(sparc_interrupt4m); mov %psr, %l0; nop
1.1 deraadt 340:
 341: /* software interrupts (may not be made direct, sorry---but you
 342: 	should not be using them trivially anyway) */
1.52 pk 343: #define SOFTINT44C(lev, bit) \
 344: 	mov (lev), %l3; mov (bit), %l4; b softintr_sun44c; mov %psr, %l0
345:
346: /* There's no SOFTINT4M(): both hard and soft vector the same way */
1.1 deraadt 347:
348: /* traps that just call trap() */
349: #define TRAP(type) VTRAP(type, slowtrap)
350:
351: /* architecturally undefined traps (cause panic) */
352: #define UTRAP(type) VTRAP(type, slowtrap)
353:
354: /* software undefined traps (may be replaced) */
355: #define STRAP(type) VTRAP(type, slowtrap)
356:
357: /* breakpoint acts differently under kgdb */
358: #ifdef KGDB
359: #define BPT VTRAP(T_BREAKPOINT, bpt)
1.52 pk 360: #define BPT_KGDB_EXEC VTRAP(T_KGDB_EXEC, bpt)
361: #else
362: #define BPT TRAP(T_BREAKPOINT)
363: #define BPT_KGDB_EXEC TRAP(T_KGDB_EXEC)
364: #endif
365:
366: /* special high-speed 1-instruction-shaved-off traps (get nothing in %l3) */
1.122 christos 367: #define SYSCALL b _C_LABEL(_syscall); mov %psr, %l0; nop; nop
1.52 pk 368: #define WINDOW_OF b window_of; mov %psr, %l0; nop; nop
369: #define WINDOW_UF b window_uf; mov %psr, %l0; nop; nop
370: #ifdef notyet
371: #define ZS_INTERRUPT b zshard; mov %psr, %l0; nop; nop
372: #else
373: #define ZS_INTERRUPT44C HARDINT44C(12)
374: #define ZS_INTERRUPT4M HARDINT4M(12)
375: #endif
376:
1.173 pk 377: #ifdef DEBUG
/*
 * TRAP_TRACE(tt, tmp): record the trap type in register `tt' into the
 * per-CPU cpuinfo TT slot (CPUINFO_VA + CPUINFO_TT), clobbering `tmp';
 * the slot thus holds the most recently traced trap type (DEBUG kernels
 * only — presumably for post-mortem inspection).  TRAP_TRACE2 is the
 * same but takes a constant trap type, using `tmp1' as scratch.
 * Both compile away to nothing in non-DEBUG kernels.
 */
 378: #define TRAP_TRACE(tt, tmp) \
 379: 	sethi %hi(CPUINFO_VA + CPUINFO_TT), tmp; \
 380: 	st tt, [tmp + %lo(CPUINFO_VA + CPUINFO_TT)];
 381: #define TRAP_TRACE2(tt, tmp1, tmp2) \
 382: 	mov tt, tmp1; \
 383: 	TRAP_TRACE(tmp1, tmp2)
 384: #else /* DEBUG */
 385: #define TRAP_TRACE(tt,tmp) /**/
 386: #define TRAP_TRACE2(tt,tmp1,tmp2) /**/
 387: #endif /* DEBUG */
388:
1.111 pk 389: .globl _ASM_LABEL(start), _C_LABEL(kernel_text)
390: _C_LABEL(kernel_text) = start ! for kvm_mkdb(8)
391: _ASM_LABEL(start):
1.52 pk 392: /*
 393: * Put the sun4 trap table first, since it needs the most stringent alignment (8192)
394: */
395: #if defined(SUN4)
396: trapbase_sun4:
397: /* trap 0 is special since we cannot receive it */
398: b dostart; nop; nop; nop ! 00 = reset (fake)
399: VTRAP(T_TEXTFAULT, memfault_sun4) ! 01 = instr. fetch fault
400: TRAP(T_ILLINST) ! 02 = illegal instruction
401: TRAP(T_PRIVINST) ! 03 = privileged instruction
402: TRAP(T_FPDISABLED) ! 04 = fp instr, but EF bit off in psr
403: WINDOW_OF ! 05 = window overflow
404: WINDOW_UF ! 06 = window underflow
405: TRAP(T_ALIGN) ! 07 = address alignment error
406: VTRAP(T_FPE, fp_exception) ! 08 = fp exception
407: VTRAP(T_DATAFAULT, memfault_sun4) ! 09 = data fetch fault
408: TRAP(T_TAGOF) ! 0a = tag overflow
409: UTRAP(0x0b)
410: UTRAP(0x0c)
411: UTRAP(0x0d)
412: UTRAP(0x0e)
413: UTRAP(0x0f)
414: UTRAP(0x10)
415: SOFTINT44C(1, IE_L1) ! 11 = level 1 interrupt
416: HARDINT44C(2) ! 12 = level 2 interrupt
417: HARDINT44C(3) ! 13 = level 3 interrupt
418: SOFTINT44C(4, IE_L4) ! 14 = level 4 interrupt
419: HARDINT44C(5) ! 15 = level 5 interrupt
420: SOFTINT44C(6, IE_L6) ! 16 = level 6 interrupt
421: HARDINT44C(7) ! 17 = level 7 interrupt
422: HARDINT44C(8) ! 18 = level 8 interrupt
423: HARDINT44C(9) ! 19 = level 9 interrupt
424: HARDINT44C(10) ! 1a = level 10 interrupt
425: HARDINT44C(11) ! 1b = level 11 interrupt
426: ZS_INTERRUPT44C ! 1c = level 12 (zs) interrupt
427: HARDINT44C(13) ! 1d = level 13 interrupt
428: HARDINT44C(14) ! 1e = level 14 interrupt
429: VTRAP(15, nmi_sun4) ! 1f = nonmaskable interrupt
430: UTRAP(0x20)
431: UTRAP(0x21)
432: UTRAP(0x22)
433: UTRAP(0x23)
434: TRAP(T_CPDISABLED) ! 24 = coprocessor instr, EC bit off in psr
435: UTRAP(0x25)
436: UTRAP(0x26)
437: UTRAP(0x27)
438: TRAP(T_CPEXCEPTION) ! 28 = coprocessor exception
439: UTRAP(0x29)
440: UTRAP(0x2a)
441: UTRAP(0x2b)
442: UTRAP(0x2c)
443: UTRAP(0x2d)
444: UTRAP(0x2e)
445: UTRAP(0x2f)
446: UTRAP(0x30)
447: UTRAP(0x31)
448: UTRAP(0x32)
449: UTRAP(0x33)
450: UTRAP(0x34)
451: UTRAP(0x35)
452: UTRAP(0x36)
453: UTRAP(0x37)
454: UTRAP(0x38)
455: UTRAP(0x39)
456: UTRAP(0x3a)
457: UTRAP(0x3b)
458: UTRAP(0x3c)
459: UTRAP(0x3d)
460: UTRAP(0x3e)
461: UTRAP(0x3f)
462: UTRAP(0x40)
463: UTRAP(0x41)
464: UTRAP(0x42)
465: UTRAP(0x43)
466: UTRAP(0x44)
467: UTRAP(0x45)
468: UTRAP(0x46)
469: UTRAP(0x47)
470: UTRAP(0x48)
471: UTRAP(0x49)
472: UTRAP(0x4a)
473: UTRAP(0x4b)
474: UTRAP(0x4c)
475: UTRAP(0x4d)
476: UTRAP(0x4e)
477: UTRAP(0x4f)
478: UTRAP(0x50)
479: UTRAP(0x51)
480: UTRAP(0x52)
481: UTRAP(0x53)
482: UTRAP(0x54)
483: UTRAP(0x55)
484: UTRAP(0x56)
485: UTRAP(0x57)
486: UTRAP(0x58)
487: UTRAP(0x59)
488: UTRAP(0x5a)
489: UTRAP(0x5b)
490: UTRAP(0x5c)
491: UTRAP(0x5d)
492: UTRAP(0x5e)
493: UTRAP(0x5f)
494: UTRAP(0x60)
495: UTRAP(0x61)
496: UTRAP(0x62)
497: UTRAP(0x63)
498: UTRAP(0x64)
499: UTRAP(0x65)
500: UTRAP(0x66)
501: UTRAP(0x67)
502: UTRAP(0x68)
503: UTRAP(0x69)
504: UTRAP(0x6a)
505: UTRAP(0x6b)
506: UTRAP(0x6c)
507: UTRAP(0x6d)
508: UTRAP(0x6e)
509: UTRAP(0x6f)
510: UTRAP(0x70)
511: UTRAP(0x71)
512: UTRAP(0x72)
513: UTRAP(0x73)
514: UTRAP(0x74)
515: UTRAP(0x75)
516: UTRAP(0x76)
517: UTRAP(0x77)
518: UTRAP(0x78)
519: UTRAP(0x79)
520: UTRAP(0x7a)
521: UTRAP(0x7b)
522: UTRAP(0x7c)
523: UTRAP(0x7d)
524: UTRAP(0x7e)
525: UTRAP(0x7f)
526: SYSCALL ! 80 = sun syscall
527: BPT ! 81 = pseudo breakpoint instruction
528: TRAP(T_DIV0) ! 82 = divide by zero
529: TRAP(T_FLUSHWIN) ! 83 = flush windows
530: TRAP(T_CLEANWIN) ! 84 = provide clean windows
531: TRAP(T_RANGECHECK) ! 85 = ???
532: TRAP(T_FIXALIGN) ! 86 = fix up unaligned accesses
533: TRAP(T_INTOF) ! 87 = integer overflow
534: SYSCALL ! 88 = svr4 syscall
535: SYSCALL ! 89 = bsd syscall
536: BPT_KGDB_EXEC ! 8a = enter kernel gdb on kernel startup
537: STRAP(0x8b)
538: STRAP(0x8c)
539: STRAP(0x8d)
540: STRAP(0x8e)
541: STRAP(0x8f)
542: STRAP(0x90)
543: STRAP(0x91)
544: STRAP(0x92)
545: STRAP(0x93)
546: STRAP(0x94)
547: STRAP(0x95)
548: STRAP(0x96)
549: STRAP(0x97)
550: STRAP(0x98)
551: STRAP(0x99)
552: STRAP(0x9a)
553: STRAP(0x9b)
554: STRAP(0x9c)
555: STRAP(0x9d)
556: STRAP(0x9e)
557: STRAP(0x9f)
558: STRAP(0xa0)
559: STRAP(0xa1)
560: STRAP(0xa2)
561: STRAP(0xa3)
562: STRAP(0xa4)
563: STRAP(0xa5)
564: STRAP(0xa6)
565: STRAP(0xa7)
566: STRAP(0xa8)
567: STRAP(0xa9)
568: STRAP(0xaa)
569: STRAP(0xab)
570: STRAP(0xac)
571: STRAP(0xad)
572: STRAP(0xae)
573: STRAP(0xaf)
574: STRAP(0xb0)
575: STRAP(0xb1)
576: STRAP(0xb2)
577: STRAP(0xb3)
578: STRAP(0xb4)
579: STRAP(0xb5)
580: STRAP(0xb6)
581: STRAP(0xb7)
582: STRAP(0xb8)
583: STRAP(0xb9)
584: STRAP(0xba)
585: STRAP(0xbb)
586: STRAP(0xbc)
587: STRAP(0xbd)
588: STRAP(0xbe)
589: STRAP(0xbf)
590: STRAP(0xc0)
591: STRAP(0xc1)
592: STRAP(0xc2)
593: STRAP(0xc3)
594: STRAP(0xc4)
595: STRAP(0xc5)
596: STRAP(0xc6)
597: STRAP(0xc7)
598: STRAP(0xc8)
599: STRAP(0xc9)
600: STRAP(0xca)
601: STRAP(0xcb)
602: STRAP(0xcc)
603: STRAP(0xcd)
604: STRAP(0xce)
605: STRAP(0xcf)
606: STRAP(0xd0)
607: STRAP(0xd1)
608: STRAP(0xd2)
609: STRAP(0xd3)
610: STRAP(0xd4)
611: STRAP(0xd5)
612: STRAP(0xd6)
613: STRAP(0xd7)
614: STRAP(0xd8)
615: STRAP(0xd9)
616: STRAP(0xda)
617: STRAP(0xdb)
618: STRAP(0xdc)
619: STRAP(0xdd)
620: STRAP(0xde)
621: STRAP(0xdf)
622: STRAP(0xe0)
623: STRAP(0xe1)
624: STRAP(0xe2)
625: STRAP(0xe3)
626: STRAP(0xe4)
627: STRAP(0xe5)
628: STRAP(0xe6)
629: STRAP(0xe7)
630: STRAP(0xe8)
631: STRAP(0xe9)
632: STRAP(0xea)
633: STRAP(0xeb)
634: STRAP(0xec)
635: STRAP(0xed)
636: STRAP(0xee)
637: STRAP(0xef)
638: STRAP(0xf0)
639: STRAP(0xf1)
640: STRAP(0xf2)
641: STRAP(0xf3)
642: STRAP(0xf4)
643: STRAP(0xf5)
644: STRAP(0xf6)
645: STRAP(0xf7)
646: STRAP(0xf8)
647: STRAP(0xf9)
648: STRAP(0xfa)
649: STRAP(0xfb)
650: STRAP(0xfc)
651: STRAP(0xfd)
652: STRAP(0xfe)
653: STRAP(0xff)
654: #endif
655:
656: #if defined(SUN4C)
657: trapbase_sun4c:
658: /* trap 0 is special since we cannot receive it */
659: b dostart; nop; nop; nop ! 00 = reset (fake)
660: VTRAP(T_TEXTFAULT, memfault_sun4c) ! 01 = instr. fetch fault
661: TRAP(T_ILLINST) ! 02 = illegal instruction
662: TRAP(T_PRIVINST) ! 03 = privileged instruction
663: TRAP(T_FPDISABLED) ! 04 = fp instr, but EF bit off in psr
664: WINDOW_OF ! 05 = window overflow
665: WINDOW_UF ! 06 = window underflow
666: TRAP(T_ALIGN) ! 07 = address alignment error
667: VTRAP(T_FPE, fp_exception) ! 08 = fp exception
668: VTRAP(T_DATAFAULT, memfault_sun4c) ! 09 = data fetch fault
669: TRAP(T_TAGOF) ! 0a = tag overflow
670: UTRAP(0x0b)
671: UTRAP(0x0c)
672: UTRAP(0x0d)
673: UTRAP(0x0e)
674: UTRAP(0x0f)
675: UTRAP(0x10)
676: SOFTINT44C(1, IE_L1) ! 11 = level 1 interrupt
677: HARDINT44C(2) ! 12 = level 2 interrupt
678: HARDINT44C(3) ! 13 = level 3 interrupt
679: SOFTINT44C(4, IE_L4) ! 14 = level 4 interrupt
680: HARDINT44C(5) ! 15 = level 5 interrupt
681: SOFTINT44C(6, IE_L6) ! 16 = level 6 interrupt
682: HARDINT44C(7) ! 17 = level 7 interrupt
683: HARDINT44C(8) ! 18 = level 8 interrupt
684: HARDINT44C(9) ! 19 = level 9 interrupt
685: HARDINT44C(10) ! 1a = level 10 interrupt
686: HARDINT44C(11) ! 1b = level 11 interrupt
687: ZS_INTERRUPT44C ! 1c = level 12 (zs) interrupt
688: HARDINT44C(13) ! 1d = level 13 interrupt
689: HARDINT44C(14) ! 1e = level 14 interrupt
690: VTRAP(15, nmi_sun4c) ! 1f = nonmaskable interrupt
691: UTRAP(0x20)
692: UTRAP(0x21)
693: UTRAP(0x22)
694: UTRAP(0x23)
695: TRAP(T_CPDISABLED) ! 24 = coprocessor instr, EC bit off in psr
696: UTRAP(0x25)
697: UTRAP(0x26)
698: UTRAP(0x27)
699: TRAP(T_CPEXCEPTION) ! 28 = coprocessor exception
700: UTRAP(0x29)
701: UTRAP(0x2a)
702: UTRAP(0x2b)
703: UTRAP(0x2c)
704: UTRAP(0x2d)
705: UTRAP(0x2e)
706: UTRAP(0x2f)
707: UTRAP(0x30)
708: UTRAP(0x31)
709: UTRAP(0x32)
710: UTRAP(0x33)
711: UTRAP(0x34)
712: UTRAP(0x35)
713: UTRAP(0x36)
714: UTRAP(0x37)
715: UTRAP(0x38)
716: UTRAP(0x39)
717: UTRAP(0x3a)
718: UTRAP(0x3b)
719: UTRAP(0x3c)
720: UTRAP(0x3d)
721: UTRAP(0x3e)
722: UTRAP(0x3f)
723: UTRAP(0x40)
724: UTRAP(0x41)
725: UTRAP(0x42)
726: UTRAP(0x43)
727: UTRAP(0x44)
728: UTRAP(0x45)
729: UTRAP(0x46)
730: UTRAP(0x47)
731: UTRAP(0x48)
732: UTRAP(0x49)
733: UTRAP(0x4a)
734: UTRAP(0x4b)
735: UTRAP(0x4c)
736: UTRAP(0x4d)
737: UTRAP(0x4e)
738: UTRAP(0x4f)
739: UTRAP(0x50)
740: UTRAP(0x51)
741: UTRAP(0x52)
742: UTRAP(0x53)
743: UTRAP(0x54)
744: UTRAP(0x55)
745: UTRAP(0x56)
746: UTRAP(0x57)
747: UTRAP(0x58)
748: UTRAP(0x59)
749: UTRAP(0x5a)
750: UTRAP(0x5b)
751: UTRAP(0x5c)
752: UTRAP(0x5d)
753: UTRAP(0x5e)
754: UTRAP(0x5f)
755: UTRAP(0x60)
756: UTRAP(0x61)
757: UTRAP(0x62)
758: UTRAP(0x63)
759: UTRAP(0x64)
760: UTRAP(0x65)
761: UTRAP(0x66)
762: UTRAP(0x67)
763: UTRAP(0x68)
764: UTRAP(0x69)
765: UTRAP(0x6a)
766: UTRAP(0x6b)
767: UTRAP(0x6c)
768: UTRAP(0x6d)
769: UTRAP(0x6e)
770: UTRAP(0x6f)
771: UTRAP(0x70)
772: UTRAP(0x71)
773: UTRAP(0x72)
774: UTRAP(0x73)
775: UTRAP(0x74)
776: UTRAP(0x75)
777: UTRAP(0x76)
778: UTRAP(0x77)
779: UTRAP(0x78)
780: UTRAP(0x79)
781: UTRAP(0x7a)
782: UTRAP(0x7b)
783: UTRAP(0x7c)
784: UTRAP(0x7d)
785: UTRAP(0x7e)
786: UTRAP(0x7f)
787: SYSCALL ! 80 = sun syscall
788: BPT ! 81 = pseudo breakpoint instruction
789: TRAP(T_DIV0) ! 82 = divide by zero
790: TRAP(T_FLUSHWIN) ! 83 = flush windows
791: TRAP(T_CLEANWIN) ! 84 = provide clean windows
792: TRAP(T_RANGECHECK) ! 85 = ???
793: TRAP(T_FIXALIGN) ! 86 = fix up unaligned accesses
794: TRAP(T_INTOF) ! 87 = integer overflow
795: SYSCALL ! 88 = svr4 syscall
796: SYSCALL ! 89 = bsd syscall
797: BPT_KGDB_EXEC ! 8a = enter kernel gdb on kernel startup
798: STRAP(0x8b)
799: STRAP(0x8c)
800: STRAP(0x8d)
801: STRAP(0x8e)
802: STRAP(0x8f)
803: STRAP(0x90)
804: STRAP(0x91)
805: STRAP(0x92)
806: STRAP(0x93)
807: STRAP(0x94)
808: STRAP(0x95)
809: STRAP(0x96)
810: STRAP(0x97)
811: STRAP(0x98)
812: STRAP(0x99)
813: STRAP(0x9a)
814: STRAP(0x9b)
815: STRAP(0x9c)
816: STRAP(0x9d)
817: STRAP(0x9e)
818: STRAP(0x9f)
819: STRAP(0xa0)
820: STRAP(0xa1)
821: STRAP(0xa2)
822: STRAP(0xa3)
823: STRAP(0xa4)
824: STRAP(0xa5)
825: STRAP(0xa6)
826: STRAP(0xa7)
827: STRAP(0xa8)
828: STRAP(0xa9)
829: STRAP(0xaa)
830: STRAP(0xab)
831: STRAP(0xac)
832: STRAP(0xad)
833: STRAP(0xae)
834: STRAP(0xaf)
835: STRAP(0xb0)
836: STRAP(0xb1)
837: STRAP(0xb2)
838: STRAP(0xb3)
839: STRAP(0xb4)
840: STRAP(0xb5)
841: STRAP(0xb6)
842: STRAP(0xb7)
843: STRAP(0xb8)
844: STRAP(0xb9)
845: STRAP(0xba)
846: STRAP(0xbb)
847: STRAP(0xbc)
848: STRAP(0xbd)
849: STRAP(0xbe)
850: STRAP(0xbf)
851: STRAP(0xc0)
852: STRAP(0xc1)
853: STRAP(0xc2)
854: STRAP(0xc3)
855: STRAP(0xc4)
856: STRAP(0xc5)
857: STRAP(0xc6)
858: STRAP(0xc7)
859: STRAP(0xc8)
860: STRAP(0xc9)
861: STRAP(0xca)
862: STRAP(0xcb)
863: STRAP(0xcc)
864: STRAP(0xcd)
865: STRAP(0xce)
866: STRAP(0xcf)
867: STRAP(0xd0)
868: STRAP(0xd1)
869: STRAP(0xd2)
870: STRAP(0xd3)
871: STRAP(0xd4)
872: STRAP(0xd5)
873: STRAP(0xd6)
874: STRAP(0xd7)
875: STRAP(0xd8)
876: STRAP(0xd9)
877: STRAP(0xda)
878: STRAP(0xdb)
879: STRAP(0xdc)
880: STRAP(0xdd)
881: STRAP(0xde)
882: STRAP(0xdf)
883: STRAP(0xe0)
884: STRAP(0xe1)
885: STRAP(0xe2)
886: STRAP(0xe3)
887: STRAP(0xe4)
888: STRAP(0xe5)
889: STRAP(0xe6)
890: STRAP(0xe7)
891: STRAP(0xe8)
892: STRAP(0xe9)
893: STRAP(0xea)
894: STRAP(0xeb)
895: STRAP(0xec)
896: STRAP(0xed)
897: STRAP(0xee)
898: STRAP(0xef)
899: STRAP(0xf0)
900: STRAP(0xf1)
901: STRAP(0xf2)
902: STRAP(0xf3)
903: STRAP(0xf4)
904: STRAP(0xf5)
905: STRAP(0xf6)
906: STRAP(0xf7)
907: STRAP(0xf8)
908: STRAP(0xf9)
909: STRAP(0xfa)
910: STRAP(0xfb)
911: STRAP(0xfc)
912: STRAP(0xfd)
913: STRAP(0xfe)
914: STRAP(0xff)
1.1 deraadt 915: #endif
916:
1.52 pk 917: #if defined(SUN4M)
918: trapbase_sun4m:
1.1 deraadt 919: /* trap 0 is special since we cannot receive it */
920: b dostart; nop; nop; nop ! 00 = reset (fake)
1.52 pk 921: VTRAP(T_TEXTFAULT, memfault_sun4m) ! 01 = instr. fetch fault
1.1 deraadt 922: TRAP(T_ILLINST) ! 02 = illegal instruction
923: TRAP(T_PRIVINST) ! 03 = privileged instruction
924: TRAP(T_FPDISABLED) ! 04 = fp instr, but EF bit off in psr
925: WINDOW_OF ! 05 = window overflow
926: WINDOW_UF ! 06 = window underflow
927: TRAP(T_ALIGN) ! 07 = address alignment error
928: VTRAP(T_FPE, fp_exception) ! 08 = fp exception
1.52 pk 929: VTRAP(T_DATAFAULT, memfault_sun4m) ! 09 = data fetch fault
1.1 deraadt 930: TRAP(T_TAGOF) ! 0a = tag overflow
931: UTRAP(0x0b)
932: UTRAP(0x0c)
933: UTRAP(0x0d)
934: UTRAP(0x0e)
935: UTRAP(0x0f)
936: UTRAP(0x10)
1.52 pk 937: HARDINT4M(1) ! 11 = level 1 interrupt
938: HARDINT4M(2) ! 12 = level 2 interrupt
939: HARDINT4M(3) ! 13 = level 3 interrupt
940: HARDINT4M(4) ! 14 = level 4 interrupt
941: HARDINT4M(5) ! 15 = level 5 interrupt
942: HARDINT4M(6) ! 16 = level 6 interrupt
943: HARDINT4M(7) ! 17 = level 7 interrupt
944: HARDINT4M(8) ! 18 = level 8 interrupt
945: HARDINT4M(9) ! 19 = level 9 interrupt
946: HARDINT4M(10) ! 1a = level 10 interrupt
947: HARDINT4M(11) ! 1b = level 11 interrupt
948: ZS_INTERRUPT4M ! 1c = level 12 (zs) interrupt
949: HARDINT4M(13) ! 1d = level 13 interrupt
950: HARDINT4M(14) ! 1e = level 14 interrupt
951: VTRAP(15, nmi_sun4m) ! 1f = nonmaskable interrupt
1.1 deraadt 952: UTRAP(0x20)
1.190 pk 953: VTRAP(T_TEXTERROR, memfault_sun4m) ! 21 = instr. fetch error
1.1 deraadt 954: UTRAP(0x22)
955: UTRAP(0x23)
1.25 deraadt 956: TRAP(T_CPDISABLED) ! 24 = coprocessor instr, EC bit off in psr
1.1 deraadt 957: UTRAP(0x25)
958: UTRAP(0x26)
959: UTRAP(0x27)
1.25 deraadt 960: TRAP(T_CPEXCEPTION) ! 28 = coprocessor exception
1.190 pk 961: VTRAP(T_DATAERROR, memfault_sun4m) ! 29 = data fetch error
1.1 deraadt 962: UTRAP(0x2a)
1.52 pk 963: VTRAP(T_STOREBUFFAULT, memfault_sun4m) ! 2b = SuperSPARC store buffer fault
1.1 deraadt 964: UTRAP(0x2c)
965: UTRAP(0x2d)
966: UTRAP(0x2e)
967: UTRAP(0x2f)
968: UTRAP(0x30)
969: UTRAP(0x31)
970: UTRAP(0x32)
971: UTRAP(0x33)
972: UTRAP(0x34)
973: UTRAP(0x35)
1.25 deraadt 974: UTRAP(0x36)
1.1 deraadt 975: UTRAP(0x37)
976: UTRAP(0x38)
977: UTRAP(0x39)
978: UTRAP(0x3a)
979: UTRAP(0x3b)
980: UTRAP(0x3c)
981: UTRAP(0x3d)
982: UTRAP(0x3e)
983: UTRAP(0x3f)
1.25 deraadt 984: UTRAP(0x40)
1.1 deraadt 985: UTRAP(0x41)
986: UTRAP(0x42)
987: UTRAP(0x43)
988: UTRAP(0x44)
989: UTRAP(0x45)
990: UTRAP(0x46)
991: UTRAP(0x47)
992: UTRAP(0x48)
993: UTRAP(0x49)
994: UTRAP(0x4a)
995: UTRAP(0x4b)
996: UTRAP(0x4c)
997: UTRAP(0x4d)
998: UTRAP(0x4e)
999: UTRAP(0x4f)
1000: UTRAP(0x50)
1001: UTRAP(0x51)
1002: UTRAP(0x52)
1003: UTRAP(0x53)
1004: UTRAP(0x54)
1005: UTRAP(0x55)
1006: UTRAP(0x56)
1007: UTRAP(0x57)
1008: UTRAP(0x58)
1009: UTRAP(0x59)
1010: UTRAP(0x5a)
1011: UTRAP(0x5b)
1012: UTRAP(0x5c)
1013: UTRAP(0x5d)
1014: UTRAP(0x5e)
1015: UTRAP(0x5f)
1016: UTRAP(0x60)
1017: UTRAP(0x61)
1018: UTRAP(0x62)
1019: UTRAP(0x63)
1020: UTRAP(0x64)
1021: UTRAP(0x65)
1022: UTRAP(0x66)
1023: UTRAP(0x67)
1024: UTRAP(0x68)
1025: UTRAP(0x69)
1026: UTRAP(0x6a)
1027: UTRAP(0x6b)
1028: UTRAP(0x6c)
1029: UTRAP(0x6d)
1030: UTRAP(0x6e)
1031: UTRAP(0x6f)
1032: UTRAP(0x70)
1033: UTRAP(0x71)
1034: UTRAP(0x72)
1035: UTRAP(0x73)
1036: UTRAP(0x74)
1037: UTRAP(0x75)
1038: UTRAP(0x76)
1039: UTRAP(0x77)
1040: UTRAP(0x78)
1041: UTRAP(0x79)
1042: UTRAP(0x7a)
1043: UTRAP(0x7b)
1044: UTRAP(0x7c)
1045: UTRAP(0x7d)
1046: UTRAP(0x7e)
1047: UTRAP(0x7f)
1.3 deraadt 1048: SYSCALL ! 80 = sun syscall
1.1 deraadt 1049: BPT ! 81 = pseudo breakpoint instruction
1050: TRAP(T_DIV0) ! 82 = divide by zero
1051: TRAP(T_FLUSHWIN) ! 83 = flush windows
1052: TRAP(T_CLEANWIN) ! 84 = provide clean windows
1053: TRAP(T_RANGECHECK) ! 85 = ???
1054: TRAP(T_FIXALIGN) ! 86 = fix up unaligned accesses
1055: TRAP(T_INTOF) ! 87 = integer overflow
1.33 christos 1056: SYSCALL ! 88 = svr4 syscall
1.1 deraadt 1057: SYSCALL ! 89 = bsd syscall
1.33 christos 1058: BPT_KGDB_EXEC ! 8a = enter kernel gdb on kernel startup
1.171 pk 1059: TRAP(T_DBPAUSE) ! 8b = hold CPU for kernel debugger
1.1 deraadt 1060: STRAP(0x8c)
1061: STRAP(0x8d)
1062: STRAP(0x8e)
1063: STRAP(0x8f)
1064: STRAP(0x90)
1065: STRAP(0x91)
1066: STRAP(0x92)
1067: STRAP(0x93)
1068: STRAP(0x94)
1069: STRAP(0x95)
1070: STRAP(0x96)
1071: STRAP(0x97)
1072: STRAP(0x98)
1073: STRAP(0x99)
1074: STRAP(0x9a)
1075: STRAP(0x9b)
1076: STRAP(0x9c)
1077: STRAP(0x9d)
1078: STRAP(0x9e)
1079: STRAP(0x9f)
1080: STRAP(0xa0)
1081: STRAP(0xa1)
1082: STRAP(0xa2)
1083: STRAP(0xa3)
1084: STRAP(0xa4)
1085: STRAP(0xa5)
1086: STRAP(0xa6)
1087: STRAP(0xa7)
1088: STRAP(0xa8)
1089: STRAP(0xa9)
1090: STRAP(0xaa)
1091: STRAP(0xab)
1092: STRAP(0xac)
1093: STRAP(0xad)
1094: STRAP(0xae)
1095: STRAP(0xaf)
1096: STRAP(0xb0)
1097: STRAP(0xb1)
1098: STRAP(0xb2)
1099: STRAP(0xb3)
1100: STRAP(0xb4)
1101: STRAP(0xb5)
1102: STRAP(0xb6)
1103: STRAP(0xb7)
1104: STRAP(0xb8)
1105: STRAP(0xb9)
1106: STRAP(0xba)
1107: STRAP(0xbb)
1108: STRAP(0xbc)
1109: STRAP(0xbd)
1110: STRAP(0xbe)
1111: STRAP(0xbf)
1112: STRAP(0xc0)
1113: STRAP(0xc1)
1114: STRAP(0xc2)
1115: STRAP(0xc3)
1116: STRAP(0xc4)
1117: STRAP(0xc5)
1118: STRAP(0xc6)
1119: STRAP(0xc7)
1120: STRAP(0xc8)
1121: STRAP(0xc9)
1122: STRAP(0xca)
1123: STRAP(0xcb)
1124: STRAP(0xcc)
1125: STRAP(0xcd)
1126: STRAP(0xce)
1127: STRAP(0xcf)
1128: STRAP(0xd0)
1129: STRAP(0xd1)
1130: STRAP(0xd2)
1131: STRAP(0xd3)
1132: STRAP(0xd4)
1133: STRAP(0xd5)
1134: STRAP(0xd6)
1135: STRAP(0xd7)
1136: STRAP(0xd8)
1137: STRAP(0xd9)
1138: STRAP(0xda)
1139: STRAP(0xdb)
1140: STRAP(0xdc)
1141: STRAP(0xdd)
1142: STRAP(0xde)
1143: STRAP(0xdf)
1144: STRAP(0xe0)
1145: STRAP(0xe1)
1146: STRAP(0xe2)
1147: STRAP(0xe3)
1148: STRAP(0xe4)
1149: STRAP(0xe5)
1150: STRAP(0xe6)
1151: STRAP(0xe7)
1152: STRAP(0xe8)
1153: STRAP(0xe9)
1154: STRAP(0xea)
1155: STRAP(0xeb)
1156: STRAP(0xec)
1157: STRAP(0xed)
1158: STRAP(0xee)
1159: STRAP(0xef)
1160: STRAP(0xf0)
1161: STRAP(0xf1)
1162: STRAP(0xf2)
1163: STRAP(0xf3)
1164: STRAP(0xf4)
1165: STRAP(0xf5)
1166: STRAP(0xf6)
1167: STRAP(0xf7)
1168: STRAP(0xf8)
1169: STRAP(0xf9)
1170: STRAP(0xfa)
1171: STRAP(0xfb)
1172: STRAP(0xfc)
1173: STRAP(0xfd)
1174: STRAP(0xfe)
1175: STRAP(0xff)
1.52 pk 1176: #endif
1.1 deraadt 1177:
1.20 deraadt 1178: /*
1.52 pk 1179: * Pad the trap table to max page size.
1180: * Trap table size is 0x100 * 4instr * 4byte/instr = 4096 bytes;
1181: * need to .skip 4096 to pad to page size iff. the number of trap tables
1182: * defined above is odd.
1.20 deraadt 1183: */
1.65 mycroft 1184: #if (defined(SUN4) + defined(SUN4C) + defined(SUN4M)) % 2 == 1
1.20 deraadt 1185: .skip 4096
1.52 pk 1186: #endif
1.20 deraadt 1187:
1.173 pk 1188: /* redzones don't work currently in multi-processor mode */
1189: #if defined(DEBUG) && !defined(MULTIPROCESSOR)
1.1 deraadt 1190: /*
1191: * A hardware red zone is impossible. We simulate one in software by
1192: * keeping a `red zone' pointer; if %sp becomes less than this, we panic.
1193: * This is expensive and is only enabled when debugging.
1194: */
1.97 pk 1195:
1.99 pk 1196: /* `redzone' is located in the per-CPU information structure */
1.97 pk 1197: _redzone = CPUINFO_VA + CPUINFO_REDZONE
1198: .data
1.1 deraadt 1199: #define REDSTACK 2048 /* size of `panic: stack overflow' region */
1200: _redstack:
1201: .skip REDSTACK
1202: .text
1203: Lpanic_red:
1204: .asciz "stack overflow"
1.52 pk 1205: _ALIGN
1.1 deraadt 1206:
1207: /* set stack pointer redzone to base+minstack; alters base */
1208: #define SET_SP_REDZONE(base, tmp) \
1209: add base, REDSIZE, base; \
1210: sethi %hi(_redzone), tmp; \
1211: st base, [tmp + %lo(_redzone)]
1212:
1213: /* variant with a constant */
1214: #define SET_SP_REDZONE_CONST(const, tmp1, tmp2) \
1215: set (const) + REDSIZE, tmp1; \
1216: sethi %hi(_redzone), tmp2; \
1217: st tmp1, [tmp2 + %lo(_redzone)]
1218:
1.97 pk 1219: /* variant with a variable & offset */
1220: #define SET_SP_REDZONE_VAR(var, offset, tmp1, tmp2) \
1221: sethi %hi(var), tmp1; \
1222: ld [tmp1 + %lo(var)], tmp1; \
1223: sethi %hi(offset), tmp2; \
1224: add tmp1, tmp2, tmp1; \
1225: SET_SP_REDZONE(tmp1, tmp2)
1226:
1.1 deraadt 1227: /* check stack pointer against redzone (uses two temps) */
1228: #define CHECK_SP_REDZONE(t1, t2) \
1229: sethi %hi(_redzone), t1; \
1230: ld [t1 + %lo(_redzone)], t2; \
1231: cmp %sp, t2; /* if sp >= t2, not in red zone */ \
1232: bgeu 7f; nop; /* and can continue normally */ \
1233: /* move to panic stack */ \
1234: st %g0, [t1 + %lo(_redzone)]; \
1235: set _redstack + REDSTACK - 96, %sp; \
1236: /* prevent panic() from lowering ipl */ \
/* BUG FIX: %hi(panicstr) must go into t1 (t2 is overwritten by the `set' */ \
/* below); previously the store went to [%hi(_redzone) + %lo(panicstr)], */ \
/* a bogus address, so `panicstr' was never actually set. */ \
1.121 christos 1237: sethi %hi(_C_LABEL(panicstr)), t1; \
1.1 deraadt 1238: set Lpanic_red, t2; \
1.121 christos 1239: st t2, [t1 + %lo(_C_LABEL(panicstr))]; \
1.1 deraadt 1240: rd %psr, t1; /* t1 = splhigh() */ \
1241: or t1, PSR_PIL, t2; \
1242: wr t2, 0, %psr; \
1243: wr t2, PSR_ET, %psr; /* turn on traps */ \
1244: nop; nop; nop; \
1.4 deraadt 1245: save %sp, -CCFSZ, %sp; /* preserve current window */ \
1.1 deraadt 1246: sethi %hi(Lpanic_red), %o0; \
1.121 christos 1247: call _C_LABEL(panic); or %o0, %lo(Lpanic_red), %o0; \
1.1 deraadt 1248: 7:
1249:
1250: #else
1251:
1252: #define SET_SP_REDZONE(base, tmp)
1253: #define SET_SP_REDZONE_CONST(const, t1, t2)
1.98 pk 1254: #define SET_SP_REDZONE_VAR(var, offset, t1, t2)
1.1 deraadt 1255: #define CHECK_SP_REDZONE(t1, t2)
1.97 pk 1256: #endif /* DEBUG */
1.1 deraadt 1257:
1258: /*
1259: * The window code must verify user stack addresses before using them.
1260: * A user stack pointer is invalid if:
1261: * - it is not on an 8 byte boundary;
1262: * - its pages (a register window, being 64 bytes, can occupy
1263: * two pages) are not readable or writable.
1264: * We define three separate macros here for testing user stack addresses.
1265: *
1266: * PTE_OF_ADDR locates a PTE, branching to a `bad address'
1267: * handler if the stack pointer points into the hole in the
1268: * address space (i.e., top 3 bits are not either all 1 or all 0);
1269: * CMP_PTE_USER_READ compares the located PTE against `user read' mode;
1270: * CMP_PTE_USER_WRITE compares the located PTE against `user write' mode.
1271: * The compares give `equal' if read or write is OK.
1272: *
1273: * Note that the user stack pointer usually points into high addresses
1274: * (top 3 bits all 1), so that is what we check first.
1275: *
1276: * The code below also assumes that PTE_OF_ADDR is safe in a delay
1277: * slot; it is, as it merely sets its `pte' register to a temporary value.
1278: */
1.52 pk 1279: #if defined(SUN4) || defined(SUN4C)
1.1 deraadt 1280: /* input: addr, output: pte; aux: bad address label */
/* Branches to `bad' when the top 3 bits of addr are neither all ones */
/* nor all zeroes (the sun4/sun4c address-space hole). */
1.52 pk 1281: #define PTE_OF_ADDR4_4C(addr, pte, bad, page_offset) \
1.1 deraadt 1282: sra addr, PG_VSHIFT, pte; \
1283: cmp pte, -1; \
1.13 deraadt 1284: be,a 1f; andn addr, page_offset, pte; \
1.1 deraadt 1285: tst pte; \
1286: bne bad; EMPTY; \
1.13 deraadt 1287: andn addr, page_offset, pte; \
1.1 deraadt 1288: 1:
1289:
1290: /* input: pte; output: condition codes */
/* Z set iff the PTE's protection (ignoring PG_W) equals user-read. */
1.52 pk 1291: #define CMP_PTE_USER_READ4_4C(pte) \
1.1 deraadt 1292: lda [pte] ASI_PTE, pte; \
1293: srl pte, PG_PROTSHIFT, pte; \
1294: andn pte, (PG_W >> PG_PROTSHIFT), pte; \
1295: cmp pte, PG_PROTUREAD
1296:
1297: /* input: pte; output: condition codes */
1.52 pk 1298: #define CMP_PTE_USER_WRITE4_4C(pte) \
1.1 deraadt 1299: lda [pte] ASI_PTE, pte; \
1300: srl pte, PG_PROTSHIFT, pte; \
1301: cmp pte, PG_PROTUWRITE
1.9 deraadt 1302: #endif
1.1 deraadt 1303:
1304: /*
1.52 pk 1305: * The Sun4M does not have the memory hole that the 4C does. Thus all
1306: * we need to do here is clear the page offset from addr.
1307: */
1308: #if defined(SUN4M)
1309: #define PTE_OF_ADDR4M(addr, pte, bad, page_offset) \
1310: andn addr, page_offset, pte
1311:
1.94 pk 1312: /*
1313: * After obtaining the PTE through ASI_SRMMUFP, we read the Sync Fault
1314: * Status register. This is necessary on Hypersparcs which stores and
1315: * locks the fault address and status registers if the translation
1316: * fails (thanks to Chris Torek for finding this quirk).
1317: */
/* input: pte (probe address); output: condition codes (Z if user- */
/* readable); clobbers both pte and tmp. The SFSR read is discarded */
/* into %g0 -- it exists only to unlock the fault registers (above). */
1318: #define CMP_PTE_USER_READ4M(pte, tmp) \
1.52 pk 1319: or pte, ASI_SRMMUFP_L3, pte; \
1320: lda [pte] ASI_SRMMUFP, pte; \
1.94 pk 1321: set SRMMU_SFSR, tmp; \
1.200 ! pk 1322: lda [tmp] ASI_SRMMU, %g0; \
! 1323: and pte, SRMMU_TETYPE, tmp; \
! 1324: /* Check for valid pte */ \
! 1325: cmp tmp, SRMMU_TEPTE; \
! 1326: bnz 8f; \
! 1327: and pte, SRMMU_PROT_MASK, pte; \
! 1328: /* check for one of: R_R, RW_RW, RX_RX and RWX_RWX */ \
! 1329: cmp pte, PPROT_X_X; \
! 1330: bcs,a 8f; \
! 1331: /* Now we have carry set if OK; turn it into Z bit */ \
! 1332: subxcc %g0, -1, %g0; \
! 1333: /* One more case to check: R_RW */ \
! 1334: cmp pte, PPROT_R_RW; \
1.59 pk 1335: 8:
1.52 pk 1336:
1.58 pk 1337:
1338: /* note: PTE bit 4 set implies no user writes */
/* input: pte (probe address); output: condition codes (Z if valid */
/* and user-writable); clobbers both pte and tmp. */
1.94 pk 1339: #define CMP_PTE_USER_WRITE4M(pte, tmp) \
1.52 pk 1340: or pte, ASI_SRMMUFP_L3, pte; \
1341: lda [pte] ASI_SRMMUFP, pte; \
1.94 pk 1342: set SRMMU_SFSR, tmp; \
1343: lda [tmp] ASI_SRMMU, %g0; \
1.58 pk 1344: and pte, (SRMMU_TETYPE | 0x14), pte; \
1345: cmp pte, (SRMMU_TEPTE | PPROT_WRITE)
1.52 pk 1346: #endif /* 4m */
1347:
1348: #if defined(SUN4M) && !(defined(SUN4C) || defined(SUN4))
1.64 pk 1349:
1.62 pk 1350: #define PTE_OF_ADDR(addr, pte, bad, page_offset, label) \
1351: PTE_OF_ADDR4M(addr, pte, bad, page_offset)
1.94 pk 1352: #define CMP_PTE_USER_WRITE(pte, tmp, label) CMP_PTE_USER_WRITE4M(pte,tmp)
1353: #define CMP_PTE_USER_READ(pte, tmp, label) CMP_PTE_USER_READ4M(pte,tmp)
1.64 pk 1354:
1.52 pk 1355: #elif (defined(SUN4C) || defined(SUN4)) && !defined(SUN4M)
1.64 pk 1356:
1.62 pk 1357: #define PTE_OF_ADDR(addr, pte, bad, page_offset,label) \
1358: PTE_OF_ADDR4_4C(addr, pte, bad, page_offset)
1359: #define CMP_PTE_USER_WRITE(pte, tmp, label) CMP_PTE_USER_WRITE4_4C(pte)
1360: #define CMP_PTE_USER_READ(pte, tmp, label) CMP_PTE_USER_READ4_4C(pte)
1.64 pk 1361:
1.52 pk 1362: #else /* both defined, ugh */
1.64 pk 1363:
/*
 * Dual-architecture kernel: emit the 4m code first, guarded by a
 * tagged branch-always to the 4/4c code. `label' (the NOP_ON_4M_*
 * names passed by callers) marks that branch; presumably startup code
 * rewrites it to a nop when running on a sun4m -- TODO confirm against
 * the code that patches the NOP_ON_4M_* sites.
 */
1.62 pk 1364: #define PTE_OF_ADDR(addr, pte, bad, page_offset, label) \
1365: label: b,a 2f; \
1366: PTE_OF_ADDR4M(addr, pte, bad, page_offset); \
1367: b,a 3f; \
1368: 2: \
1369: PTE_OF_ADDR4_4C(addr, pte, bad, page_offset); \
1370: 3:
1.52 pk 1371:
1.62 pk 1372: #define CMP_PTE_USER_READ(pte, tmp, label) \
1373: label: b,a 1f; \
1.94 pk 1374: CMP_PTE_USER_READ4M(pte,tmp); \
1.62 pk 1375: b,a 2f; \
1376: 1: \
1377: CMP_PTE_USER_READ4_4C(pte); \
1378: 2:
1.52 pk 1379:
1.62 pk 1380: #define CMP_PTE_USER_WRITE(pte, tmp, label) \
1381: label: b,a 1f; \
1.94 pk 1382: CMP_PTE_USER_WRITE4M(pte,tmp); \
1.62 pk 1383: b,a 2f; \
1384: 1: \
1385: CMP_PTE_USER_WRITE4_4C(pte); \
1386: 2:
1.52 pk 1387: #endif
1388:
1389:
1390: /*
1.1 deraadt 1391: * The calculations in PTE_OF_ADDR and CMP_PTE_USER_* are rather slow:
1392: * in particular, according to Gordon Irlam of the University of Adelaide
1393: * in Australia, these consume at least 18 cycles on an SS1 and 37 on an
1394: * SS2. Hence, we try to avoid them in the common case.
1395: *
1396: * A chunk of 64 bytes is on a single page if and only if:
1397: *
1.13 deraadt 1398: * ((base + 64 - 1) & ~(NBPG-1)) == (base & ~(NBPG-1))
1.1 deraadt 1399: *
1400: * Equivalently (and faster to test), the low order bits (base & 4095) must
1401: * be small enough so that the sum (base + 63) does not carry out into the
1402: * upper page-address bits, i.e.,
1403: *
1.13 deraadt 1404: * (base & (NBPG-1)) < (NBPG - 63)
1.1 deraadt 1405: *
1406: * so we allow testing that here. This macro is also assumed to be safe
1407: * in a delay slot (modulo overwriting its temporary).
1408: */
/*
 * Set condition codes so that a following `bl' is taken iff the
 * 64-byte window at `addr' lies on a single page, per the derivation
 * above: (addr & page_offset) < NBPG - 63.
 * NOTE: clobbers page_offset (leaves it decremented by 62); callers
 * that reuse it must restore it afterwards (see ctw_user).
 */
1.13 deraadt 1409: #define SLT_IF_1PAGE_RW(addr, tmp, page_offset) \
1410: and addr, page_offset, tmp; \
1411: sub page_offset, 62, page_offset; \
1412: cmp tmp, page_offset
1.1 deraadt 1413:
1414: /*
1415: * Every trap that enables traps must set up stack space.
1416: * If the trap is from user mode, this involves switching to the kernel
1417: * stack for the current process, and we must also set cpcb->pcb_uw
1418: * so that the window overflow handler can tell user windows from kernel
1419: * windows.
1420: *
1421: * The number of user windows is:
1422: *
1423: * cpcb->pcb_uw = (cpcb->pcb_wim - 1 - CWP) % nwindows
1424: *
1425: * (where pcb_wim = log2(current %wim) and CWP = low 5 bits of %psr).
1426: * We compute this expression by table lookup in uwtab[CWP - pcb_wim],
1427: * which has been set up as:
1428: *
1429: * for i in [-nwin+1 .. nwin-1]
1430: * uwtab[i] = (nwin - 1 - i) % nwin;
1431: *
1432: * (If you do not believe this works, try it for yourself.)
1433: *
1434: * We also keep one or two more tables:
1435: *
1436: * for i in 0..nwin-1
1437: * wmask[i] = 1 << ((i + 1) % nwindows);
1438: *
1439: * wmask[CWP] tells whether a `rett' would return into the invalid window.
1440: */
1441: .data
1442: .skip 32 ! alignment & negative indices (uwtab[-31..-1] lives here)
1443: uwtab: .skip 32 ! u_char uwtab[-31..31];
1444: wmask: .skip 32 ! u_char wmask[0..31];
1445:
1446: .text
1447: /*
1448: * Things begin to grow uglier....
1449: *
1450: * Each trap handler may (always) be running in the trap window.
1451: * If this is the case, it cannot enable further traps until it writes
1452: * the register windows into the stack (or, if the stack is no good,
1453: * the current pcb).
1454: *
1455: * ASSUMPTIONS: TRAP_SETUP() is called with:
1456: * %l0 = %psr
1457: * %l1 = return pc
1458: * %l2 = return npc
1459: * %l3 = (some value that must not be altered)
1460: * which means we have 4 registers to work with.
1461: *
1462: * The `stackspace' argument is the number of stack bytes to allocate
1463: * for register-saving, and must be at least -64 (and typically more,
1464: * for global registers and %y).
1465: *
1466: * Trapframes should use -CCFSZ-80. (80 = sizeof(struct trapframe);
1467: * see trap.h. This basically means EVERYONE. Interrupt frames could
1468: * get away with less, but currently do not.)
1469: *
1470: * The basic outline here is:
1471: *
1472: * if (trap came from kernel mode) {
1473: * if (we are in the trap window)
1474: * save it away;
1475: * %sp = %fp - stackspace;
1476: * } else {
1477: * compute the number of user windows;
1478: * if (we are in the trap window)
1479: * save it away;
1480: * %sp = (top of kernel stack) - stackspace;
1481: * }
1482: *
1483: * Again, the number of user windows is:
1484: *
1485: * cpcb->pcb_uw = (cpcb->pcb_wim - 1 - CWP) % nwindows
1486: *
1487: * (where pcb_wim = log2(current %wim) and CWP is the low 5 bits of %psr),
1488: * and this is computed as `uwtab[CWP - pcb_wim]'.
1489: *
1490: * NOTE: if you change this code, you will have to look carefully
1491: * at the window overflow and underflow handlers and make sure they
1492: * have similar changes made as needed.
1493: */
/*
 * Jump to clean_trap_window with the return address in %l4 and the
 * caller's %g7 stashed in %l7 (delay slot). Clobbers %l4 and %l7 here;
 * clean_trap_window additionally uses %l5/%l6 to save %g5/%g6.
 */
1494: #define CALL_CLEAN_TRAP_WINDOW \
1495: sethi %hi(clean_trap_window), %l7; \
1496: jmpl %l7 + %lo(clean_trap_window), %l4; \
1497: mov %g7, %l7 /* save %g7 in %l7 for clean_trap_window */
1498:
/* See the block comment above: %l0=%psr, %l1=pc, %l2=npc, %l3 preserved. */
/* Clobbers %l4..%l7; leaves %sp pointing at `stackspace' bytes of frame. */
1499: #define TRAP_SETUP(stackspace) \
1.173 pk 1500: TRAP_TRACE(%l3,%l5); \
1.1 deraadt 1501: rd %wim, %l4; \
1502: mov 1, %l5; \
1503: sll %l5, %l0, %l5; \
1504: btst PSR_PS, %l0; \
1505: bz 1f; \
1506: btst %l5, %l4; \
1507: /* came from kernel mode; cond codes indicate trap window */ \
1508: bz,a 3f; \
1509: add %fp, stackspace, %sp; /* want to just set %sp */ \
1510: CALL_CLEAN_TRAP_WINDOW; /* but maybe need to clean first */ \
1511: b 3f; \
1512: add %fp, stackspace, %sp; \
1513: 1: \
1514: /* came from user mode: compute pcb_nw */ \
1.111 pk 1515: sethi %hi(cpcb), %l6; \
1516: ld [%l6 + %lo(cpcb)], %l6; \
1.1 deraadt 1517: ld [%l6 + PCB_WIM], %l5; \
1518: and %l0, 31, %l4; \
1519: sub %l4, %l5, %l5; \
1520: set uwtab, %l4; \
1521: ldub [%l4 + %l5], %l5; \
1522: st %l5, [%l6 + PCB_UW]; \
1523: /* cond codes still indicate whether in trap window */ \
1524: bz,a 2f; \
1.13 deraadt 1525: sethi %hi(USPACE+(stackspace)), %l5; \
1.1 deraadt 1526: /* yes, in trap window; must clean it */ \
1527: CALL_CLEAN_TRAP_WINDOW; \
1.111 pk 1528: sethi %hi(cpcb), %l6; \
1529: ld [%l6 + %lo(cpcb)], %l6; \
1.13 deraadt 1530: sethi %hi(USPACE+(stackspace)), %l5; \
1.1 deraadt 1531: 2: \
1532: /* trap window is (now) clean: set %sp */ \
1.13 deraadt 1533: or %l5, %lo(USPACE+(stackspace)), %l5; \
1.1 deraadt 1534: add %l6, %l5, %sp; \
1535: SET_SP_REDZONE(%l6, %l5); \
1536: 3: \
1537: CHECK_SP_REDZONE(%l6, %l5)
1538:
1539: /*
1540: * Interrupt setup is almost exactly like trap setup, but we need to
1541: * go to the interrupt stack if (a) we came from user mode or (b) we
1542: * came from kernel mode on the kernel stack.
1543: */
1.142 mrg 1544: #if defined(MULTIPROCESSOR)
1.98 pk 1545: /*
1546: * SMP kernels: read `eintstack' from cpuinfo structure. Since the
1547: * location of the interrupt stack is not known in advance, we need
1548: * to check the current %fp against both ends of the stack space.
1549: */
/* Same register contract as TRAP_SETUP: %l0=%psr, %l1/%l2=pc/npc, */
/* %l3 preserved; clobbers %l4..%l7. */
1.97 pk 1550: #define INTR_SETUP(stackspace) \
1.173 pk 1551: TRAP_TRACE(%l3,%l5); \
1.97 pk 1552: rd %wim, %l4; \
1553: mov 1, %l5; \
1554: sll %l5, %l0, %l5; \
1555: btst PSR_PS, %l0; \
1556: bz 1f; \
1557: btst %l5, %l4; \
1558: /* came from kernel mode; cond codes still indicate trap window */ \
1559: bz,a 0f; \
1.101 pk 1560: sethi %hi(_EINTSTACKP), %l7; \
1.97 pk 1561: CALL_CLEAN_TRAP_WINDOW; \
1.101 pk 1562: sethi %hi(_EINTSTACKP), %l7; \
1.97 pk 1563: 0: /* now if not intstack > %fp >= eintstack, we were on the kernel stack */ \
1.101 pk 1564: ld [%l7 + %lo(_EINTSTACKP)], %l7; \
1.97 pk 1565: cmp %fp, %l7; \
1566: bge,a 3f; /* %fp >= eintstack */ \
1567: add %l7, stackspace, %sp; /* so switch to intstack */ \
1568: sethi %hi(INT_STACK_SIZE), %l6; \
1.98 pk 1569: sub %l7, %l6, %l6; \
1570: cmp %fp, %l6; \
1.97 pk 1571: blu,a 3f; /* %fp < intstack */ \
1572: add %l7, stackspace, %sp; /* so switch to intstack */ \
1573: b 4f; \
1574: add %fp, stackspace, %sp; /* else stay on intstack */ \
1575: 1: \
1576: /* came from user mode: compute pcb_nw */ \
1.111 pk 1577: sethi %hi(cpcb), %l6; \
1578: ld [%l6 + %lo(cpcb)], %l6; \
1.97 pk 1579: ld [%l6 + PCB_WIM], %l5; \
1580: and %l0, 31, %l4; \
1581: sub %l4, %l5, %l5; \
1582: set uwtab, %l4; \
1583: ldub [%l4 + %l5], %l5; \
1584: st %l5, [%l6 + PCB_UW]; \
1585: /* cond codes still indicate whether in trap window */ \
1586: bz,a 2f; \
1.101 pk 1587: sethi %hi(_EINTSTACKP), %l7; \
1.97 pk 1588: /* yes, in trap window; must save regs */ \
1589: CALL_CLEAN_TRAP_WINDOW; \
1.101 pk 1590: sethi %hi(_EINTSTACKP), %l7; \
1.97 pk 1591: 2: \
1.101 pk 1592: ld [%l7 + %lo(_EINTSTACKP)], %l7; \
1.97 pk 1593: add %l7, stackspace, %sp; \
1594: 3: \
1.101 pk 1595: SET_SP_REDZONE_VAR(_EINTSTACKP, -INT_STACK_SIZE, %l6, %l5); \
1.97 pk 1596: 4: \
1597: CHECK_SP_REDZONE(%l6, %l5)
1.98 pk 1598:
1.97 pk 1599: #else /* MULTIPROCESSOR */
1.98 pk 1600:
/*
 * Uniprocessor kernels: `eintstack' is a link-time symbol, so a single
 * compare of %fp against it decides between kernel and interrupt stack.
 */
1.1 deraadt 1601: #define INTR_SETUP(stackspace) \
1.173 pk 1602: TRAP_TRACE(%l3,%l5); \
1.1 deraadt 1603: rd %wim, %l4; \
1604: mov 1, %l5; \
1605: sll %l5, %l0, %l5; \
1606: btst PSR_PS, %l0; \
1607: bz 1f; \
1608: btst %l5, %l4; \
1609: /* came from kernel mode; cond codes still indicate trap window */ \
1610: bz,a 0f; \
1.111 pk 1611: sethi %hi(_C_LABEL(eintstack)), %l7; \
1.1 deraadt 1612: CALL_CLEAN_TRAP_WINDOW; \
1.111 pk 1613: sethi %hi(_C_LABEL(eintstack)), %l7; \
1.1 deraadt 1614: 0: /* now if %fp >= eintstack, we were on the kernel stack */ \
1615: cmp %fp, %l7; \
1616: bge,a 3f; \
1617: add %l7, stackspace, %sp; /* so switch to intstack */ \
1618: b 4f; \
1619: add %fp, stackspace, %sp; /* else stay on intstack */ \
1620: 1: \
1621: /* came from user mode: compute pcb_nw */ \
1.111 pk 1622: sethi %hi(cpcb), %l6; \
1623: ld [%l6 + %lo(cpcb)], %l6; \
1.1 deraadt 1624: ld [%l6 + PCB_WIM], %l5; \
1625: and %l0, 31, %l4; \
1626: sub %l4, %l5, %l5; \
1627: set uwtab, %l4; \
1628: ldub [%l4 + %l5], %l5; \
1629: st %l5, [%l6 + PCB_UW]; \
1630: /* cond codes still indicate whether in trap window */ \
1631: bz,a 2f; \
1.111 pk 1632: sethi %hi(_C_LABEL(eintstack)), %l7; \
1.1 deraadt 1633: /* yes, in trap window; must save regs */ \
1634: CALL_CLEAN_TRAP_WINDOW; \
1.111 pk 1635: sethi %hi(_C_LABEL(eintstack)), %l7; \
1.1 deraadt 1636: 2: \
1637: add %l7, stackspace, %sp; \
1638: 3: \
1.111 pk 1639: SET_SP_REDZONE_CONST(_C_LABEL(intstack), %l6, %l5); \
1.1 deraadt 1640: 4: \
1641: CHECK_SP_REDZONE(%l6, %l5)
1.97 pk 1642: #endif /* MULTIPROCESSOR */
1.1 deraadt 1643:
1644: /*
1645: * Handler for making the trap window shiny clean.
1646: *
1647: * On entry:
1648: * cpcb->pcb_nw = number of user windows
1649: * %l0 = %psr
1650: * %l1 must not be clobbered
1651: * %l2 must not be clobbered
1652: * %l3 must not be clobbered
1653: * %l4 = address for `return'
1654: * %l7 = saved %g7 (we put this in a delay slot above, to save work)
1655: *
1656: * On return:
1657: * %wim has changed, along with cpcb->pcb_wim
1658: * %g7 has been restored
1659: *
1660: * Normally, we push only one window.
1661: */
1662: clean_trap_window:
1663: mov %g5, %l5 ! save %g5
1664: mov %g6, %l6 ! ... and %g6
1665: /* mov %g7, %l7 ! ... and %g7 (already done for us) */
1.111 pk 1666: sethi %hi(cpcb), %g6 ! get current pcb
1667: ld [%g6 + %lo(cpcb)], %g6
1.1 deraadt 1668:
1669: /* Figure out whether it is a user window (cpcb->pcb_uw > 0). */
1670: ld [%g6 + PCB_UW], %g7
1671: deccc %g7
1672: bge ctw_user
1673: save %g0, %g0, %g0 ! in any case, enter window to save
1674:
1675: /* The window to be pushed is a kernel window. */
1676: std %l0, [%sp + (0*8)]
1677: ctw_merge:
1678: std %l2, [%sp + (1*8)]
1679: std %l4, [%sp + (2*8)]
1680: std %l6, [%sp + (3*8)]
1681: std %i0, [%sp + (4*8)]
1682: std %i2, [%sp + (5*8)]
1683: std %i4, [%sp + (6*8)]
1684: std %i6, [%sp + (7*8)]
1685:
1686: /* Set up new window invalid mask, and update cpcb->pcb_wim. */
1687: rd %psr, %g7 ! g7 = (junk << 5) + new_cwp
1688: mov 1, %g5 ! g5 = 1 << new_cwp;
1689: sll %g5, %g7, %g5
1690: wr %g5, 0, %wim ! setwim(g5);
1691: and %g7, 31, %g7 ! cpcb->pcb_wim = g7 & 31;
1.111 pk 1692: sethi %hi(cpcb), %g6 ! re-get current pcb
1693: ld [%g6 + %lo(cpcb)], %g6
1.1 deraadt 1694: st %g7, [%g6 + PCB_WIM]
! give the %wim write time to settle before the restore below
! (presumably the SPARC wim-write delay -- confirm against V8 spec)
1695: nop
1696: restore ! back to trap window
1697:
1698: mov %l5, %g5 ! restore g5
1699: mov %l6, %g6 ! ... and g6
1700: jmp %l4 + 8 ! return to caller
1701: mov %l7, %g7 ! ... and g7
1702: /* NOTREACHED */
1703:
1704: ctw_user:
1705: /*
1706: * The window to be pushed is a user window.
1707: * We must verify the stack pointer (alignment & permissions).
1708: * See comments above definition of PTE_OF_ADDR.
1709: */
1710: st %g7, [%g6 + PCB_UW] ! cpcb->pcb_uw--;
1711: btst 7, %sp ! if not aligned,
1712: bne ctw_invalid ! choke on it
1713: EMPTY
1.13 deraadt 1714:
1.111 pk 1715: sethi %hi(_C_LABEL(pgofset)), %g6 ! trash %g6=curpcb
1716: ld [%g6 + %lo(_C_LABEL(pgofset))], %g6
1.62 pk 1717: PTE_OF_ADDR(%sp, %g7, ctw_invalid, %g6, NOP_ON_4M_1)
1718: CMP_PTE_USER_WRITE(%g7, %g5, NOP_ON_4M_2) ! likewise if not writable
1.1 deraadt 1719: bne ctw_invalid
1720: EMPTY
1.52 pk 1721: /* Note side-effect of SLT_IF_1PAGE_RW: decrements %g6 by 62 */
1.13 deraadt 1722: SLT_IF_1PAGE_RW(%sp, %g7, %g6)
1.1 deraadt 1723: bl,a ctw_merge ! all ok if only 1
1724: std %l0, [%sp]
1725: add %sp, 7*8, %g5 ! check last addr too
1.154 thorpej 1726: add %g6, 62, %g6 /* restore %g6 to `pgofset' */
1.62 pk 1727: PTE_OF_ADDR(%g5, %g7, ctw_invalid, %g6, NOP_ON_4M_3)
1728: CMP_PTE_USER_WRITE(%g7, %g6, NOP_ON_4M_4)
1.1 deraadt 1729: be,a ctw_merge ! all ok: store <l0,l1> and merge
1730: std %l0, [%sp]
1731:
1732: /*
1733: * The window we wanted to push could not be pushed.
1734: * Instead, save ALL user windows into the pcb.
1735: * We will notice later that we did this, when we
1736: * get ready to return from our trap or syscall.
1737: *
1738: * The code here is run rarely and need not be optimal.
1739: */
1740: ctw_invalid:
1741: /*
1742: * Reread cpcb->pcb_uw. We decremented this earlier,
1743: * so it is off by one.
1744: */
1.111 pk 1745: sethi %hi(cpcb), %g6 ! re-get current pcb
1746: ld [%g6 + %lo(cpcb)], %g6
1.13 deraadt 1747:
1.1 deraadt 1748: ld [%g6 + PCB_UW], %g7 ! (number of user windows) - 1
1749: add %g6, PCB_RW, %g5
1750:
1751: /* save g7+1 windows, starting with the current one */
1752: 1: ! do {
1753: std %l0, [%g5 + (0*8)] ! rw->rw_local[0] = l0;
1754: std %l2, [%g5 + (1*8)] ! ...
1755: std %l4, [%g5 + (2*8)]
1756: std %l6, [%g5 + (3*8)]
1757: std %i0, [%g5 + (4*8)]
1758: std %i2, [%g5 + (5*8)]
1759: std %i4, [%g5 + (6*8)]
1760: std %i6, [%g5 + (7*8)]
1761: deccc %g7 ! if (n > 0) save(), rw++;
1762: bge,a 1b ! } while (--n >= 0);
1763: save %g5, 64, %g5
1764:
1765: /* stash sp for bottommost window */
1766: st %sp, [%g5 + 64 + (7*8)]
1767:
1768: /* set up new wim */
1769: rd %psr, %g7 ! g7 = (junk << 5) + new_cwp;
1770: mov 1, %g5 ! g5 = 1 << new_cwp;
1771: sll %g5, %g7, %g5
1772: wr %g5, 0, %wim ! wim = g5;
1773: and %g7, 31, %g7
1774: st %g7, [%g6 + PCB_WIM] ! cpcb->pcb_wim = new_cwp;
1775:
1776: /* fix up pcb fields */
1777: ld [%g6 + PCB_UW], %g7 ! n = cpcb->pcb_uw;
1778: add %g7, 1, %g5
1779: st %g5, [%g6 + PCB_NSAVED] ! cpcb->pcb_nsaved = n + 1;
1780: st %g0, [%g6 + PCB_UW] ! cpcb->pcb_uw = 0;
1781:
1782: /* return to trap window */
1783: 1: deccc %g7 ! do {
1784: bge 1b ! restore();
1785: restore ! } while (--n >= 0);
1786:
1787: mov %l5, %g5 ! restore g5, g6, & g7, and return
1788: mov %l6, %g6
1789: jmp %l4 + 8
1790: mov %l7, %g7
1791: /* NOTREACHED */
1792:
1793:
1794: /*
1795: * Each memory access (text or data) fault, from user or kernel mode,
1796: * comes here. We read the error register and figure out what has
1797: * happened.
1798: *
1799: * This cannot be done from C code since we must not enable traps (and
1800: * hence may not use the `save' instruction) until we have decided that
1801: * the error is or is not an asynchronous one that showed up after a
1802: * synchronous error, but which must be handled before the sync err.
1803: *
1804: * Most memory faults are user mode text or data faults, which can cause
1805: * signal delivery or ptracing, for which we must build a full trapframe.
1806: * It does not seem worthwhile to work to avoid this in the other cases,
1807: * so we store all the %g registers on the stack immediately.
1808: *
1809: * On entry:
1810: * %l0 = %psr
1811: * %l1 = return pc
1812: * %l2 = return npc
1813: * %l3 = T_TEXTFAULT or T_DATAFAULT
1814: *
1815: * Internal:
1816: * %l4 = %y, until we call mem_access_fault (then onto trapframe)
1817: * %l5 = IE_reg_addr, if async mem error
1818: *
1819: */
1.52 pk 1820:
1821: #if defined(SUN4)
1822: memfault_sun4:
! entry (see block comment above): %l0 = %psr, %l1 = pc, %l2 = npc,
! %l3 = T_TEXTFAULT or T_DATAFAULT
1.1 deraadt 1823: TRAP_SETUP(-CCFSZ-80)
1.111 pk 1824: INCR(_C_LABEL(uvmexp)+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1)
1.1 deraadt 1825:
1826: st %g1, [%sp + CCFSZ + 20] ! save g1
1827: rd %y, %l4 ! save y
1828:
1.19 deraadt 1829: /*
1830: * registers:
1831: * memerr.ctrl = memory error control reg., error if 0x80 set
1832: * memerr.vaddr = address of memory error
1833: * buserr = basically just like sun4c sync error reg but
1834: * no SER_WRITE bit (have to figure out from code).
1835: */
1.111 pk 1836: set _C_LABEL(par_err_reg), %o0 ! memerr ctrl addr -- XXX mapped?
1.20 deraadt 1837: ld [%o0], %o0 ! get it
1.19 deraadt 1838: std %g2, [%sp + CCFSZ + 24] ! save g2, g3
1839: ld [%o0], %o1 ! memerr ctrl register
1840: inc 4, %o0 ! now VA of memerr vaddr register
1841: std %g4, [%sp + CCFSZ + 32] ! (sneak g4,g5 in here)
1842: ld [%o0], %o2 ! memerr virt addr
1843: st %g0, [%o0] ! NOTE: this clears latching!!!
1844: btst ME_REG_IERR, %o1 ! memory error?
1845: ! XXX this value may not be correct
1846: ! as I got some parity errors and the
1847: ! correct bits were not on?
1848: std %g6, [%sp + CCFSZ + 40]
1.52 pk 1849: bz,a 0f ! no, just a regular fault
1.19 deraadt 1850: wr %l0, PSR_ET, %psr ! (and reenable traps)
1851:
1852: /* memory error = death for now XXX */
1853: clr %o3
1854: clr %o4
1.111 pk 1855: call _C_LABEL(memerr4_4c) ! memerr(0, ser, sva, 0, 0)
1.19 deraadt 1856: clr %o0
1.111 pk 1857: call _C_LABEL(prom_halt)
1.19 deraadt 1858: nop
1859:
1.52 pk 1860: 0:
1.19 deraadt 1861: /*
1862: * have to make SUN4 emulate SUN4C. 4C code expects
1863: * SER in %o1 and the offending VA in %o2, everything else is ok.
1864: * (must figure out if SER_WRITE should be set)
1865: */
1866: set AC_BUS_ERR, %o0 ! bus error register
1867: cmp %l3, T_TEXTFAULT ! text fault always on PC
1.50 pk 1868: be normal_mem_fault ! go
1.21 deraadt 1869: lduba [%o0] ASI_CONTROL, %o1 ! get its value
1.19 deraadt 1870:
1871: #define STORE_BIT 21 /* bit that indicates a store instruction for sparc */
1872: ld [%l1], %o3 ! offending instruction in %o3 [l1=pc]
1873: srl %o3, STORE_BIT, %o3 ! get load/store bit (wont fit simm13)
1874: btst 1, %o3 ! test for store operation
1875:
1876: bz normal_mem_fault ! if (z) is a load (so branch)
1877: sethi %hi(SER_WRITE), %o5 ! damn SER_WRITE wont fit simm13
1878: ! or %lo(SER_WRITE), %o5, %o5! not necessary since %lo is zero
1879: or %o5, %o1, %o1 ! set SER_WRITE
1880: #if defined(SUN4C) || defined(SUN4M)
1.52 pk 1881: ba,a normal_mem_fault
1882: !!nop ! XXX make efficient later
1.19 deraadt 1883: #endif /* SUN4C || SUN4M */
1884: #endif /* SUN4 */
1.52 pk 1885:
1886: memfault_sun4c:
1887: #if defined(SUN4C)
! entry (see block comment above): %l0 = %psr, %l1 = pc, %l2 = npc,
! %l3 = T_TEXTFAULT or T_DATAFAULT
1888: TRAP_SETUP(-CCFSZ-80)
1.111 pk 1889: INCR(_C_LABEL(uvmexp)+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1)
1.52 pk 1890:
1891: st %g1, [%sp + CCFSZ + 20] ! save g1
1892: rd %y, %l4 ! save y
1893:
1894: /*
1895: * We know about the layout of the error registers here.
1896: * addr reg
1897: * ---- ---
1898: * a AC_SYNC_ERR
1899: * a+4 AC_SYNC_VA
1900: * a+8 AC_ASYNC_ERR
1901: * a+12 AC_ASYNC_VA
1902: */
1.19 deraadt 1903:
1.1 deraadt 1904: #if AC_SYNC_ERR + 4 != AC_SYNC_VA || \
1905: AC_SYNC_ERR + 8 != AC_ASYNC_ERR || AC_SYNC_ERR + 12 != AC_ASYNC_VA
1906: help help help ! I, I, I wanna be a lifeguard
1907: #endif
1908: set AC_SYNC_ERR, %o0
1909: std %g2, [%sp + CCFSZ + 24] ! save g2, g3
1910: lda [%o0] ASI_CONTROL, %o1 ! sync err reg
1911: inc 4, %o0
1912: std %g4, [%sp + CCFSZ + 32] ! (sneak g4,g5 in here)
1913: lda [%o0] ASI_CONTROL, %o2 ! sync virt addr
1914: btst SER_MEMERR, %o1 ! memory error?
1915: std %g6, [%sp + CCFSZ + 40]
1916: bz,a normal_mem_fault ! no, just a regular fault
1917: wr %l0, PSR_ET, %psr ! (and reenable traps)
1918:
1919: /*
1920: * We got a synchronous memory error. It could be one that
1921: * happened because there were two stores in a row, and the
1922: * first went into the write buffer, and the second caused this
1923: * synchronous trap; so there could now be a pending async error.
1924: * This is in fact the case iff the two va's differ.
1925: */
1926: inc 4, %o0
1927: lda [%o0] ASI_CONTROL, %o3 ! async err reg
1928: inc 4, %o0
1929: lda [%o0] ASI_CONTROL, %o4 ! async virt addr
1930: cmp %o2, %o4
1931: be,a 1f ! no, not an async err
1932: wr %l0, PSR_ET, %psr ! (and reenable traps)
1933:
1934: /*
1935: * Handle the async error; ignore the sync error for now
1936: * (we may end up getting it again, but so what?).
1937: * This code is essentially the same as that at `nmi' below,
1938: * but the register usage is different and we cannot merge.
1939: */
1.62 pk 1940: sethi %hi(INTRREG_VA), %l5 ! ienab_bic(IE_ALLIE);
1941: ldub [%l5 + %lo(INTRREG_VA)], %o0
1.1 deraadt 1942: andn %o0, IE_ALLIE, %o0
1.62 pk 1943: stb %o0, [%l5 + %lo(INTRREG_VA)]
1.1 deraadt 1944:
1945: /*
1946: * Now reenable traps and call C code.
1947: * %o1 through %o4 still hold the error reg contents.
1948: * If memerr() returns, return from the trap.
1949: */
1950: wr %l0, PSR_ET, %psr
1.111 pk 1951: call _C_LABEL(memerr4_4c) ! memerr(0, ser, sva, aer, ava)
1.1 deraadt 1952: clr %o0
1953:
1954: ld [%sp + CCFSZ + 20], %g1 ! restore g1 through g7
1955: wr %l0, 0, %psr ! and disable traps, 3 instr delay
1956: ldd [%sp + CCFSZ + 24], %g2
1957: ldd [%sp + CCFSZ + 32], %g4
1958: ldd [%sp + CCFSZ + 40], %g6
1959: /* now safe to set IE_ALLIE again */
1.62 pk 1960: ldub [%l5 + %lo(INTRREG_VA)], %o1
1.1 deraadt 1961: or %o1, IE_ALLIE, %o1
1.62 pk 1962: stb %o1, [%l5 + %lo(INTRREG_VA)]
1.1 deraadt 1963: b return_from_trap
1964: wr %l4, 0, %y ! restore y
1965:
1966: /*
1967: * Trap was a synchronous memory error.
1968: * %o1 through %o4 still hold the error reg contents.
1969: */
1970: 1:
1.111 pk 1971: call _C_LABEL(memerr4_4c) ! memerr(1, ser, sva, aer, ava)
1.1 deraadt 1972: mov 1, %o0
1973:
1974: ld [%sp + CCFSZ + 20], %g1 ! restore g1 through g7
1975: ldd [%sp + CCFSZ + 24], %g2
1976: ldd [%sp + CCFSZ + 32], %g4
1977: ldd [%sp + CCFSZ + 40], %g6
1978: wr %l4, 0, %y ! restore y
1979: b return_from_trap
1980: wr %l0, 0, %psr
1981: /* NOTREACHED */
1.52 pk 1982: #endif /* SUN4C */
1983:
 1984: #if defined(SUN4M)
/*
 * memfault_sun4m: synchronous memory-fault entry for SRMMU (sun4m) CPUs.
 * First jump through the per-CPU "get sync fault" hook to capture the
 * MMU fault status/address into the cpuinfo sync-fault dump area, then
 * build a trap frame and call mem_access_fault4m(type, sfsr, sfva, &tf).
 * On entry (as for all traps): %l0 = %psr, %l1 = pc, %l2 = npc, %l3 = type.
 */
 1985: memfault_sun4m:
1.94 pk    1986: 	sethi	%hi(CPUINFO_VA), %l4
 1987: 	ld	[%l4 + %lo(CPUINFO_VA+CPUINFO_GETSYNCFLT)], %l5
	! Call the CPU-specific sync-fault fetch routine (link in %l7).
	! Delay slot passes the sync-fault dump address in %l4
	! (CPUINFO_SYNCFLTDUMP is presumably a low-offset relative to
	!  CPUINFO_VA -- NOTE(review): confirm against cpuvar.h/assym).
 1988: 	jmpl	%l5, %l7
 1989: 	 or	%l4, %lo(CPUINFO_SYNCFLTDUMP), %l4
1.52 pk    1990: 	TRAP_SETUP(-CCFSZ-80)
1.111 pk   1991: 	INCR(_C_LABEL(uvmexp)+V_FAULTS)	! cnt.v_faults++ (clobbers %o0,%o1)
1.52 pk    1992: 
 1993: 	st	%g1, [%sp + CCFSZ + 20]	! save g1
 1994: 	rd	%y, %l4			! save y
 1995: 
 1996: 	std	%g2, [%sp + CCFSZ + 24]	! save g2, g3
1.62 pk    1997: 	std	%g4, [%sp + CCFSZ + 32]	! save g4, g5
1.94 pk    1998: 	std	%g6, [%sp + CCFSZ + 40]	! sneak in g6, g7
1.52 pk    1999: 
1.94 pk    2000: 	! retrieve sync fault status/address
 2001: 	sethi	%hi(CPUINFO_VA+CPUINFO_SYNCFLTDUMP), %o0
 2002: 	ld	[%o0 + %lo(CPUINFO_VA+CPUINFO_SYNCFLTDUMP)], %o1
 2003: 	ld	[%o0 + %lo(CPUINFO_VA+CPUINFO_SYNCFLTDUMP+4)], %o2
1.52 pk    2004: 
 2005: 	wr	%l0, PSR_ET, %psr	! reenable traps
 2006: 
 2007: 	/* Finish stackframe, call C trap handler */
 2008: 	std	%l0, [%sp + CCFSZ + 0]	! set tf.tf_psr, tf.tf_pc
 2009: 	mov	%l3, %o0		! (argument: type)
 2010: 	st	%l2, [%sp + CCFSZ + 8]	! set tf.tf_npc
 2011: 	st	%l4, [%sp + CCFSZ + 12]	! set tf.tf_y
 2012: 	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_out[0], etc
 2013: 	std	%i2, [%sp + CCFSZ + 56]
 2014: 	std	%i4, [%sp + CCFSZ + 64]
 2015: 	std	%i6, [%sp + CCFSZ + 72]
1.111 pk   2016: 	! mem_access_fault(type,sfsr,sfva,&tf);
 2017: 	call	_C_LABEL(mem_access_fault4m)
1.94 pk    2018: 	 add	%sp, CCFSZ, %o3		! (argument: &tf)
1.52 pk    2019: 
	! On return, reload (possibly updated) trap-frame state and globals.
 2020: 	ldd	[%sp + CCFSZ + 0], %l0	! load new values
 2021: 	ldd	[%sp + CCFSZ + 8], %l2
 2022: 	wr	%l3, 0, %y
 2023: 	ld	[%sp + CCFSZ + 20], %g1
 2024: 	ldd	[%sp + CCFSZ + 24], %g2
 2025: 	ldd	[%sp + CCFSZ + 32], %g4
 2026: 	ldd	[%sp + CCFSZ + 40], %g6
 2027: 	ldd	[%sp + CCFSZ + 48], %i0
 2028: 	ldd	[%sp + CCFSZ + 56], %i2
 2029: 	ldd	[%sp + CCFSZ + 64], %i4
 2030: 	ldd	[%sp + CCFSZ + 72], %i6
 2031: 
 2032: 	b	return_from_trap	! go return
 2033: 	 wr	%l0, 0, %psr		! (but first disable traps again)
 2034: #endif /* SUN4M */
1.1 deraadt 2035:
 2036: normal_mem_fault:
 2037: /*
 2038: * Trap was some other error; call C code to deal with it.
 2039: * Must finish trap frame (psr,pc,npc,%y,%o0..%o7) in case
 2040: * we decide to deliver a signal or ptrace the process.
 2041: * %g1..%g7 were already set up above.
 2042: */
	! NOTE(review): %o1/%o2 (ser, sva arguments) are loaded by the
	! fault-type-specific code before it branches here; confirm
	! against the memfault entry paths above this block.
 2043: 	std	%l0, [%sp + CCFSZ + 0]	! set tf.tf_psr, tf.tf_pc
 2044: 	mov	%l3, %o0		! (argument: type)
 2045: 	st	%l2, [%sp + CCFSZ + 8]	! set tf.tf_npc
 2046: 	st	%l4, [%sp + CCFSZ + 12]	! set tf.tf_y
 2047: 	mov	%l1, %o3		! (argument: pc)
 2048: 	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_out[0], etc
 2049: 	std	%i2, [%sp + CCFSZ + 56]
 2050: 	mov	%l0, %o4		! (argument: psr)
 2051: 	std	%i4, [%sp + CCFSZ + 64]
 2052: 	std	%i6, [%sp + CCFSZ + 72]
1.111 pk   2053: 	call	_C_LABEL(mem_access_fault)! mem_access_fault(type, ser, sva,
1.1 deraadt 2054: 					!		pc, psr, &tf);
 2055: 	 add	%sp, CCFSZ, %o5		! (argument: &tf) -- in delay slot
 2056: 
	! Reload (possibly updated) trap-frame state and return.
 2057: 	ldd	[%sp + CCFSZ + 0], %l0	! load new values
 2058: 	ldd	[%sp + CCFSZ + 8], %l2
 2059: 	wr	%l3, 0, %y
 2060: 	ld	[%sp + CCFSZ + 20], %g1
 2061: 	ldd	[%sp + CCFSZ + 24], %g2
 2062: 	ldd	[%sp + CCFSZ + 32], %g4
 2063: 	ldd	[%sp + CCFSZ + 40], %g6
 2064: 	ldd	[%sp + CCFSZ + 48], %i0
 2065: 	ldd	[%sp + CCFSZ + 56], %i2
 2066: 	ldd	[%sp + CCFSZ + 64], %i4
 2067: 	ldd	[%sp + CCFSZ + 72], %i6
 2068: 
 2069: 	b	return_from_trap	! go return
 2070: 	 wr	%l0, 0, %psr		! (but first disable traps again)
2071:
2072:
 2073: /*
 2074: * fp_exception has to check to see if we are trying to save
 2075: * the FP state, and if so, continue to save the FP state.
 2076: *
 2077: * We do not even bother checking to see if we were in kernel mode,
 2078: * since users have no access to the special_fp_store instruction.
 2079: *
 2080: * This whole idea was stolen from Sprite.
 2081: */
 2082: fp_exception:
 2083: 	set	special_fp_store, %l4	! see if we came from the special one
 2084: 	cmp	%l1, %l4		! pc == special_fp_store?
 2085: 	bne	slowtrap		! no, go handle per usual
 2086: 	 EMPTY
	! Resume the FP-state save code: jmp sets pc = savefpcont and the
	! rett in its delay slot sets npc = savefpcont + 4, returning from
	! the trap directly into that continuation.
 2087: 	sethi	%hi(savefpcont), %l4	! yes, "return" to the special code
 2088: 	or	%lo(savefpcont), %l4, %l4
 2089: 	jmp	%l4
 2090: 	 rett	%l4 + 4
2091:
 2092: /*
 2093: * slowtrap() builds a trap frame and calls trap().
 2094: * This is called `slowtrap' because it *is*....
 2095: * We have to build a full frame for ptrace(), for instance.
 2096: *
 2097: * Registers:
 2098: *	%l0 = %psr
 2099: *	%l1 = return pc
 2100: *	%l2 = return npc
 2101: *	%l3 = trap code
 2102: */
 2103: slowtrap:
 2104: 	TRAP_SETUP(-CCFSZ-80)
 2105: 	/*
 2106: 	 * Phew, ready to enable traps and call C code.
 2107: 	 */
 2108: 	mov	%l3, %o0		! put type in %o0 for later
	! Lslowtrap_reenter: alternate entry used by softtrap (and the KGDB
	! path) with the trap type already in %o0.
 2109: Lslowtrap_reenter:
 2110: 	wr	%l0, PSR_ET, %psr	! traps on again
 2111: 	std	%l0, [%sp + CCFSZ]	! tf.tf_psr = psr; tf.tf_pc = ret_pc;
 2112: 	rd	%y, %l3
 2113: 	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc = return_npc; tf.tf_y = %y;
 2114: 	st	%g1, [%sp + CCFSZ + 20]
 2115: 	std	%g2, [%sp + CCFSZ + 24]
 2116: 	std	%g4, [%sp + CCFSZ + 32]
 2117: 	std	%g6, [%sp + CCFSZ + 40]
 2118: 	std	%i0, [%sp + CCFSZ + 48]
 2119: 	mov	%l0, %o1		! (psr)
 2120: 	std	%i2, [%sp + CCFSZ + 56]
 2121: 	mov	%l1, %o2		! (pc)
 2122: 	std	%i4, [%sp + CCFSZ + 64]
 2123: 	add	%sp, CCFSZ, %o3		! (&tf)
 2124: 	call	_C_LABEL(trap)		! trap(type, psr, pc, &tf)
 2125: 	 std	%i6, [%sp + CCFSZ + 72]	! (completes tf_out in delay slot)
 2126: 
	! Reload (possibly updated) trap-frame state and return.
 2127: 	ldd	[%sp + CCFSZ], %l0	! load new values
 2128: 	ldd	[%sp + CCFSZ + 8], %l2
 2129: 	wr	%l3, 0, %y
 2130: 	ld	[%sp + CCFSZ + 20], %g1
 2131: 	ldd	[%sp + CCFSZ + 24], %g2
 2132: 	ldd	[%sp + CCFSZ + 32], %g4
 2133: 	ldd	[%sp + CCFSZ + 40], %g6
 2134: 	ldd	[%sp + CCFSZ + 48], %i0
 2135: 	ldd	[%sp + CCFSZ + 56], %i2
 2136: 	ldd	[%sp + CCFSZ + 64], %i4
 2137: 	ldd	[%sp + CCFSZ + 72], %i6
 2138: 	b	return_from_trap
 2139: 	 wr	%l0, 0, %psr
2140:
 2141: /*
 2142: * Do a `software' trap by re-entering the trap code, possibly first
 2143: * switching from interrupt stack to kernel stack.  This is used for
 2144: * scheduling and signal ASTs (which generally occur from softclock or
 2145: * tty or net interrupts) and register window saves (which might occur
 2146: * from anywhere).
 2147: *
 2148: * The current window is the trap window, and it is by definition clean.
 2149: * We enter with the trap type in %o0.  All we have to do is jump to
 2150: * Lslowtrap_reenter above, but maybe after switching stacks....
 2151: */
 2152: softtrap:
1.142 mrg  2153: #if defined(MULTIPROCESSOR)
1.97 pk    2154: 	/*
 2155: 	 * The interrupt stack is not at a fixed location
 2156: 	 * and %sp must be checked against both ends.
 2157: 	 */
1.173 pk   2158: 	sethi	%hi(_EINTSTACKP), %l6
 2159: 	ld	[%l6 + %lo(_EINTSTACKP)], %l7
1.97 pk    2160: 	cmp	%sp, %l7
 2161: 	bge	Lslowtrap_reenter	! %sp above intstack end: not on it
 2162: 	 EMPTY
 2163: 	set	INT_STACK_SIZE, %l6
 2164: 	sub	%l7, %l6, %l7		! %l7 = interrupt stack base
 2165: 	cmp	%sp, %l7
 2166: 	blu	Lslowtrap_reenter	! %sp below intstack base: not on it
 2167: 	 EMPTY
 2168: #else
1.111 pk   2169: 	sethi	%hi(_C_LABEL(eintstack)), %l7
1.1 deraadt 2170: 	cmp	%sp, %l7
 2171: 	bge	Lslowtrap_reenter	! not on the interrupt stack
 2172: 	 EMPTY
1.97 pk    2173: #endif
	! We are on the interrupt stack: switch %sp to the top of the
	! current process's kernel stack (USPACE-CCFSZ-80 above cpcb)
	! before re-entering the slow trap path.
1.111 pk   2174: 	sethi	%hi(cpcb), %l6
 2175: 	ld	[%l6 + %lo(cpcb)], %l6
1.13 deraadt 2176: 	set	USPACE-CCFSZ-80, %l5
1.1 deraadt 2177: 	add	%l6, %l5, %l7
 2178: 	SET_SP_REDZONE(%l6, %l5)
 2179: 	b	Lslowtrap_reenter
 2180: 	 mov	%l7, %sp
2181:
 2182: #ifdef KGDB
 2183: /*
 2184: * bpt is entered on all breakpoint traps.
 2185: * If this is a kernel breakpoint, we do not want to call trap().
 2186: * Among other reasons, this way we can set breakpoints in trap().
 2187: */
 2188: bpt:
 2189: 	btst	PSR_PS, %l0		! breakpoint from kernel?
 2190: 	bz	slowtrap		! no, go do regular trap
 2191: 	 nop
 2192: 
1.137 mrg  2193: 	/* XXXSMP */
1.1 deraadt 2194: 	/*
 2195: 	 * Build a trap frame for kgdb_trap_glue to copy.
 2196: 	 * Enable traps but set ipl high so that we will not
 2197: 	 * see interrupts from within breakpoints.
 2198: 	 */
 2199: 	TRAP_SETUP(-CCFSZ-80)
 2200: 	or	%l0, PSR_PIL, %l4	! splhigh()
 2201: 	wr	%l4, 0, %psr		! the manual claims that this
 2202: 	wr	%l4, PSR_ET, %psr	! song and dance is necessary
 2203: 	std	%l0, [%sp + CCFSZ + 0]	! tf.tf_psr, tf.tf_pc
 2204: 	mov	%l3, %o0		! trap type arg for kgdb_trap_glue
 2205: 	rd	%y, %l3
 2206: 	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc, tf.tf_y
 2207: 	rd	%wim, %l3
 2208: 	st	%l3, [%sp + CCFSZ + 16]	! tf.tf_wim (a kgdb-only r/o field)
 2209: 	st	%g1, [%sp + CCFSZ + 20]	! tf.tf_global[1]
 2210: 	std	%g2, [%sp + CCFSZ + 24]	! etc
 2211: 	std	%g4, [%sp + CCFSZ + 32]
 2212: 	std	%g6, [%sp + CCFSZ + 40]
 2213: 	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_in[0..1]
 2214: 	std	%i2, [%sp + CCFSZ + 56]	! etc
 2215: 	std	%i4, [%sp + CCFSZ + 64]
 2216: 	std	%i6, [%sp + CCFSZ + 72]
 2217: 
 2218: 	/*
 2219: 	 * Now call kgdb_trap_glue(); if it returns, call trap().
 2220: 	 */
 2221: 	mov	%o0, %l3		! gotta save trap type
 2222: 	call	_C_LABEL(kgdb_trap_glue)! kgdb_trap_glue(type, &trapframe)
 2223: 	 add	%sp, CCFSZ, %o1		! (&trapframe)
 2224: 
 2225: 	/*
 2226: 	 * Use slowtrap to call trap---but first erase our tracks
 2227: 	 * (put the registers back the way they were).
 2228: 	 */
 2229: 	mov	%l3, %o0		! slowtrap will need trap type
 2230: 	ld	[%sp + CCFSZ + 12], %l3
 2231: 	wr	%l3, 0, %y
 2232: 	ld	[%sp + CCFSZ + 20], %g1
 2233: 	ldd	[%sp + CCFSZ + 24], %g2
 2234: 	ldd	[%sp + CCFSZ + 32], %g4
 2235: 	b	Lslowtrap_reenter
 2236: 	 ldd	[%sp + CCFSZ + 40], %g6
2237:
 2238: /*
 2239: * Enter kernel breakpoint.  Write all the windows (not including the
 2240: * current window) into the stack, so that backtrace works.  Copy the
 2241: * supplied trap frame to the kgdb stack and switch stacks.
 2242: *
 2243: * kgdb_trap_glue(type, tf0)
 2244: *	int type;
 2245: *	struct trapframe *tf0;
 2246: */
1.111 pk   2247: _ENTRY(_C_LABEL(kgdb_trap_glue))
1.1 deraadt 2248: 	save	%sp, -CCFSZ, %sp
 2249: 
 2250: 	call	_C_LABEL(write_all_windows)
 2251: 	 mov	%sp, %l4		! %l4 = current %sp
 2252: 
 2253: 	/* copy trapframe to top of kgdb stack */
	! 80 bytes = sizeof the trapframe copied here; the loop below moves
	! it 8 bytes (one doubleword) at a time.
1.127 pk   2254: 	set	_C_LABEL(kgdb_stack) + KGDB_STACK_SIZE - 80, %l0
1.1 deraadt 2255: 					! %l0 = tfcopy -> end_of_kgdb_stack
 2256: 	mov	80, %l1
 2257: 1:	ldd	[%i1], %l2
 2258: 	inc	8, %i1
 2259: 	deccc	8, %l1
 2260: 	std	%l2, [%l0]
 2261: 	bg	1b
 2262: 	 inc	8, %l0
 2263: 
 2264: #ifdef DEBUG
 2265: 	/* save old red zone and then turn it off */
 2266: 	sethi	%hi(_redzone), %l7
 2267: 	ld	[%l7 + %lo(_redzone)], %l6
 2268: 	st	%g0, [%l7 + %lo(_redzone)]
 2269: #endif
 2270: 	/* switch to kgdb stack */
 2271: 	add	%l0, -CCFSZ-80, %sp
 2272: 
 2273: 	/* if (kgdb_trap(type, tfcopy)) kgdb_rett(tfcopy); */
 2274: 	mov	%i0, %o0
 2275: 	call	_C_LABEL(kgdb_trap)
 2276: 	 add	%l0, -80, %o1
 2277: 	tst	%o0
 2278: 	bnz,a	kgdb_rett
 2279: 	 add	%l0, -80, %g1		! %g1 = &tfcopy, consumed by kgdb_rett
 2280: 
 2281: 	/*
 2282: 	 * kgdb_trap() did not handle the trap at all so the stack is
 2283: 	 * still intact.  A simple `restore' will put everything back,
 2284: 	 * after we reset the stack pointer.
 2285: 	 */
 2286: 	mov	%l4, %sp
 2287: #ifdef DEBUG
 2288: 	st	%l6, [%l7 + %lo(_redzone)]	! restore red zone
 2289: #endif
 2290: 	ret
 2291: 	 restore
2292:
 2293: /*
 2294: * Return from kgdb trap.  This is sort of special.
 2295: *
 2296: * We know that kgdb_trap_glue wrote the window above it, so that we will
 2297: * be able to (and are sure to have to) load it up.  We also know that we
 2298: * came from kernel land and can assume that the %fp (%i6) we load here
 2299: * is proper.  We must also be sure not to lower ipl (it is at splhigh())
 2300: * until we have traps disabled, due to the SPARC taking traps at the
 2301: * new ipl before noticing that PSR_ET has been turned off.  We are on
 2302: * the kgdb stack, so this could be disastrous.
 2303: *
 2304: * Note that the trapframe argument in %g1 points into the current stack
 2305: * frame (current window).  We abandon this window when we move %g1->tf_psr
 2306: * into %psr, but we will not have loaded the new %sp yet, so again traps
 2307: * must be disabled.
 2308: */
 2309: kgdb_rett:
 2310: 	rd	%psr, %g4		! turn off traps
 2311: 	wr	%g4, PSR_ET, %psr
 2312: 	/* use the three-instruction delay to do something useful */
 2313: 	ld	[%g1], %g2		! pick up new %psr
 2314: 	ld	[%g1 + 12], %g3		! set %y
 2315: 	wr	%g3, 0, %y
 2316: #ifdef DEBUG
 2317: 	st	%l6, [%l7 + %lo(_redzone)] ! and restore red zone
 2318: #endif
 2319: 	wr	%g0, 0, %wim		! enable window changes
 2320: 	nop; nop; nop
 2321: 	/* now safe to set the new psr (changes CWP, leaves traps disabled) */
 2322: 	wr	%g2, 0, %psr		! set rett psr (including cond codes)
 2323: 	/* 3 instruction delay before we can use the new window */
 2324: /*1*/	ldd	[%g1 + 24], %g2		! set new %g2, %g3
 2325: /*2*/	ldd	[%g1 + 32], %g4		! set new %g4, %g5
 2326: /*3*/	ldd	[%g1 + 40], %g6		! set new %g6, %g7
 2327: 
 2328: 	/* now we can use the new window */
 2329: 	mov	%g1, %l4		! stash tf pointer before %g1 reload
 2330: 	ld	[%l4 + 4], %l1		! get new pc
 2331: 	ld	[%l4 + 8], %l2		! get new npc
 2332: 	ld	[%l4 + 20], %g1		! set new %g1
 2333: 
 2334: 	/* set up returnee's out registers, including its %sp */
 2335: 	ldd	[%l4 + 48], %i0
 2336: 	ldd	[%l4 + 56], %i2
 2337: 	ldd	[%l4 + 64], %i4
 2338: 	ldd	[%l4 + 72], %i6
 2339: 
 2340: 	/* load returnee's window, making the window above it be invalid */
 2341: 	restore
 2342: 	restore	%g0, 1, %l1		! move to inval window and set %l1 = 1
 2343: 	rd	%psr, %l0
 2344: 	sll	%l1, %l0, %l1
 2345: 	wr	%l1, 0, %wim		! %wim = 1 << (%psr & 31)
1.111 pk   2346: 	sethi	%hi(cpcb), %l1
 2347: 	ld	[%l1 + %lo(cpcb)], %l1
1.1 deraadt 2348: 	and	%l0, 31, %l0		! CWP = %psr & 31;
 2349: 	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = CWP;
 2350: 	save	%g0, %g0, %g0		! back to window to reload
 2351: 	LOADWIN(%sp)
 2352: 	save	%g0, %g0, %g0		! back to trap window
 2353: 	/* note, we have not altered condition codes; safe to just rett */
 2354: 	RETT
 2355: #endif
2356:
 2357: /*
 2358: * syscall() builds a trap frame and calls syscall().
 2359: * sun_syscall is same but delivers sun system call number
 2360: * XXX	should not have to save&reload ALL the registers just for
 2361: *	ptrace...
 2362: */
1.122 christos 2363: _C_LABEL(_syscall):
1.1 deraadt 2364: 	TRAP_SETUP(-CCFSZ-80)
1.173 pk   2365: #ifdef DEBUG
 2366: 	or	%g1, 0x1000, %l6	! mark syscall
 2367: 	TRAP_TRACE(%l6,%l5)
 2368: #endif
	! Build the trap frame; the syscall code arrives in %g1 and is
	! passed to the C handler as the first argument.
1.1 deraadt 2369: 	wr	%l0, PSR_ET, %psr
 2370: 	std	%l0, [%sp + CCFSZ + 0]	! tf_psr, tf_pc
 2371: 	rd	%y, %l3
 2372: 	std	%l2, [%sp + CCFSZ + 8]	! tf_npc, tf_y
 2373: 	st	%g1, [%sp + CCFSZ + 20]	! tf_g[1]
 2374: 	std	%g2, [%sp + CCFSZ + 24]	! tf_g[2], tf_g[3]
 2375: 	std	%g4, [%sp + CCFSZ + 32]	! etc
 2376: 	std	%g6, [%sp + CCFSZ + 40]
 2377: 	mov	%g1, %o0		! (code)
 2378: 	std	%i0, [%sp + CCFSZ + 48]
 2379: 	add	%sp, CCFSZ, %o1		! (&tf)
 2380: 	std	%i2, [%sp + CCFSZ + 56]
 2381: 	mov	%l1, %o2		! (pc)
 2382: 	std	%i4, [%sp + CCFSZ + 64]
1.111 pk   2383: 	call	_C_LABEL(syscall)	! syscall(code, &tf, pc, suncompat)
1.1 deraadt 2384: 	 std	%i6, [%sp + CCFSZ + 72]
 2385: 	! now load em all up again, sigh
 2386: 	ldd	[%sp + CCFSZ + 0], %l0	! new %psr, new pc
 2387: 	ldd	[%sp + CCFSZ + 8], %l2	! new npc, new %y
 2388: 	wr	%l3, 0, %y
1.51 pk    2389: 	/* see `proc_trampoline' for the reason for this label */
 2390: return_from_syscall:
1.1 deraadt 2391: 	ld	[%sp + CCFSZ + 20], %g1
 2392: 	ldd	[%sp + CCFSZ + 24], %g2
 2393: 	ldd	[%sp + CCFSZ + 32], %g4
 2394: 	ldd	[%sp + CCFSZ + 40], %g6
 2395: 	ldd	[%sp + CCFSZ + 48], %i0
 2396: 	ldd	[%sp + CCFSZ + 56], %i2
 2397: 	ldd	[%sp + CCFSZ + 64], %i4
 2398: 	ldd	[%sp + CCFSZ + 72], %i6
 2399: 	b	return_from_trap
 2400: 	 wr	%l0, 0, %psr
2401:
2402: /*
2403: * Interrupts. Software interrupts must be cleared from the software
2404: * interrupt enable register. Rather than calling ienab_bic for each,
2405: * we do them in-line before enabling traps.
2406: *
2407: * After preliminary setup work, the interrupt is passed to each
2408: * registered handler in turn. These are expected to return nonzero if
2409: * they took care of the interrupt. If a handler claims the interrupt,
2410: * we exit (hardware interrupts are latched in the requestor so we'll
2411: * just take another interrupt in the unlikely event of simultaneous
2412: * interrupts from two different devices at the same level). If we go
2413: * through all the registered handlers and no one claims it, we report a
2414: * stray interrupt. This is more or less done as:
2415: *
2416: * for (ih = intrhand[intlev]; ih; ih = ih->ih_next)
2417: * if ((*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : &frame))
2418: * return;
2419: * strayintr(&frame);
2420: *
2421: * Software interrupts are almost the same with three exceptions:
2422: * (1) we clear the interrupt from the software interrupt enable
2423: * register before calling any handler (we have to clear it first
2424: * to avoid an interrupt-losing race),
2425: * (2) we always call all the registered handlers (there is no way
2426: * to tell if the single bit in the software interrupt register
2427: * represents one or many requests)
2428: * (3) we never announce a stray interrupt (because of (1), another
2429: * interrupt request can come in while we're in the handler. If
1.52 pk 2430: * the handler deals with everything for both the original & the
1.1 deraadt 2431: * new request, we'll erroneously report a stray interrupt when
 2432: *	    we take the software interrupt for the new request).
2433: *
2434: * Inputs:
2435: * %l0 = %psr
2436: * %l1 = return pc
2437: * %l2 = return npc
2438: * %l3 = interrupt level
2439: * (software interrupt only) %l4 = bits to clear in interrupt register
2440: *
2441: * Internal:
2442: * %l4, %l5: local variables
2443: * %l6 = %y
2444: * %l7 = %g1
2445: * %g2..%g7 go to stack
2446: *
2447: * An interrupt frame is built in the space for a full trapframe;
2448: * this contains the psr, pc, npc, and interrupt level.
2449: */
/*
 * softintr_sun44c: software-interrupt entry for sun4/sun4c.  First clears
 * the requested bits (%l4) from the software interrupt enable register,
 * then falls into softintr_common, which builds an interrupt frame,
 * bumps intrcnt[intlev], and calls every handler on sintrhand[intlev]
 * (all handlers are always called; see the big comment above).
 */
1.52 pk    2450: softintr_sun44c:
1.62 pk    2451: 	sethi	%hi(INTRREG_VA), %l6
 2452: 	ldub	[%l6 + %lo(INTRREG_VA)], %l5
1.1 deraadt 2453: 	andn	%l5, %l4, %l5
1.62 pk    2454: 	stb	%l5, [%l6 + %lo(INTRREG_VA)]
1.52 pk    2455: 
 2456: softintr_common:
1.1 deraadt 2457: 	INTR_SETUP(-CCFSZ-80)
 2458: 	std	%g2, [%sp + CCFSZ + 24]	! save registers
1.181 uwe  2459: 	INCR(_C_LABEL(uvmexp)+V_SOFT)	! cnt.v_soft++; (clobbers %o0,%o1)
1.1 deraadt 2460: 	mov	%g1, %l7
 2461: 	rd	%y, %l6
 2462: 	std	%g4, [%sp + CCFSZ + 32]
 2463: 	andn	%l0, PSR_PIL, %l4	! %l4 = psr & ~PSR_PIL |
 2464: 	sll	%l3, 8, %l5		!	intlev << IPLSHIFT
 2465: 	std	%g6, [%sp + CCFSZ + 40]
 2466: 	or	%l5, %l4, %l4		!			;
 2467: 	wr	%l4, 0, %psr		! the manual claims this
 2468: 	wr	%l4, PSR_ET, %psr	! song and dance is necessary
 2469: 	std	%l0, [%sp + CCFSZ + 0]	! set up intrframe/clockframe
 2470: 	sll	%l3, 2, %l5
1.111 pk   2471: 	set	_C_LABEL(intrcnt), %l4	! intrcnt[intlev]++;
1.1 deraadt 2472: 	ld	[%l4 + %l5], %o0
 2473: 	std	%l2, [%sp + CCFSZ + 8]
 2474: 	inc	%o0
 2475: 	st	%o0, [%l4 + %l5]
1.169 pk   2476: 	set	_C_LABEL(sintrhand), %l4! %l4 = sintrhand[intlev];
1.1 deraadt 2477: 	ld	[%l4 + %l5], %l4
1.175 pk   2478: 
 2479: #if defined(MULTIPROCESSOR)
1.177 pk   2480: 	/* Grab the kernel lock for interrupt levels <= IPL_CLOCK */
 2481: 	cmp	%l3, IPL_CLOCK
1.183 pk   2482: 	bgeu	3f
1.177 pk   2483: 	 st	%fp, [%sp + CCFSZ + 16]
1.175 pk   2484: 	call	_C_LABEL(intr_lock_kernel)
 2485: 	 nop
 2486: #endif
 2487: 
1.1 deraadt 2488: 	b	3f
 2489: 	 st	%fp, [%sp + CCFSZ + 16]
 2490: 
	! Handler loop: raise PIL to the handler's class IPL, then call
	! ih->ih_fun(ih->ih_arg != NULL ? ih->ih_arg : &frame).
1.166 pk   2491: 1:	ld	[%l4 + 12], %o2	! ih->ih_classipl
 2492: 	rd	%psr, %o3	!  (bits already shifted to PIL field)
 2493: 	andn	%o3, PSR_PIL, %o3	! %o3 = psr & ~PSR_PIL
 2494: 	wr	%o3, %o2, %psr	! splraise(ih->ih_classipl)
 2495: 	ld	[%l4], %o1
1.1 deraadt 2496: 	ld	[%l4 + 4], %o0
1.166 pk   2497: 	nop			! one more insn before touching ICC
1.1 deraadt 2498: 	tst	%o0
 2499: 	bz,a	2f
 2500: 	 add	%sp, CCFSZ, %o0
 2501: 2:	jmpl	%o1, %o7	!	(void)(*ih->ih_fun)(...)
 2502: 	 ld	[%l4 + 8], %l4	!	and ih = ih->ih_next
 2503: 3:	tst	%l4		! while ih != NULL
 2504: 	bnz	1b
 2505: 	 nop
1.175 pk   2506: 
 2507: #if defined(MULTIPROCESSOR)
1.177 pk   2508: 	cmp	%l3, IPL_CLOCK
1.183 pk   2509: 	bgeu	0f
1.175 pk   2510: 	 nop
 2511: 	call	_C_LABEL(intr_unlock_kernel)
 2512: 	 nop
 2513: 0:
 2514: #endif
 2515: 
1.1 deraadt 2516: 	mov	%l7, %g1
 2517: 	wr	%l6, 0, %y
 2518: 	ldd	[%sp + CCFSZ + 24], %g2
 2519: 	ldd	[%sp + CCFSZ + 32], %g4
 2520: 	ldd	[%sp + CCFSZ + 40], %g6
 2521: 	b	return_from_trap
 2522: 	 wr	%l0, 0, %psr
2523:
 2524: /*
1.52 pk    2525: * _sparc_interrupt{44c,4m} is exported for paranoia checking
 2526: * (see intr.c).
1.1 deraadt 2527: */
1.52 pk    2528: #if defined(SUN4M)
/*
 * sparc_interrupt4m: hardware-interrupt entry for sun4m.  Reads the
 * per-CPU pending-interrupt register to decide whether this level's
 * request is a hardware interrupt (-> sparc_interrupt_common) or a
 * software interrupt (clear its pending bit, -> softintr_common).
 * Under DIAGNOSTIC, a level with neither bit set falls through to
 * sparc_interrupt4m_bogus, which reports it via bogusintr().
 */
1.111 pk   2529: _ENTRY(_C_LABEL(sparc_interrupt4m))
1.149 uwe  2530: #if !defined(MSIIEP)		/* "normal" sun4m */
1.96 pk    2531: 	sethi	%hi(CPUINFO_VA+CPUINFO_INTREG), %l6
 2532: 	ld	[%l6 + %lo(CPUINFO_VA+CPUINFO_INTREG)], %l6
1.160 uwe  2533: 	mov	1, %l4
1.96 pk    2534: 	ld	[%l6 + ICR_PI_PEND_OFFSET], %l5	! get pending interrupts
1.160 uwe  2535: 	sll	%l4, %l3, %l4	! hw intr bits are in the lower halfword
 2536: 
 2537: 	btst	%l4, %l5	! has pending hw intr at this level?
 2538: 	bnz	sparc_interrupt_common
1.52 pk    2539: 	 nop
 2540: 
1.160 uwe  2541: 	! both softint pending and clear bits are in upper halfwords of
 2542: 	! their respective registers so shift the test bit in %l4 up there
 2543: 	sll	%l4, 16, %l4
1.199 pk   2544: 
 2545: #if defined(MULTIPROCESSOR)
	! NOTE(review): the `st' below sits in the delay slot of this
	! branch, so the softint pending bit is also cleared when the
	! level-14 fast-IPI path is taken.
 2546: 	cmp	%l3, 14
 2547: 	be	lev14_softint
 2548: #endif
 2549: 	st	%l4, [%l6 + ICR_PI_CLR_OFFSET]
 2550: 
1.161 uwe  2551: #ifdef DIAGNOSTIC
1.160 uwe  2552: 	btst	%l4, %l5	! make sure softint pending bit is set
 2553: 	bnz	softintr_common
1.199 pk   2554: 	 !st	%l4, [%l6 + ICR_PI_CLR_OFFSET]
1.160 uwe  2555: 	/* FALLTHROUGH to sparc_interrupt4m_bogus */
 2556: #else
 2557: 	b	softintr_common
1.199 pk   2558: 	 !st	%l4, [%l6 + ICR_PI_CLR_OFFSET]
1.160 uwe  2559: #endif
1.199 pk   2560: 	 nop
1.160 uwe  2561: 
1.149 uwe  2562: #else /* MSIIEP */
 2563: 	sethi	%hi(MSIIEP_PCIC_VA), %l6
 2564: 	mov	1, %l4
 2565: 	ld	[%l6 + PCIC_PROC_IPR_REG], %l5	! get pending interrupts
1.160 uwe  2566: 	sll	%l4, %l3, %l4	! hw intr bits are in the lower halfword
 2567: 
 2568: 	btst	%l4, %l5	! has pending hw intr at this level?
1.149 uwe  2569: 	bnz	sparc_interrupt_common
 2570: 	 nop
 2571: 
1.160 uwe  2572: #ifdef DIAGNOSTIC
 2573: 	! softint pending bits are in the upper halfword, but softint
 2574: 	! clear bits are in the lower halfword so we want the bit in %l4
 2575: 	! kept in the lower half and instead shift pending bits right
 2576: 	srl	%l5, 16, %l7
 2577: 	btst	%l4, %l7	! make sure softint pending bit is set
 2578: 	bnz	softintr_common
 2579: 	 sth	%l4, [%l6 + PCIC_SOFT_INTR_CLEAR_REG]
 2580: 	/* FALLTHROUGH to sparc_interrupt4m_bogus */
 2581: #else
1.149 uwe  2582: 	b	softintr_common
 2583: 	 sth	%l4, [%l6 + PCIC_SOFT_INTR_CLEAR_REG]
1.160 uwe  2584: #endif
 2585: 
1.149 uwe  2586: #endif /* MSIIEP */
1.160 uwe  2587: 
 2588: #ifdef DIAGNOSTIC
 2589: 	/*
 2590: 	 * sparc_interrupt4m detected that neither hardware nor software
 2591: 	 * interrupt pending bit is set for this interrupt. Report this
 2592: 	 * situation, this is most probably a symptom of a driver bug.
 2593: 	 */
 2594: sparc_interrupt4m_bogus:
 2595: 	INTR_SETUP(-CCFSZ-80)
 2596: 	std	%g2, [%sp + CCFSZ + 24]	! save registers
 2597: 	INCR(_C_LABEL(uvmexp)+V_INTR)	! cnt.v_intr++; (clobbers %o0,%o1)
 2598: 	mov	%g1, %l7
 2599: 	rd	%y, %l6
 2600: 	std	%g4, [%sp + CCFSZ + 32]
 2601: 	andn	%l0, PSR_PIL, %l4	! %l4 = psr & ~PSR_PIL |
 2602: 	sll	%l3, 8, %l5		!	intlev << IPLSHIFT
 2603: 	std	%g6, [%sp + CCFSZ + 40]
 2604: 	or	%l5, %l4, %l4		!			;
 2605: 	wr	%l4, 0, %psr		! the manual claims this
 2606: 	wr	%l4, PSR_ET, %psr	! song and dance is necessary
 2607: 	std	%l0, [%sp + CCFSZ + 0]	! set up intrframe/clockframe
 2608: 	sll	%l3, 2, %l5
 2609: 	set	_C_LABEL(intrcnt), %l4	! intrcnt[intlev]++;
 2610: 	ld	[%l4 + %l5], %o0
 2611: 	std	%l2, [%sp + CCFSZ + 8]	! set up intrframe/clockframe
 2612: 	inc	%o0
 2613: 	st	%o0, [%l4 + %l5]
 2614: 
 2615: 	st	%fp, [%sp + CCFSZ + 16]
 2616: 
 2617: 	/* Unhandled interrupts while cold cause IPL to be raised to `high' */
 2618: 	sethi	%hi(_C_LABEL(cold)), %o0
 2619: 	ld	[%o0 + %lo(_C_LABEL(cold))], %o0
 2620: 	tst	%o0			! if (cold) {
 2621: 	bnz,a	1f			!	splhigh();
 2622: 	 or	%l0, 0xf00, %l0		! } else
 2623: 
 2624: 	call	_C_LABEL(bogusintr)	!	strayintr(&intrframe)
 2625: 	 add	%sp, CCFSZ, %o0
 2626: 	/* all done: restore registers and go return */
 2627: 1:
 2628: 	mov	%l7, %g1
 2629: 	wr	%l6, 0, %y
 2630: 	ldd	[%sp + CCFSZ + 24], %g2
 2631: 	ldd	[%sp + CCFSZ + 32], %g4
 2632: 	ldd	[%sp + CCFSZ + 40], %g6
 2633: 	b	return_from_trap
 2634: 	 wr	%l0, 0, %psr
 2635: #endif /* DIAGNOSTIC */
1.149 uwe  2636: #endif /* SUN4M */
1.52 pk 2637:
/*
 * sparc_interrupt44c / sparc_interrupt_common: hardware-interrupt entry.
 * Builds an interrupt frame, bumps intrcnt[intlev], and walks
 * intrhand[intlev] calling each handler (at its own class IPL) until one
 * returns nonzero.  If none claims the interrupt, calls strayintr()
 * (or just raises IPL to high while `cold').
 */
1.111 pk   2638: _ENTRY(_C_LABEL(sparc_interrupt44c))
 2639: sparc_interrupt_common:
1.1 deraadt 2640: 	INTR_SETUP(-CCFSZ-80)
 2641: 	std	%g2, [%sp + CCFSZ + 24]	! save registers
1.111 pk   2642: 	INCR(_C_LABEL(uvmexp)+V_INTR)	! cnt.v_intr++; (clobbers %o0,%o1)
1.1 deraadt 2643: 	mov	%g1, %l7
 2644: 	rd	%y, %l6
 2645: 	std	%g4, [%sp + CCFSZ + 32]
 2646: 	andn	%l0, PSR_PIL, %l4	! %l4 = psr & ~PSR_PIL |
 2647: 	sll	%l3, 8, %l5		!	intlev << IPLSHIFT
 2648: 	std	%g6, [%sp + CCFSZ + 40]
 2649: 	or	%l5, %l4, %l4		!			;
 2650: 	wr	%l4, 0, %psr		! the manual claims this
 2651: 	wr	%l4, PSR_ET, %psr	! song and dance is necessary
 2652: 	std	%l0, [%sp + CCFSZ + 0]	! set up intrframe/clockframe
 2653: 	sll	%l3, 2, %l5
1.111 pk   2654: 	set	_C_LABEL(intrcnt), %l4	! intrcnt[intlev]++;
1.1 deraadt 2655: 	ld	[%l4 + %l5], %o0
 2656: 	std	%l2, [%sp + CCFSZ + 8]	! set up intrframe/clockframe
 2657: 	inc	%o0
 2658: 	st	%o0, [%l4 + %l5]
1.111 pk   2659: 	set	_C_LABEL(intrhand), %l4	! %l4 = intrhand[intlev];
1.1 deraadt 2660: 	ld	[%l4 + %l5], %l4
1.137 mrg  2661: 
1.175 pk   2662: #if defined(MULTIPROCESSOR)
1.177 pk   2663: 	/* Grab the kernel lock for interrupt levels <= IPL_CLOCK */
 2664: 	cmp	%l3, IPL_CLOCK
1.183 pk   2665: 	bgeu	3f
1.177 pk   2666: 	 st	%fp, [%sp + CCFSZ + 16]
1.137 mrg  2667: 	call	_C_LABEL(intr_lock_kernel)
 2668: 	 nop
 2669: #endif
1.1 deraadt 2670: 	b	3f
 2671: 	 st	%fp, [%sp + CCFSZ + 16]
 2672: 
	! Handler loop: raise PIL to the handler's class IPL, then call
	! ih->ih_fun(ih->ih_arg != NULL ? ih->ih_arg : &frame); stop as
	! soon as one handler returns nonzero.
1.166 pk   2673: 1:	ld	[%l4 + 12], %o2	! ih->ih_classipl
 2674: 	rd	%psr, %o3	!  (bits already shifted to PIL field)
 2675: 	andn	%o3, PSR_PIL, %o3	! %o3 = psr & ~PSR_PIL
 2676: 	wr	%o3, %o2, %psr	! splraise(ih->ih_classipl)
 2677: 	ld	[%l4], %o1
1.1 deraadt 2678: 	ld	[%l4 + 4], %o0
1.166 pk   2679: 	nop			! one more insn before touching ICC
1.1 deraadt 2680: 	tst	%o0
 2681: 	bz,a	2f
 2682: 	 add	%sp, CCFSZ, %o0
 2683: 2:	jmpl	%o1, %o7	!	handled = (*ih->ih_fun)(...)
 2684: 	 ld	[%l4 + 8], %l4	!	and ih = ih->ih_next
 2685: 	tst	%o0
 2686: 	bnz	4f		! if (handled) break
 2687: 	 nop
 2688: 3:	tst	%l4
 2689: 	bnz	1b		! while (ih)
 2690: 	 nop
1.76 pk    2691: 
 2692: 	/* Unhandled interrupts while cold cause IPL to be raised to `high' */
1.111 pk   2693: 	sethi	%hi(_C_LABEL(cold)), %o0
 2694: 	ld	[%o0 + %lo(_C_LABEL(cold))], %o0
1.76 pk    2695: 	tst	%o0			! if (cold) {
 2696: 	bnz,a	4f			!	splhigh();
 2697: 	 or	%l0, 0xf00, %l0		! } else
 2698: 
1.111 pk   2699: 	call	_C_LABEL(strayintr)	!	strayintr(&intrframe)
1.1 deraadt 2700: 	 add	%sp, CCFSZ, %o0
 2701: 	/* all done: restore registers and go return */
1.137 mrg  2702: 4:
1.175 pk   2703: #if defined(MULTIPROCESSOR)
1.177 pk   2704: 	cmp	%l3, IPL_CLOCK
1.183 pk   2705: 	bgeu	0f
1.170 pk   2706: 	 nop
1.137 mrg  2707: 	call	_C_LABEL(intr_unlock_kernel)
 2708: 	 nop
1.170 pk   2709: 0:
1.137 mrg  2710: #endif
 2711: 	mov	%l7, %g1
1.1 deraadt 2712: 	wr	%l6, 0, %y
 2713: 	ldd	[%sp + CCFSZ + 24], %g2
 2714: 	ldd	[%sp + CCFSZ + 32], %g4
 2715: 	ldd	[%sp + CCFSZ + 40], %g6
 2716: 	b	return_from_trap
 2717: 	 wr	%l0, 0, %psr
2718:
1.199 pk   2719: #if defined(MULTIPROCESSOR)
 2720: /*
 2721: * Level 14 software interrupt: fast IPI
 2722: *
 2723: * Dispatches directly (still in the trap window, traps off) to the
 2724: * cross-call trap handler installed in cpuinfo.xmsg_trap, with the
 2725: * first cross-call argument preloaded into %l3.
 2726: */
 2723: lev14_softint:
 2724: 	sll	%l3, 2, %l5
 2725: 	set	_C_LABEL(intrcnt), %l4	! intrcnt[intlev]++;
 2726: 	ld	[%l4 + %l5], %l7
 2727: 	inc	%l7
 2728: 	st	%l7, [%l4 + %l5]
 2729: 
 2730: 	sethi	%hi(CPUINFO_VA), %l6
 2731: 	ld	[%l6 + CPUINFO_XMSG_TRAP], %l7
 2732: #ifdef DIAGNOSTIC
 2733: 	tst	%l7
 2734: 	bz	sparc_interrupt4m_bogus	! no handler installed: report it
 2735: 	 nop
 2736: #endif
 2737: 	jmp	%l7
 2738: 	 ld	[%l6 + CPUINFO_XMSG_ARG0], %l3	! prefetch 1st arg
2739:
 2740: /*
 2741: * Fast flush handlers. xcalled from other CPUs through soft interrupt 14
 2742: * On entry:	%l6 = CPUINFO_VA
 2743: *		%l3 = first argument
 2744: *
 2745: * As always, these fast trap handlers should preserve all registers
 2746: * except %l3 to %l7
 2747: */
/*
 * ft_tlb_flush: flush one TLB entry (va, context, level) in the target
 * context, then return via the common ft_rett path.
 */
 2748: _ENTRY(_C_LABEL(ft_tlb_flush))
 2749: 	! <%l3 already fetched for us>			! va
 2750: 	ld	[%l6 + CPUINFO_XMSG_ARG2], %l5		! level
 2751: 	andn	%l3, 0xfff, %l3				! %l3 = (va&~0xfff | lvl);
 2752: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l4		! context
 2753: 	or	%l3, %l5, %l3
 2754: 
 2755: 	mov	SRMMU_CXR, %l7				!
 2756: 	lda	[%l7]ASI_SRMMU, %l5			! %l5 = old context
 2757: 	sta	%l4, [%l7]ASI_SRMMU			! set new context
 2758: 
 2759: 	sta	%g0, [%l3]ASI_SRMMUFP			! flush TLB
 2760: 
 2761: ft_rett:
 2762: 	! common return from Fast Flush handlers
 2763: 	! enter here with %l5 = ctx to restore, %l6 = CPUINFO_VA, %l7 = ctx reg
 2764: 	mov	1, %l4					!
 2765: 	sta	%l5, [%l7]ASI_SRMMU			! restore context
 2766: 	st	%l4, [%l6 + CPUINFO_XMSG_CMPLT]		! completed = 1
 2767: 
	! (`mov ..., %psr' is the synthetic form of wr ..., %g0, %psr)
 2768: 	mov	%l0, %psr				! return from trap
 2769: 	 nop
 2770: 	RETT
2771:
/*
 * ft_srmmu_vcache_flush_page: flush one page's worth of cache lines
 * (line-by-line, ASI_IDCACHELFP) in the target context, then flush the
 * page's TLB entry, and return via ft_rett.
 */
 2772: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_page))
 2773: 	! <%l3 already fetched for us>			! va
 2774: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l4		! context
 2775: 
 2776: 	mov	SRMMU_CXR, %l7				!
 2777: 	lda	[%l7]ASI_SRMMU, %l5			! %l5 = old context
 2778: 	sta	%l4, [%l7]ASI_SRMMU			! set new context
 2779: 
 2780: 	set	4096, %l4				! N = page size
 2781: 	ld	[%l6 + CPUINFO_CACHE_LINESZ], %l7
 2782: 1:
 2783: 	sta	%g0, [%l3]ASI_IDCACHELFP		! flush cache line
 2784: 	subcc	%l4, %l7, %l4				! p += linesz;
 2785: 	bpos	1b					! while ((N -= linesz) > 0)
 2786: 	 add	%l3, %l7, %l3
 2787: 
 2788: 	ld	[%l6 + CPUINFO_XMSG_ARG0], %l3		! reload va
 2789: 	!or	%l3, ASI_SRMMUFP_L3(=0), %l3		! va |= ASI_SRMMUFP_L3
 2790: 	sta	%g0, [%l3]ASI_SRMMUFP			! flush TLB
 2791: 
 2792: 	b	ft_rett
 2793: 	 mov	SRMMU_CXR, %l7				! reload ctx register
2794:
/*
 * ft_srmmu_vcache_flush_segment: flush all cache lines of segment
 * (vr,vs) with the segment-flush ASI in the target context; number of
 * lines/line size come from cpuinfo.  Returns via ft_rett.
 */
 2795: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_segment))
 2796: 	! <%l3 already fetched for us>			! vr
 2797: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l5		! vs
 2798: 	ld	[%l6 + CPUINFO_XMSG_ARG2], %l4		! context
 2799: 
 2800: 	sll	%l3, 24, %l3				! va = VSTOVA(vr,vs)
 2801: 	sll	%l5, 18, %l5
 2802: 	or	%l3, %l5, %l3
 2803: 
 2804: 	mov	SRMMU_CXR, %l7				!
 2805: 	lda	[%l7]ASI_SRMMU, %l5			! %l5 = old context
 2806: 	sta	%l4, [%l7]ASI_SRMMU			! set new context
 2807: 
 2808: 	ld	[%l6 + CPUINFO_CACHE_NLINES], %l4
 2809: 	ld	[%l6 + CPUINFO_CACHE_LINESZ], %l7
 2810: 1:
 2811: 	sta	%g0, [%l3]ASI_IDCACHELFS		! flush cache line
 2812: 	deccc	%l4					! p += linesz;
 2813: 	bpos	1b					! while (--nlines > 0)
 2814: 	 add	%l3, %l7, %l3
 2815: 
 2816: 	b	ft_rett
 2817: 	 mov	SRMMU_CXR, %l7				! reload ctx register
2818:
/*
 * ft_srmmu_vcache_flush_region: flush all cache lines of region vr with
 * the region-flush ASI in the target context.  Returns via ft_rett.
 */
 2819: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_region))
 2820: 	! <%l3 already fetched for us>			! vr
 2821: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l4		! context
 2822: 
 2823: 	sll	%l3, 24, %l3				! va = VRTOVA(vr)
 2824: 
 2825: 	mov	SRMMU_CXR, %l7				!
 2826: 	lda	[%l7]ASI_SRMMU, %l5			! %l5 = old context
 2827: 	sta	%l4, [%l7]ASI_SRMMU			! set new context
 2828: 
 2829: 	ld	[%l6 + CPUINFO_CACHE_NLINES], %l4
 2830: 	ld	[%l6 + CPUINFO_CACHE_LINESZ], %l7
 2831: 1:
 2832: 	sta	%g0, [%l3]ASI_IDCACHELFR		! flush cache line
 2833: 	deccc	%l4					! p += linesz;
 2834: 	bpos	1b					! while (--nlines > 0)
 2835: 	 add	%l3, %l7, %l3
 2836: 
 2837: 	b	ft_rett
 2838: 	 mov	SRMMU_CXR, %l7				! reload ctx register
2839:
/*
 * ft_srmmu_vcache_flush_context: flush the whole cache for the given
 * context with the context-flush ASI.  Returns via ft_rett.
 */
 2840: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_context))
 2841: 	! <%l3 already fetched for us>			! context
 2842: 
 2843: 	mov	SRMMU_CXR, %l7				!
 2844: 	lda	[%l7]ASI_SRMMU, %l5			! %l5 = old context
 2845: 	sta	%l3, [%l7]ASI_SRMMU			! set new context
 2846: 
 2847: 	ld	[%l6 + CPUINFO_CACHE_NLINES], %l4
 2848: 	ld	[%l6 + CPUINFO_CACHE_LINESZ], %l7
 2849: 	mov	%g0, %l3				! va = 0
 2850: 1:
 2851: 	sta	%g0, [%l3]ASI_IDCACHELFC		! flush cache line
 2852: 	deccc	%l4					! p += linesz;
 2853: 	bpos	1b					! while (--nlines > 0)
 2854: 	 add	%l3, %l7, %l3
 2855: 
 2856: 	b	ft_rett
 2857: 	 mov	SRMMU_CXR, %l7				! reload ctx register
2858:
/*
 * ft_srmmu_vcache_flush_range: flush the cache lines covering [va,
 * va+size) in the target context, then flush the TLB entry of every
 * page touched.  Returns via ft_rett.
 */
 2859: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_range))
 2860: 	! <%l3 already fetched for us>			! va
 2861: 	ld	[%l6 + CPUINFO_XMSG_ARG2], %l4		! context
 2862: 
 2863: 	andn	%l3, 7, %l3				! double-word alignment
 2864: 
 2865: 	mov	SRMMU_CXR, %l7				!
 2866: 	lda	[%l7]ASI_SRMMU, %l5			! %l5 = old context
 2867: 	sta	%l4, [%l7]ASI_SRMMU			! set new context
 2868: 
 2869: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l4		! size
 2870: 	ld	[%l6 + CPUINFO_CACHE_LINESZ], %l7
 2871: 1:
 2872: 	sta	%g0, [%l3]ASI_IDCACHELFP		! flush cache line
 2873: 	subcc	%l4, %l7, %l4				! p += linesz;
 2874: 	bpos	1b					! while ((sz -= linesz) > 0)
 2875: 	 add	%l3, %l7, %l3
 2876: 
 2877: 	/* Flush TLB on all pages we visited */
 2878: 	ld	[%l6 + CPUINFO_XMSG_ARG0], %l3		! reload va
 2879: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l4		! reload sz
 2880: 	add	%l3, %l4, %l4				! %l4 = round_page(va + sz)
 2881: 	add	%l4, 0xfff, %l4
 2882: 	andn	%l4, 0xfff, %l4
 2883: 	andn	%l3, 0xfff, %l3				! va &= ~PGOFSET;
 2884: 	sub	%l4, %l3, %l4				! and finally: size rounded
 2885: 							! to page boundary
 2886: 	set	4096, %l7				! N = page size
 2887: 
 2888: 2:
 2889: 	!or	%l3, ASI_SRMMUFP_L3(=0), %l3		! va |= ASI_SRMMUFP_L3
 2890: 	sta	%g0, [%l3]ASI_SRMMUFP			! flush TLB
 2891: 	subcc	%l4, %l7, %l4				! while ((sz -= PGSIZE) > 0)
 2892: 	bpos	2b
 2893: 	 add	%l3, %l7, %l3
 2894: 
 2895: 	b	ft_rett
 2896: 	 mov	SRMMU_CXR, %l7				! reload ctx register
 2897: 
 2898: #endif /* MULTIPROCESSOR */
2899:
1.1 deraadt 2900: #ifdef notyet
2901: /*
2902:  * Level 12 (ZS serial) interrupt.  Handle it quickly, schedule a
2903:  * software interrupt, and get out.  Do the software interrupt directly
2904:  * if we would just take it on the way out.
2905:  *
2906:  * Input:
2907:  *	%l0 = %psr
2908:  *	%l1 = return pc
2909:  *	%l2 = return npc
2910:  * Internal:
2911:  *	%l3 = zs device
2912:  *	%l4, %l5 = temporary
2913:  *	%l6 = rr3 (or temporary data) + 0x100 => need soft int
2914:  *	%l7 = zs soft status
2915:  */
2916: zshard:
	! NOTE: this fast-path ZS handler was never implemented; the label
	! and its body are compiled out by the `notyet' guard above.
2917: #endif /* notyet */
2918:
2919: /*
2920: * Level 15 interrupt. An async memory error has occurred;
2921: * take care of it (typically by panicking, but hey...).
2922: * %l0 = %psr
2923: * %l1 = return pc
2924: * %l2 = return npc
2925: * %l3 = 15 * 4 (why? just because!)
2926: *
2927: * Internal:
2928: * %l4 = %y
2929: * %l5 = %g1
2930: * %l6 = %g6
2931: * %l7 = %g7
2932: * g2, g3, g4, g5 go to stack
2933: *
2934: * This code is almost the same as that in mem_access_fault,
2935: * except that we already know the problem is not a `normal' fault,
2936: * and that we must be extra-careful with interrupt enables.
2937: */
1.52 pk 2938:
2939: #if defined(SUN4)
2940: nmi_sun4:
	! Level 15 (NMI) entry for sun4: mask all interrupts while traps
	! are still off, save globals, then join the common C-call tail.
2941: 	INTR_SETUP(-CCFSZ-80)
2942: 	INCR(_C_LABEL(uvmexp)+V_INTR)	! cnt.v_intr++; (clobbers %o0,%o1)
2943: 	/*
2944: 	 * Level 15 interrupts are nonmaskable, so with traps off,
2945: 	 * disable all interrupts to prevent recursion.
2946: 	 */
2947: 	sethi	%hi(INTRREG_VA), %o0
2948: 	ldub	[%o0 + %lo(INTRREG_VA)], %o1
2949: 	andn	%o1, IE_ALLIE, %o1	! clear the enable-all bit
2950: 	stb	%o1, [%o0 + %lo(INTRREG_VA)]
2951: 	wr	%l0, PSR_ET, %psr	! okay, turn traps on again
2952: 
2953: 	std	%g2, [%sp + CCFSZ + 0]	! save g2, g3
2954: 	rd	%y, %l4			! save y
2955: 
2956: 	std	%g4, [%sp + CCFSZ + 8]	! save g4, g5
2957: 	mov	%g1, %l5		! save g1, g6, g7
2958: 	mov	%g6, %l6
2959: 	mov	%g7, %l7
2960: #if defined(SUN4C) || defined(SUN4M)
2961: 	b,a	nmi_common		! (only needed when nmi_common is not adjacent)
2962: #endif /* SUN4C || SUN4M */
2963: #endif
2964:
2965: #if defined(SUN4C)
2966: nmi_sun4c:
	! Level 15 (NMI) entry for sun4c: like nmi_sun4, but also gathers
	! the sync/async error registers for memerr4_4c(), then falls
	! through into nmi_common with the arguments in %o1..%o4.
2967: 	INTR_SETUP(-CCFSZ-80)
2968: 	INCR(_C_LABEL(uvmexp)+V_INTR)	! cnt.v_intr++; (clobbers %o0,%o1)
2969: 	/*
2970: 	 * Level 15 interrupts are nonmaskable, so with traps off,
2971: 	 * disable all interrupts to prevent recursion.
2972: 	 */
2973: 	sethi	%hi(INTRREG_VA), %o0
2974: 	ldub	[%o0 + %lo(INTRREG_VA)], %o1
2975: 	andn	%o1, IE_ALLIE, %o1
2976: 	stb	%o1, [%o0 + %lo(INTRREG_VA)]
2977: 	wr	%l0, PSR_ET, %psr	! okay, turn traps on again
2978: 
2979: 	std	%g2, [%sp + CCFSZ + 0]	! save g2, g3
2980: 	rd	%y, %l4			! save y
2981: 
2982: 	! must read the sync error register too.
2983: 	set	AC_SYNC_ERR, %o0
2984: 	lda	[%o0] ASI_CONTROL, %o1	! sync err reg
2985: 	inc	4, %o0
2986: 	lda	[%o0] ASI_CONTROL, %o2	! sync virt addr
2987: 	std	%g4, [%sp + CCFSZ + 8]	! save g4,g5
2988: 	mov	%g1, %l5		! save g1,g6,g7
2989: 	mov	%g6, %l6
2990: 	mov	%g7, %l7
2991: 	inc	4, %o0
2992: 	lda	[%o0] ASI_CONTROL, %o3	! async err reg
2993: 	inc	4, %o0
2994: 	lda	[%o0] ASI_CONTROL, %o4	! async virt addr
2995: #if defined(SUN4M)
2996: 	!!b,a	nmi_common		! fall through; branch not needed
2997: #endif /* SUN4M */
2998: #endif /* SUN4C */
2999:
3000: nmi_common:
	! Common tail for the sun4/sun4c NMI handlers: call the C memory
	! error handler, restore the globals saved by the entry code,
	! re-enable interrupts, and return through return_from_trap.
3001: 	! and call C code
3002: 	call	_C_LABEL(memerr4_4c)	! memerr(0, ser, sva, aer, ava)
3003: 	clr	%o0			! (delay slot: arg0 = 0)
3004: 
3005: 	mov	%l5, %g1		! restore g1 through g7
3006: 	ldd	[%sp + CCFSZ + 0], %g2
3007: 	ldd	[%sp + CCFSZ + 8], %g4
3008: 	wr	%l0, 0, %psr		! re-disable traps
3009: 	mov	%l6, %g6
3010: 	mov	%l7, %g7
3011: 
3012: 	! set IE_ALLIE again (safe, we disabled traps again above)
3013: 	sethi	%hi(INTRREG_VA), %o0
3014: 	ldub	[%o0 + %lo(INTRREG_VA)], %o1
3015: 	or	%o1, IE_ALLIE, %o1
3016: 	stb	%o1, [%o0 + %lo(INTRREG_VA)]
3017: 	b	return_from_trap
3018: 	wr	%l4, 0, %y		! restore y (delay slot)
3019:
1.52 pk 3020: #if defined(SUN4M)
3021: nmi_sun4m:
	! Level 15 (NMI) entry for sun4m: distinguish a hard NMI (memory
	! error) from a soft level-15 interrupt (e.g. an MP IPI), clear
	! the pending bit, and dispatch to nmi_hard(0) or nmi_soft(&tf).
3022: 	INTR_SETUP(-CCFSZ-80)
3023: 	INCR(_C_LABEL(uvmexp)+V_INTR)	! cnt.v_intr++; (clobbers %o0,%o1)
3024: 
3025: 	/* Read the Pending Interrupts register */
3026: 	sethi	%hi(CPUINFO_VA+CPUINFO_INTREG), %l6
3027: 	ld	[%l6 + %lo(CPUINFO_VA+CPUINFO_INTREG)], %l6
3028: 	ld	[%l6 + ICR_PI_PEND_OFFSET], %l5	! get pending interrupts
3029: 
3030: 	set	_C_LABEL(nmi_soft), %o3	! assume a softint
3031: 	set	PINTR_IC, %o1		! hard lvl 15 bit
3032: 	sethi	%hi(PINTR_SINTRLEV(15)), %o0	! soft lvl 15 bit
3033: 	btst	%o0, %l5		! soft level 15?
3034: 	bnz,a	1f			! (annulled delay slot: only on taken branch)
3035: 	mov	%o0, %o1		! shift int clear bit to SOFTINT 15
3036: 
3037: 	set	_C_LABEL(nmi_hard), %o3	/* it's a hardint; switch handler */
3038: 
3039: 	/*
3040: 	 * Level 15 interrupts are nonmaskable, so with traps off,
3041: 	 * disable all interrupts to prevent recursion.
3042: 	 */
3043: 	sethi	%hi(ICR_SI_SET), %o0
3044: 	set	SINTR_MA, %o2
3045: 	st	%o2, [%o0 + %lo(ICR_SI_SET)]	! set `mask all' bit
3046: #if defined(MULTIPROCESSOR) && defined(DDB)
3047: 	b	2f			! hard NMI: skip trapframe setup
3048: 	clr	%o0			! (delay slot: nmi_hard arg = 0)
3049: #endif
3050: 
3051: 1:
3052: #if defined(MULTIPROCESSOR) && defined(DDB)
3053: 	/*
3054: 	 * Setup a trapframe for nmi_soft; this might be an IPI telling
3055: 	 * us to pause, so lets save some state for DDB to get at.
3056: 	 */
3057: 	std	%l0, [%sp + CCFSZ]	! tf.tf_psr = psr; tf.tf_pc = ret_pc;
3058: 	rd	%y, %l3
3059: 	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc = return_npc; tf.tf_y = %y;
3060: 	st	%g1, [%sp + CCFSZ + 20]
3061: 	std	%g2, [%sp + CCFSZ + 24]
3062: 	std	%g4, [%sp + CCFSZ + 32]
3063: 	std	%g6, [%sp + CCFSZ + 40]
3064: 	std	%i0, [%sp + CCFSZ + 48]
3065: 	std	%i2, [%sp + CCFSZ + 56]
3066: 	std	%i4, [%sp + CCFSZ + 64]
3067: 	std	%i6, [%sp + CCFSZ + 72]
3068: 	add	%sp, CCFSZ, %o0		! arg = &tf
3069: 2:
3070: #else
3071: 	clr	%o0			! no trapframe: handler arg = 0
3072: #endif
3073: 	/*
3074: 	 * Now clear the NMI. Apparently, we must allow some time
3075: 	 * to let the bits sink in..
3076: 	 */
3077: 	st	%o1, [%l6 + ICR_PI_CLR_OFFSET]
3078: 	nop; nop; nop;
3079: 	ld	[%l6 + ICR_PI_PEND_OFFSET], %g0	! drain register!?
3080: 	nop;
3081: 
3082: 	or	%l0, PSR_PIL, %o4	! splhigh()
3083: 	wr	%o4, 0, %psr		! (first write settles PIL;
3084: 	wr	%o4, PSR_ET, %psr	! turn traps on again
3085: 
3086: 	std	%g2, [%sp + CCFSZ + 80]	! save g2, g3
3087: 	rd	%y, %l4			! save y
3088: 	std	%g4, [%sp + CCFSZ + 88]	! save g4,g5
3089: 
3090: 	/* Finish stackframe, call C trap handler */
3091: 	mov	%g1, %l5		! save g1,g6,g7
3092: 	mov	%g6, %l6
3093: 
3094: 	jmpl	%o3, %o7		! nmi_hard(0) or nmi_soft(&tf)
3095: 	mov	%g7, %l7		! (delay slot: finish saving g7)
3096: 
3097: 	mov	%l5, %g1		! restore g1 through g7
3098: 	ldd	[%sp + CCFSZ + 80], %g2
3099: 	ldd	[%sp + CCFSZ + 88], %g4
3100: 	wr	%l0, 0, %psr		! re-disable traps
3101: 	mov	%l6, %g6
3102: 	mov	%l7, %g7
3103: 
3104: 	!cmp	%o0, 0			! was this a soft nmi
3105: 	!be	4f
3106: 	/* XXX - we need to unblock `mask all ints' only on a hard nmi */
3107: 
3108: 	! enable interrupts again (safe, we disabled traps again above)
3109: 	sethi	%hi(ICR_SI_CLR), %o0
3110: 	set	SINTR_MA, %o1
3111: 	st	%o1, [%o0 + %lo(ICR_SI_CLR)]	! clear `mask all' bit
3112: 
3113: 4:
3114: 	b	return_from_trap
3115: 	wr	%l4, 0, %y		! restore y (delay slot)
3116: #endif /* SUN4M */
3117:
3118: #ifdef GPROF
3119: .globl window_of, winof_user
3120: .globl window_uf, winuf_user, winuf_ok, winuf_invalid
3121: .globl return_from_trap, rft_kernel, rft_user, rft_invalid
3122: .globl softtrap, slowtrap
1.122 christos 3123: .globl clean_trap_window, _C_LABEL(_syscall)
1.52 pk 3124: #endif
1.1 deraadt 3125:
3126: /*
3127: * Window overflow trap handler.
3128: * %l0 = %psr
3129: * %l1 = return pc
3130: * %l2 = return npc
3131: */
3132: window_of:
3133: #ifdef TRIVIAL_WINDOW_OVERFLOW_HANDLER
3134: 	/* a trivial version that assumes %sp is ok */
3135: 	/* (for testing only!) */
3136: 	save	%g0, %g0, %g0		! move into the overflowed window
3137: 	std	%l0, [%sp + (0*8)]
3138: 	rd	%psr, %l0
3139: 	mov	1, %l1
3140: 	sll	%l1, %l0, %l0		! new %wim = 1 << CWP
3141: 	wr	%l0, 0, %wim
3142: 	std	%l2, [%sp + (1*8)]
3143: 	std	%l4, [%sp + (2*8)]
3144: 	std	%l6, [%sp + (3*8)]
3145: 	std	%i0, [%sp + (4*8)]
3146: 	std	%i2, [%sp + (5*8)]
3147: 	std	%i4, [%sp + (6*8)]
3148: 	std	%i6, [%sp + (7*8)]
3149: 	restore				! back to the trap window
3150: 	RETT
3151: #else
3152: 	/*
3153: 	 * This is similar to TRAP_SETUP, but we do not want to spend
3154: 	 * a lot of time, so we have separate paths for kernel and user.
3155: 	 * We also know for sure that the window has overflowed.
3156: 	 */
3157: 	TRAP_TRACE2(5,%l6,%l5)
3158: 	btst	PSR_PS, %l0		! trap taken from kernel mode?
3159: 	bz	winof_user
3160: 	sethi	%hi(clean_trap_window), %l7	! (harmless in delay slot)
3161: 
3162: 	/*
3163: 	 * Overflow from kernel mode.  Call clean_trap_window to
3164: 	 * do the dirty work, then just return, since we know prev
3165: 	 * window is valid.  clean_trap_window might dump all *user*
3166: 	 * windows into the pcb, but we do not care: there is at
3167: 	 * least one kernel window (a trap or interrupt frame!)
3168: 	 * above us.
3169: 	 */
3170: 	jmpl	%l7 + %lo(clean_trap_window), %l4
3171: 	mov	%g7, %l7		! for clean_trap_window
3172: 
3173: 	wr	%l0, 0, %psr		! put back the @%*! cond. codes
3174: 	nop				! (let them settle in)
3175: 	RETT
3176: 
3177: winof_user:
3178: 	/*
3179: 	 * Overflow from user mode.
3180: 	 * If clean_trap_window dumps the registers into the pcb,
3181: 	 * rft_user will need to call trap(), so we need space for
3182: 	 * a trap frame.  We also have to compute pcb_nw.
3183: 	 *
3184: 	 * SHOULD EXPAND IN LINE TO AVOID BUILDING TRAP FRAME ON
3185: 	 * `EASY' SAVES
3186: 	 */
3187: 	sethi	%hi(cpcb), %l6
3188: 	ld	[%l6 + %lo(cpcb)], %l6
3189: 	ld	[%l6 + PCB_WIM], %l5
3190: 	and	%l0, 31, %l3		! %l3 = CWP
3191: 	sub	%l3, %l5, %l5		/* l5 = CWP - pcb_wim */
3192: 	set	uwtab, %l4
3193: 	ldub	[%l4 + %l5], %l5	/* l5 = uwtab[l5] */
3194: 	st	%l5, [%l6 + PCB_UW]	! pcb_uw = number of user windows
3195: 	jmpl	%l7 + %lo(clean_trap_window), %l4
3196: 	mov	%g7, %l7		! for clean_trap_window
3197: 	sethi	%hi(cpcb), %l6		! (cpcb may have moved? reload)
3198: 	ld	[%l6 + %lo(cpcb)], %l6
3199: 	set	USPACE-CCFSZ-80, %l5
3200: 	add	%l6, %l5, %sp		/* over to kernel stack */
3201: 	CHECK_SP_REDZONE(%l6, %l5)
3202: 
3203: 	/*
3204: 	 * Copy return_from_trap far enough to allow us
3205: 	 * to jump directly to rft_user_or_recover_pcb_windows
3206: 	 * (since we know that is where we are headed).
3207: 	 */
3208: 	! and	%l0, 31, %l3		! still set (clean_trap_window
3209: 					! leaves this register alone)
3210: 	set	wmask, %l6
3211: 	ldub	[%l6 + %l3], %l5	! %l5 = 1 << ((CWP + 1) % nwindows)
3212: 	b	rft_user_or_recover_pcb_windows
3213: 	rd	%wim, %l4		! (read %wim first)
3214: #endif	/* end `real' version of window overflow trap handler */
3215:
3216: /*
3217: * Window underflow trap handler.
3218: * %l0 = %psr
3219: * %l1 = return pc
3220: * %l2 = return npc
3221: *
3222: * A picture:
3223: *
3224: * T R I X
3225: * 0 0 0 1 0 0 0 (%wim)
3226: * [bit numbers increase towards the right;
3227: * `restore' moves right & `save' moves left]
3228: *
3229: * T is the current (Trap) window, R is the window that attempted
3230: * a `Restore' instruction, I is the Invalid window, and X is the
3231: * window we want to make invalid before we return.
3232: *
3233: * Since window R is valid, we cannot use rft_user to restore stuff
3234: * for us. We have to duplicate its logic. YUCK.
3235: *
3236: * Incidentally, TRIX are for kids. Silly rabbit!
3237: */
3238: window_uf:
3239: #ifdef TRIVIAL_WINDOW_UNDERFLOW_HANDLER
3240: 	wr	%g0, 0, %wim		! allow us to enter I
3241: 	restore				! to R
3242: 	nop
3243: 	nop
3244: 	restore				! to I
3245: 	restore	%g0, 1, %l1		! to X
3246: 	rd	%psr, %l0
3247: 	sll	%l1, %l0, %l0		! new %wim = 1 << CWP
3248: 	wr	%l0, 0, %wim
3249: 	save	%g0, %g0, %g0		! back to I
3250: 	LOADWIN(%sp)
3251: 	save	%g0, %g0, %g0		! back to R
3252: 	save	%g0, %g0, %g0		! back to T
3253: 	RETT
3254: #else
3255: 	TRAP_TRACE2(6,%l6,%l5)
3256: 	wr	%g0, 0, %wim		! allow us to enter I
3257: 	btst	PSR_PS, %l0		! trap taken from kernel mode?
3258: 	restore				! enter window R
3259: 	bz	winuf_user
3260: 	restore				! enter window I
3261: 
3262: 	/*
3263: 	 * Underflow from kernel mode.  Just recover the
3264: 	 * registers and go (except that we have to update
3265: 	 * the blasted user pcb fields).
3266: 	 */
3267: 	restore	%g0, 1, %l1		! enter window X, then set %l1 to 1
3268: 	rd	%psr, %l0		! cwp = %psr & 31;
3269: 	and	%l0, 31, %l0
3270: 	sll	%l1, %l0, %l1		! wim = 1 << cwp;
3271: 	wr	%l1, 0, %wim		! setwim(wim);
3272: 	sethi	%hi(cpcb), %l1
3273: 	ld	[%l1 + %lo(cpcb)], %l1
3274: 	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = cwp;
3275: 	save	%g0, %g0, %g0		! back to window I
3276: 	LOADWIN(%sp)
3277: 	save	%g0, %g0, %g0		! back to R
3278: 	save	%g0, %g0, %g0		! and then to T
3279: 	wr	%l0, 0, %psr		! fix those cond codes....
3280: 	nop				! (let them settle in)
3281: 	RETT
3282: 
3283: winuf_user:
3284: 	/*
3285: 	 * Underflow from user mode.
3286: 	 *
3287: 	 * We cannot use rft_user (as noted above) because
3288: 	 * we must re-execute the `restore' instruction.
3289: 	 * Since it could be, e.g., `restore %l0,0,%l0',
3290: 	 * it is not okay to touch R's registers either.
3291: 	 *
3292: 	 * We are now in window I.
3293: 	 */
3294: 	btst	7, %sp			! if unaligned, it is invalid
3295: 	bne	winuf_invalid
3296: 	EMPTY
3297: 
3298: 	sethi	%hi(_C_LABEL(pgofset)), %l4
3299: 	ld	[%l4 + %lo(_C_LABEL(pgofset))], %l4
3300: 	PTE_OF_ADDR(%sp, %l7, winuf_invalid, %l4, NOP_ON_4M_5)
3301: 	CMP_PTE_USER_READ(%l7, %l5, NOP_ON_4M_6) ! if first page not readable,
3302: 	bne	winuf_invalid		! it is invalid
3303: 	EMPTY
3304: 	SLT_IF_1PAGE_RW(%sp, %l7, %l4)	! first page is readable
3305: 	bl,a	winuf_ok		! if only one page, enter window X
3306: 	restore	%g0, 1, %l1		! and goto ok, & set %l1 to 1
3307: 	add	%sp, 7*8, %l5		! %l5 = last dword of the save area
3308: 	add	%l4, 62, %l4
3309: 	PTE_OF_ADDR(%l5, %l7, winuf_invalid, %l4, NOP_ON_4M_7)
3310: 	CMP_PTE_USER_READ(%l7, %l5, NOP_ON_4M_8) ! check second page too
3311: 	be,a	winuf_ok		! enter window X and goto ok
3312: 	restore	%g0, 1, %l1		! (and then set %l1 to 1)
3313: 
3314: winuf_invalid:
3315: 	/*
3316: 	 * We were unable to restore the window because %sp
3317: 	 * is invalid or paged out.  Return to the trap window
3318: 	 * and call trap(T_WINUF).  This will save R to the user
3319: 	 * stack, then load both R and I into the pcb rw[] area,
3320: 	 * and return with pcb_nsaved set to -1 for success, 0 for
3321: 	 * failure.  `Failure' indicates that someone goofed with the
3322: 	 * trap registers (e.g., signals), so that we need to return
3323: 	 * from the trap as from a syscall (probably to a signal handler)
3324: 	 * and let it retry the restore instruction later.  Note that
3325: 	 * window R will have been pushed out to user space, and thus
3326: 	 * be the invalid window, by the time we get back here.  (We
3327: 	 * continue to label it R anyway.)  We must also set %wim again,
3328: 	 * and set pcb_uw to 1, before enabling traps.  (Window R is the
3329: 	 * only window, and it is a user window).
3330: 	 */
3331: 	save	%g0, %g0, %g0		! back to R
3332: 	save	%g0, 1, %l4		! back to T, then %l4 = 1
3333: 	sethi	%hi(cpcb), %l6
3334: 	ld	[%l6 + %lo(cpcb)], %l6
3335: 	st	%l4, [%l6 + PCB_UW]	! pcb_uw = 1
3336: 	ld	[%l6 + PCB_WIM], %l5	! get log2(%wim)
3337: 	sll	%l4, %l5, %l4		! %l4 = old %wim
3338: 	wr	%l4, 0, %wim		! window I is now invalid again
3339: 	set	USPACE-CCFSZ-80, %l5
3340: 	add	%l6, %l5, %sp		! get onto kernel stack
3341: 	CHECK_SP_REDZONE(%l6, %l5)
3342: 
3343: 	/*
3344: 	 * Okay, call trap(T_WINUF, psr, pc, &tf).
3345: 	 * See `slowtrap' above for operation.
3346: 	 */
3347: 	wr	%l0, PSR_ET, %psr
3348: 	std	%l0, [%sp + CCFSZ + 0]	! tf.tf_psr, tf.tf_pc
3349: 	rd	%y, %l3
3350: 	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc, tf.tf_y
3351: 	mov	T_WINUF, %o0
3352: 	st	%g1, [%sp + CCFSZ + 20]	! tf.tf_global[1]
3353: 	mov	%l0, %o1		! arg1 = psr
3354: 	std	%g2, [%sp + CCFSZ + 24]	! etc
3355: 	mov	%l1, %o2		! arg2 = pc
3356: 	std	%g4, [%sp + CCFSZ + 32]
3357: 	add	%sp, CCFSZ, %o3		! arg3 = &tf
3358: 	std	%g6, [%sp + CCFSZ + 40]
3359: 	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_out[0], etc
3360: 	std	%i2, [%sp + CCFSZ + 56]
3361: 	std	%i4, [%sp + CCFSZ + 64]
3362: 	call	_C_LABEL(trap)		! trap(T_WINUF, psr, pc, &tf)
3363: 	std	%i6, [%sp + CCFSZ + 72]	! tf.tf_out[6]
3364: 
3365: 	ldd	[%sp + CCFSZ + 0], %l0	! new psr, pc
3366: 	ldd	[%sp + CCFSZ + 8], %l2	! new npc, %y
3367: 	wr	%l3, 0, %y
3368: 	ld	[%sp + CCFSZ + 20], %g1
3369: 	ldd	[%sp + CCFSZ + 24], %g2
3370: 	ldd	[%sp + CCFSZ + 32], %g4
3371: 	ldd	[%sp + CCFSZ + 40], %g6
3372: 	ldd	[%sp + CCFSZ + 48], %i0	! %o0 for window R, etc
3373: 	ldd	[%sp + CCFSZ + 56], %i2
3374: 	ldd	[%sp + CCFSZ + 64], %i4
3375: 	wr	%l0, 0, %psr		! disable traps: test must be atomic
3376: 	ldd	[%sp + CCFSZ + 72], %i6
3377: 	sethi	%hi(cpcb), %l6
3378: 	ld	[%l6 + %lo(cpcb)], %l6
3379: 	ld	[%l6 + PCB_NSAVED], %l7	! if nsaved is -1, we have our regs
3380: 	tst	%l7
3381: 	bl,a	1f			! got them
3382: 	wr	%g0, 0, %wim		! allow us to enter windows R, I
3383: 	b,a	return_from_trap	! nsaved >= 0: retry via normal path
3384: 
3385: 	/*
3386: 	 * Got 'em.  Load 'em up.
3387: 	 */
3388: 1:
3389: 	mov	%g6, %l3		! save %g6; set %g6 = cpcb
3390: 	mov	%l6, %g6
3391: 	st	%g0, [%g6 + PCB_NSAVED]	! and clear magic flag
3392: 	restore				! from T to R
3393: 	restore				! from R to I
3394: 	restore	%g0, 1, %l1		! from I to X, then %l1 = 1
3395: 	rd	%psr, %l0		! cwp = %psr;
3396: 	sll	%l1, %l0, %l1
3397: 	wr	%l1, 0, %wim		! make window X invalid
3398: 	and	%l0, 31, %l0
3399: 	st	%l0, [%g6 + PCB_WIM]	! cpcb->pcb_wim = cwp;
3400: 	nop				! unnecessary? old wim was 0...
3401: 	save	%g0, %g0, %g0		! back to I
3402: 	LOADWIN(%g6 + PCB_RW + 64)	! load from rw[1]
3403: 	save	%g0, %g0, %g0		! back to R
3404: 	LOADWIN(%g6 + PCB_RW)		! load from rw[0]
3405: 	save	%g0, %g0, %g0		! back to T
3406: 	wr	%l0, 0, %psr		! restore condition codes
3407: 	mov	%l3, %g6		! fix %g6
3408: 	RETT
3409: 
3410: 	/*
3411: 	 * Restoring from user stack, but everything has checked out
3412: 	 * as good.  We are now in window X, and %l1 = 1.  Window R
3413: 	 * is still valid and holds user values.
3414: 	 */
3415: winuf_ok:
3416: 	rd	%psr, %l0
3417: 	sll	%l1, %l0, %l1
3418: 	wr	%l1, 0, %wim		! make this one invalid
3419: 	sethi	%hi(cpcb), %l2
3420: 	ld	[%l2 + %lo(cpcb)], %l2
3421: 	and	%l0, 31, %l0
3422: 	st	%l0, [%l2 + PCB_WIM]	! cpcb->pcb_wim = cwp;
3423: 	save	%g0, %g0, %g0		! back to I
3424: 	LOADWIN(%sp)
3425: 	save	%g0, %g0, %g0		! back to R
3426: 	save	%g0, %g0, %g0		! back to T
3427: 	wr	%l0, 0, %psr		! restore condition codes
3428: 	nop				! it takes three to tangle
3429: 	RETT
3430: #endif	/* end `real' version of window underflow trap handler */
3431:
3432: /*
3433: * Various return-from-trap routines (see return_from_trap).
3434: */
3435:
3436: /*
3437: * Return from trap, to kernel.
3438: * %l0 = %psr
3439: * %l1 = return pc
3440: * %l2 = return npc
3441: * %l4 = %wim
3442: * %l5 = bit for previous window
3443: */
3444: rft_kernel:
3445: 	btst	%l5, %l4		! if (wim & l5)
3446: 	bnz	1f			! goto reload;
3447: 	wr	%l0, 0, %psr		! but first put !@#*% cond codes back
3448: 
3449: 	/* previous window is valid; just rett */
3450: 	nop				! wait for cond codes to settle in
3451: 	RETT
3452: 
3453: 	/*
3454: 	 * Previous window is invalid.
3455: 	 * Update %wim and then reload l0..i7 from frame.
3456: 	 *
3457: 	 *	  T I X
3458: 	 *	0 0 1 0 0   (%wim)
3459: 	 * [see picture in window_uf handler]
3460: 	 *
3461: 	 * T is the current (Trap) window, I is the Invalid window,
3462: 	 * and X is the window we want to make invalid.  Window X
3463: 	 * currently has no useful values.
3464: 	 */
3465: 1:
3466: 	wr	%g0, 0, %wim		! allow us to enter window I
3467: 	nop; nop; nop			! (it takes a while)
3468: 	restore				! enter window I
3469: 	restore	%g0, 1, %l1		! enter window X, then %l1 = 1
3470: 	rd	%psr, %l0		! CWP = %psr & 31;
3471: 	and	%l0, 31, %l0
3472: 	sll	%l1, %l0, %l1		! wim = 1 << CWP;
3473: 	wr	%l1, 0, %wim		! setwim(wim);
3474: 	sethi	%hi(cpcb), %l1
3475: 	ld	[%l1 + %lo(cpcb)], %l1
3476: 	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = l0 & 31;
3477: 	save	%g0, %g0, %g0		! back to window I
3478: 	LOADWIN(%sp)
3479: 	save	%g0, %g0, %g0		! back to window T
3480: 	/*
3481: 	 * Note that the condition codes are still set from
3482: 	 * the code at rft_kernel; we can simply return.
3483: 	 */
3484: 	RETT
3485:
3486: /*
3487: * Return from trap, to user. Checks for scheduling trap (`ast') first;
3488: * will re-enter trap() if set. Note that we may have to switch from
3489: * the interrupt stack to the kernel stack in this case.
3490: * %l0 = %psr
3491: * %l1 = return pc
3492: * %l2 = return npc
3493: * %l4 = %wim
3494: * %l5 = bit for previous window
3495: * %l6 = cpcb
3496: * If returning to a valid window, just set psr and return.
3497: */
3498: rft_user:
3499: !	sethi	%hi(_WANT_AST)), %l7	! (done below)
3500: 	ld	[%l7 + %lo(_WANT_AST)], %l7
3501: 	tst	%l7			! want AST trap?
3502: 	bne,a	softtrap		! yes, re-enter trap with type T_AST
3503: 	mov	T_AST, %o0		! (annulled delay slot: arg only if taken)
3504: 
3505: 	btst	%l5, %l4		! if (wim & l5)
3506: 	bnz	1f			! goto reload;
3507: 	wr	%l0, 0, %psr		! restore cond codes
3508: 	nop				! (three instruction delay)
3509: 	RETT
3510: 
3511: 	/*
3512: 	 * Previous window is invalid.
3513: 	 * Before we try to load it, we must verify its stack pointer.
3514: 	 * This is much like the underflow handler, but a bit easier
3515: 	 * since we can use our own local registers.
3516: 	 */
3517: 1:
3518: 	btst	7, %fp			! if unaligned, address is invalid
3519: 	bne	rft_invalid
3520: 	EMPTY
3521: 
3522: 	sethi	%hi(_C_LABEL(pgofset)), %l3
3523: 	ld	[%l3 + %lo(_C_LABEL(pgofset))], %l3
3524: 	PTE_OF_ADDR(%fp, %l7, rft_invalid, %l3, NOP_ON_4M_9)
3525: 	CMP_PTE_USER_READ(%l7, %l5, NOP_ON_4M_10)	! try first page
3526: 	bne	rft_invalid		! no good
3527: 	EMPTY
3528: 	SLT_IF_1PAGE_RW(%fp, %l7, %l3)
3529: 	bl,a	rft_user_ok		! only 1 page: ok
3530: 	wr	%g0, 0, %wim		! (annulled delay slot)
3531: 	add	%fp, 7*8, %l5		! %l5 = last dword of the save area
3532: 	add	%l3, 62, %l3
3533: 	PTE_OF_ADDR(%l5, %l7, rft_invalid, %l3, NOP_ON_4M_11)
3534: 	CMP_PTE_USER_READ(%l7, %l5, NOP_ON_4M_12)	! check 2nd page too
3535: 	be,a	rft_user_ok
3536: 	wr	%g0, 0, %wim		! (annulled delay slot)
3537: 
3538: 	/*
3539: 	 * The window we wanted to pull could not be pulled.  Instead,
3540: 	 * re-enter trap with type T_RWRET.  This will pull the window
3541: 	 * into cpcb->pcb_rw[0] and set cpcb->pcb_nsaved to -1, which we
3542: 	 * will detect when we try to return again.
3543: 	 */
3544: rft_invalid:
3545: 	b	softtrap
3546: 	mov	T_RWRET, %o0		! (delay slot: trap type)
3547: 
3548: 	/*
3549: 	 * The window we want to pull can be pulled directly.
3550: 	 */
3551: rft_user_ok:
3552: !	wr	%g0, 0, %wim		! allow us to get into it
3553: 	wr	%l0, 0, %psr		! fix up the cond codes now
3554: 	nop; nop; nop
3555: 	restore				! enter window I
3556: 	restore	%g0, 1, %l1		! enter window X, then %l1 = 1
3557: 	rd	%psr, %l0		! l0 = (junk << 5) + CWP;
3558: 	sll	%l1, %l0, %l1		! %wim = 1 << CWP;
3559: 	wr	%l1, 0, %wim
3560: 	sethi	%hi(cpcb), %l1
3561: 	ld	[%l1 + %lo(cpcb)], %l1
3562: 	and	%l0, 31, %l0
3563: 	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = l0 & 31;
3564: 	save	%g0, %g0, %g0		! back to window I
3565: 	LOADWIN(%sp)			! suck hard
3566: 	save	%g0, %g0, %g0		! back to window T
3567: 	RETT
3568:
3569: /*
3570: * Return from trap. Entered after a
3571: * wr %l0, 0, %psr
3572: * which disables traps so that we can rett; registers are:
3573: *
3574: * %l0 = %psr
3575: * %l1 = return pc
3576: * %l2 = return npc
3577: *
3578: * (%l3..%l7 anything).
3579: *
3580: * If we are returning to user code, we must:
3581: * 1. Check for register windows in the pcb that belong on the stack.
3582: * If there are any, reenter trap with type T_WINOF.
3583: * 2. Make sure the register windows will not underflow. This is
3584: * much easier in kernel mode....
3585: */
3586: return_from_trap:
3587: !	wr	%l0, 0, %psr		! disable traps so we can rett
3588: 					! (someone else did this already)
3589: 	and	%l0, 31, %l5		! %l5 = CWP
3590: 	set	wmask, %l6
3591: 	ldub	[%l6 + %l5], %l5	! %l5 = 1 << ((CWP + 1) % nwindows)
3592: 	btst	PSR_PS, %l0		! returning to userland?
3593: 	bnz	rft_kernel		! no, go return to kernel
3594: 	rd	%wim, %l4		! (read %wim in any case)
3595: 
3596: rft_user_or_recover_pcb_windows:
3597: 	/*
3598: 	 * (entered with %l4=%wim, %l5=wmask[cwp]; %l0..%l2 as usual)
3599: 	 *
3600: 	 * check cpcb->pcb_nsaved:
3601: 	 * if 0, do a `normal' return to user (see rft_user);
3602: 	 * if > 0, cpcb->pcb_rw[] holds registers to be copied to stack;
3603: 	 * if -1, cpcb->pcb_rw[0] holds user registers for rett window
3604: 	 * from an earlier T_RWRET pseudo-trap.
3605: 	 */
3606: 	sethi	%hi(cpcb), %l6
3607: 	ld	[%l6 + %lo(cpcb)], %l6
3608: 	ld	[%l6 + PCB_NSAVED], %l7
3609: 	tst	%l7
3610: 	bz,a	rft_user
3611: 	sethi	%hi(_WANT_AST), %l7	! first instr of rft_user
3612: 
3613: 	bg,a	softtrap		! if (pcb_nsaved > 0)
3614: 	mov	T_WINOF, %o0		! trap(T_WINOF);
3615: 
3616: 	/*
3617: 	 * To get here, we must have tried to return from a previous
3618: 	 * trap and discovered that it would cause a window underflow.
3619: 	 * We then must have tried to pull the registers out of the
3620: 	 * user stack (from the address in %fp==%i6) and discovered
3621: 	 * that it was either unaligned or not loaded in memory, and
3622: 	 * therefore we ran a trap(T_RWRET), which loaded one set of
3623: 	 * registers into cpcb->pcb_pcb_rw[0] (if it had killed the
3624: 	 * process due to a bad stack, we would not be here).
3625: 	 *
3626: 	 * We want to load pcb_rw[0] into the previous window, which
3627: 	 * we know is currently invalid.  In other words, we want
3628: 	 * %wim to be 1 << ((cwp + 2) % nwindows).
3629: 	 */
3630: 	wr	%g0, 0, %wim		! enable restores
3631: 	mov	%g6, %l3		! save g6 in l3
3632: 	mov	%l6, %g6		! set g6 = &u
3633: 	st	%g0, [%g6 + PCB_NSAVED]	! clear cpcb->pcb_nsaved
3634: 	restore				! enter window I
3635: 	restore	%g0, 1, %l1		! enter window X, then %l1 = 1
3636: 	rd	%psr, %l0
3637: 	sll	%l1, %l0, %l1		! %wim = 1 << CWP;
3638: 	wr	%l1, 0, %wim
3639: 	and	%l0, 31, %l0
3640: 	st	%l0, [%g6 + PCB_WIM]	! cpcb->pcb_wim = CWP;
3641: 	nop				! unnecessary? old wim was 0...
3642: 	save	%g0, %g0, %g0		! back to window I
3643: 	LOADWIN(%g6 + PCB_RW)
3644: 	save	%g0, %g0, %g0		! back to window T (trap window)
3645: 	wr	%l0, 0, %psr		! cond codes, cond codes everywhere
3646: 	mov	%l3, %g6		! restore g6
3647: 	RETT
3648:
3649: ! exported end marker for kernel gdb
1.111 pk 3650: .globl _C_LABEL(endtrapcode)
3651: _C_LABEL(endtrapcode):
1.1 deraadt 3652:
3653: /*
3654: * init_tables(nwin) int nwin;
3655: *
3656: * Set up the uwtab and wmask tables.
3657: * We know nwin > 1.
3658: */
3659: init_tables:
	! Build the window-accounting lookup tables:
	!   uwtab[i] = number of user windows given (CWP - pcb_wim) == i
	!              (indexed with both negative and positive offsets);
	!   wmask[i] = 1 << ((i + 1) % nwindows), the %wim bit of the
	!              window "below" window i.
	! Input: %o0 = nwin (number of register windows, > 1).
3660: 	/*
3661: 	 * for (i = -nwin, j = nwin - 2; ++i < 0; j--)
3662: 	 *	uwtab[i] = j;
3663: 	 * (loop runs at least once)
3664: 	 */
3665: 	set	uwtab, %o3
3666: 	sub	%g0, %o0, %o1		! i = -nwin + 1
3667: 	inc	%o1
3668: 	add	%o0, -2, %o2		! j = nwin - 2;
3669: 0:
3670: 	stb	%o2, [%o3 + %o1]	! uwtab[i] = j;
3671: 1:
3672: 	inccc	%o1			! ++i < 0?
3673: 	bl	0b			! yes, continue loop
3674: 	dec	%o2			! in any case, j--
3675: 
3676: 	/*
3677: 	 * (i now equals 0)
3678: 	 * for (j = nwin - 1; i < nwin; i++, j--)
3679: 	 *	uwtab[i] = j;
3680: 	 * (loop runs at least twice)
3681: 	 */
3682: 	sub	%o0, 1, %o2		! j = nwin - 1
3683: 0:
3684: 	stb	%o2, [%o3 + %o1]	! uwtab[i] = j
3685: 	inc	%o1			! i++
3686: 1:
3687: 	cmp	%o1, %o0		! i < nwin?
3688: 	bl	0b			! yes, continue
3689: 	dec	%o2			! in any case, j--
3690: 
3691: 	/*
3692: 	 * We observe that, for i in 0..nwin-2, (i+1)%nwin == i+1;
3693: 	 * for i==nwin-1, (i+1)%nwin == 0.
3694: 	 * To avoid adding 1, we run i from 1 to nwin and set
3695: 	 * wmask[i-1].
3696: 	 *
3697: 	 * for (i = j = 1; i < nwin; i++) {
3698: 	 *	j <<= 1;	(j now == 1 << i)
3699: 	 *	wmask[i - 1] = j;
3700: 	 * }
3701: 	 * (loop runs at least once)
3702: 	 */
3703: 	set	wmask - 1, %o3
3704: 	mov	1, %o1			! i = 1;
3705: 	mov	2, %o2			! j = 2;
3706: 0:
3707: 	stb	%o2, [%o3 + %o1]	! (wmask - 1)[i] = j;
3708: 	inc	%o1			! i++
3709: 	cmp	%o1, %o0		! i < nwin?
3710: 	bl,a	0b			! yes, continue
3711: 	sll	%o2, 1, %o2		! (and j <<= 1)
3712: 
3713: 	/*
3714: 	 * Now i==nwin, so we want wmask[i-1] = 1.
3715: 	 */
3716: 	mov	1, %o2			! j = 1;
3717: 	retl				! return to caller;
3718: 	stb	%o2, [%o3 + %o1]	! (wmask - 1)[i] = j; (delay slot)
3719:
1.13 deraadt 3720:
1.1 deraadt 3721: dostart:
1.32 pk 3722: /*
3723: * Startup.
3724: *
1.186 pk 3725: * We may have been loaded in low RAM, at some address which
1.119 christos 3726: * is page aligned (PROM_LOADADDR actually) rather than where we
3727: * want to run (KERNBASE+PROM_LOADADDR). Until we get everything set,
1.32 pk 3728: * we have to be sure to use only pc-relative addressing.
3729: */
3730:
1.27 pk 3731: /*
1.186 pk 3732: * Find out if the above is the case.
3733: */
3734: 0: call 1f
3735: sethi %hi(0b), %l0 ! %l0 = virtual address of 0:
3736: 1: or %l0, %lo(0b), %l0
3737: sub %l0, %o7, %l7 ! subtract actual physical address of 0:
3738:
3739: /*
3740: * If we're already running at our desired virtual load address,
3741: * %l7 will be set to 0, otherwise it will be KERNBASE.
3742: * From now on until the end of locore bootstrap code, %l7 will
3743: * be used to relocate memory references.
3744: */
3745: #define RELOCATE(l,r) \
3746: set l, r; \
3747: sub r, %l7, r
3748:
3749: /*
3750: * We use the bootinfo method to pass arguments, and the new
1.153 pk 3751: * magic number indicates that. A pointer to the kernel top, i.e.
3752: * the first address after the load kernel image (including DDB
3753: * symbols, if any) is passed in %o4[0] and the bootinfo structure
3754: * is passed in %o4[1].
3755: *
3756: * A magic number is passed in %o5 to allow for bootloaders
3757: * that know nothing about the bootinfo structure or previous
3758: * DDB symbol loading conventions.
1.117 christos 3759: *
3760: * For compatibility with older versions, we check for DDB arguments
1.153 pk 3761: * if the older magic number is there. The loader passes `kernel_top'
3762: * (previously known as `esym') in %o4.
3763: *
1.40 pk 3764: * Note: we don't touch %o1-%o3; SunOS bootloaders seem to use them
3765: 	 * for their own murky business.
1.73 pk 3766: *
1.153 pk 3767: * Pre-NetBSD 1.3 bootblocks had KERNBASE compiled in, and used it
3768: * to compute the value of `kernel_top' (previously known as `esym').
3769: * In order to successfully boot a kernel built with a different value
3770: * for KERNBASE using old bootblocks, we fixup `kernel_top' here by
3771: * the difference between KERNBASE and the old value (known to be
3772: * 0xf8000000) compiled into pre-1.3 bootblocks.
1.27 pk 3773: */
1.117 christos 3774:
3775: set 0x44444232, %l3 ! bootinfo magic
3776: cmp %o5, %l3
3777: bne 1f
1.118 pk 3778: nop
3779:
3780: /* The loader has passed to us a `bootinfo' structure */
1.153 pk 3781: ld [%o4], %l3 ! 1st word is kernel_top
1.186 pk 3782: add %l3, %l7, %o5 ! relocate: + KERNBASE
3783: RELOCATE(_C_LABEL(kernel_top),%l3)
3784: st %o5, [%l3] ! and store it
1.120 pk 3785:
3786: ld [%o4 + 4], %l3 ! 2nd word is bootinfo
1.186 pk 3787: add %l3, %l7, %o5 ! relocate
3788: RELOCATE(_C_LABEL(bootinfo),%l3)
3789: st %o5, [%l3] ! store bootinfo
1.153 pk 3790: b,a 4f
1.117 christos 3791:
1.118 pk 3792: 1:
1.153 pk 3793: #ifdef DDB
1.120 pk 3794: /* Check for old-style DDB loader magic */
1.186 pk 3795: set KERNBASE, %l4
1.153 pk 3796: set 0x44444231, %l3 ! Is it DDB_MAGIC1?
1.117 christos 3797: cmp %o5, %l3
1.118 pk 3798: be,a 2f
3799: clr %l4 ! if DDB_MAGIC1, clear %l4
1.115 christos 3800:
1.153 pk 3801: set 0x44444230, %l3 ! Is it DDB_MAGIC0?
3802: cmp %o5, %l3 ! if so, need to relocate %o4
1.154 thorpej 3803: bne 3f /* if not, there's no bootloader info */
1.73 pk 3804:
1.118 pk 3805: ! note: %l4 set to KERNBASE above.
1.73 pk 3806: set 0xf8000000, %l5 ! compute correction term:
3807: sub %l5, %l4, %l4 ! old KERNBASE (0xf8000000 ) - KERNBASE
3808:
1.117 christos 3809: 2:
1.40 pk 3810: tst %o4 ! do we have the symbols?
1.117 christos 3811: bz 3f
1.73 pk 3812: sub %o4, %l4, %o4 ! apply compat correction
1.153 pk 3813: sethi %hi(_C_LABEL(kernel_top) - KERNBASE), %l3 ! and store it
3814: st %o4, [%l3 + %lo(_C_LABEL(kernel_top) - KERNBASE)]
3815: b,a 4f
1.117 christos 3816: 3:
1.27 pk 3817: #endif
1.13 deraadt 3818: /*
1.153 pk 3819: * The boot loader did not pass in a value for `kernel_top';
3820: * let it default to `end'.
3821: */
3822: set end, %o4
1.186 pk 3823: RELOCATE(_C_LABEL(kernel_top),%l3)
3824: st %o4, [%l3] ! store kernel_top
1.153 pk 3825:
3826: 4:
3827:
3828: /*
1.13 deraadt 3829: * Sun4 passes in the `load address'. Although possible, it's highly
3830: * unlikely that OpenBoot would place the prom vector there.
3831: */
1.119 christos 3832: set PROM_LOADADDR, %g7
1.17 pk 3833: cmp %o0, %g7
1.50 pk 3834: be is_sun4
1.14 deraadt 3835: nop
3836:
1.158 thorpej 3837: #if defined(SUN4C) || defined(SUN4M) || defined(SUN4D)
1.144 uwe 3838: /*
3839: * Be prepared to get OF client entry in either %o0 or %o3.
1.158 thorpej 3840: * XXX Will this ever trip on sun4d? Let's hope not!
1.144 uwe 3841: */
3842: cmp %o0, 0
3843: be is_openfirm
3844: nop
3845:
3846: mov %o0, %g7 ! save romp passed by boot code
1.9 deraadt 3847:
1.109 pk 3848: /* First, check `romp->pv_magic' */
3849: ld [%g7 + PV_MAGIC], %o0 ! v = pv->pv_magic
3850: set OBP_MAGIC, %o1
3851: cmp %o0, %o1 ! if ( v != OBP_MAGIC) {
1.144 uwe 3852: bne is_sun4m ! assume this is an OPENFIRM machine
1.109 pk 3853: nop ! }
3854:
1.13 deraadt 3855: /*
1.158 thorpej 3856: * are we on a sun4c or a sun4m or a sun4d?
1.13 deraadt 3857: */
1.28 deraadt 3858: ld [%g7 + PV_NODEOPS], %o4 ! node = pv->pv_nodeops->no_nextnode(0)
3859: ld [%o4 + NO_NEXTNODE], %o4
1.18 deraadt 3860: call %o4
3861: mov 0, %o0 ! node
1.37 pk 3862:
1.186 pk 3863: !mov %o0, %l0
3864: RELOCATE(cputypvar,%o1) ! name = "compatible"
3865: RELOCATE(cputypval,%l2) ! buffer ptr (assume buffer long enough)
1.28 deraadt 3866: ld [%g7 + PV_NODEOPS], %o4 ! (void)pv->pv_nodeops->no_getprop(...)
3867: ld [%o4 + NO_GETPROP], %o4
1.18 deraadt 3868: call %o4
1.186 pk 3869: mov %l2, %o2
3870: !set cputypval-KERNBASE, %o2 ! buffer ptr
3871: ldub [%l2 + 4], %o0 ! which is it... "sun4c", "sun4m", "sun4d"?
1.18 deraadt 3872: cmp %o0, 'c'
1.50 pk 3873: be is_sun4c
1.13 deraadt 3874: nop
1.18 deraadt 3875: cmp %o0, 'm'
1.50 pk 3876: be is_sun4m
1.18 deraadt 3877: nop
1.158 thorpej 3878: cmp %o0, 'd'
3879: be is_sun4d
3880: nop
3881: #endif /* SUN4C || SUN4M || SUN4D */
1.18 deraadt 3882:
1.158 thorpej 3883: /*
3884: * Don't know what type of machine this is; just halt back
3885: * out to the PROM.
3886: */
1.28 deraadt 3887: ld [%g7 + PV_HALT], %o1 ! by this kernel, then halt
1.18 deraadt 3888: call %o1
3889: nop
3890:
1.109 pk 3891: is_openfirm:
1.144 uwe 3892: ! OF client entry in %o3 (kernel booted directly by PROM?)
3893: mov %o3, %g7
1.109 pk 3894: /* FALLTHROUGH to sun4m case */
3895:
1.18 deraadt 3896: is_sun4m:
1.13 deraadt 3897: #if defined(SUN4M)
1.52 pk 3898: set trapbase_sun4m, %g6
1.13 deraadt 3899: mov SUN4CM_PGSHIFT, %g5
3900: b start_havetype
3901: mov CPU_SUN4M, %g4
3902: #else
1.186 pk 3903: RELOCATE(sun4m_notsup,%o0)
1.28 deraadt 3904: ld [%g7 + PV_EVAL], %o1
1.9 deraadt 3905: call %o1 ! print a message saying that the
3906: nop ! sun4m architecture is not supported
1.158 thorpej 3907: ld [%g7 + PV_HALT], %o1 ! by this kernel, then halt
3908: call %o1
3909: nop
3910: /*NOTREACHED*/
3911: #endif
3912: is_sun4d:
3913: #if defined(SUN4D)
1.159 thorpej 3914: set trapbase_sun4m, %g6 /* XXXJRT trapbase_sun4d */
1.158 thorpej 3915: mov SUN4CM_PGSHIFT, %g5
3916: b start_havetype
3917: mov CPU_SUN4D, %g4
3918: #else
1.186 pk 3919: RELOCATE(sun4d_notsup,%o0)
1.158 thorpej 3920: ld [%g7 + PV_EVAL], %o1
3921: call %o1 ! print a message saying that the
3922: nop ! sun4d architecture is not supported
1.28 deraadt 3923: ld [%g7 + PV_HALT], %o1 ! by this kernel, then halt
1.9 deraadt 3924: call %o1
3925: nop
1.13 deraadt 3926: /*NOTREACHED*/
3927: #endif
3928: is_sun4c:
3929: #if defined(SUN4C)
1.52 pk 3930: set trapbase_sun4c, %g6
1.13 deraadt 3931: mov SUN4CM_PGSHIFT, %g5
3932:
3933: set AC_CONTEXT, %g1 ! paranoia: set context to kernel
3934: stba %g0, [%g1] ASI_CONTROL
3935:
3936: b start_havetype
3937: mov CPU_SUN4C, %g4 ! XXX CPU_SUN4
1.9 deraadt 3938: #else
1.186 pk 3939: RELOCATE(sun4c_notsup,%o0)
1.28 deraadt 3940:
3941: ld [%g7 + PV_ROMVEC_VERS], %o1
3942: cmp %o1, 0
3943: bne 1f
3944: nop
3945:
3946: ! stupid version 0 rom interface is pv_eval(int length, char *string)
3947: mov %o0, %o1
3948: 2: ldub [%o0], %o4
1.186 pk 3949: tst %o4
1.28 deraadt 3950: bne 2b
3951: inc %o0
3952: dec %o0
3953: sub %o0, %o1, %o0
3954:
3955: 1: ld [%g7 + PV_EVAL], %o2
3956: call %o2 ! print a message saying that the
1.9 deraadt 3957: nop ! sun4c architecture is not supported
1.28 deraadt 3958: ld [%g7 + PV_HALT], %o1 ! by this kernel, then halt
1.9 deraadt 3959: call %o1
3960: nop
1.13 deraadt 3961: /*NOTREACHED*/
1.9 deraadt 3962: #endif
1.13 deraadt 3963: is_sun4:
3964: #if defined(SUN4)
1.52 pk 3965: set trapbase_sun4, %g6
1.13 deraadt 3966: mov SUN4_PGSHIFT, %g5
1.1 deraadt 3967:
1.13 deraadt 3968: set AC_CONTEXT, %g1 ! paranoia: set context to kernel
3969: stba %g0, [%g1] ASI_CONTROL
3970:
3971: b start_havetype
1.14 deraadt 3972: mov CPU_SUN4, %g4
1.13 deraadt 3973: #else
1.14 deraadt 3974: set PROM_BASE, %g7
3975:
1.186 pk 3976: RELOCATE(sun4_notsup,%o0)
1.28 deraadt 3977: ld [%g7 + OLDMON_PRINTF], %o1
1.13 deraadt 3978: call %o1 ! print a message saying that the
3979: nop ! sun4 architecture is not supported
1.28 deraadt 3980: ld [%g7 + OLDMON_HALT], %o1 ! by this kernel, then halt
1.13 deraadt 3981: call %o1
3982: nop
3983: /*NOTREACHED*/
3984: #endif
3985:
3986: start_havetype:
1.186 pk 3987: cmp %l7, 0
3988: be startmap_done
3989:
1.1 deraadt 3990: /*
3991: * Step 1: double map low RAM (addresses [0.._end-start-1])
3992: * to KERNBASE (addresses [KERNBASE.._end-1]). None of these
3993: * are `bad' aliases (since they are all on segment boundaries)
3994: * so we do not have to worry about cache aliasing.
3995: *
3996: * We map in another couple of segments just to have some
3997: * more memory (512K, actually) guaranteed available for
3998: * bootstrap code (pmap_bootstrap needs memory to hold MMU
1.39 pk 3999: * and context data structures). Note: this is only relevant
4000: * for 2-level MMU sun4/sun4c machines.
1.1 deraadt 4001: */
4002: clr %l0 ! lowva
4003: set KERNBASE, %l1 ! highva
1.153 pk 4004:
4005: sethi %hi(_C_LABEL(kernel_top) - KERNBASE), %o0
4006: ld [%o0 + %lo(_C_LABEL(kernel_top) - KERNBASE)], %o1
4007: set (2 << 18), %o2 ! add slack for sun4c MMU
4008: add %o1, %o2, %l2 ! last va that must be remapped
4009:
1.13 deraadt 4010: /*
4011: * Need different initial mapping functions for different
4012: * types of machines.
4013: */
4014: #if defined(SUN4C)
4015: cmp %g4, CPU_SUN4C
1.9 deraadt 4016: bne 1f
1.14 deraadt 4017: set 1 << 18, %l3 ! segment size in bytes
1.1 deraadt 4018: 0:
4019: lduba [%l0] ASI_SEGMAP, %l4 ! segmap[highva] = segmap[lowva];
4020: stba %l4, [%l1] ASI_SEGMAP
4021: add %l3, %l1, %l1 ! highva += segsiz;
4022: cmp %l1, %l2 ! done?
1.34 pk 4023: blu 0b ! no, loop
1.1 deraadt 4024: add %l3, %l0, %l0 ! (and lowva += segsz)
1.135 pk 4025: b,a startmap_done
1.52 pk 4026: 1:
1.13 deraadt 4027: #endif /* SUN4C */
1.135 pk 4028:
1.13 deraadt 4029: #if defined(SUN4)
4030: cmp %g4, CPU_SUN4
4031: bne 2f
1.114 pk 4032: #if defined(SUN4_MMU3L)
1.34 pk 4033: set AC_IDPROM+1, %l3
4034: lduba [%l3] ASI_CONTROL, %l3
4035: cmp %l3, 0x24 ! XXX - SUN4_400
4036: bne no_3mmu
1.133 pk 4037: nop
1.135 pk 4038:
4039: /*
4040: * Three-level sun4 MMU.
4041: * Double-map by duplicating a single region entry (which covers
4042: * 16MB) corresponding to the kernel's virtual load address.
4043: */
1.34 pk 4044: add %l0, 2, %l0 ! get to proper half-word in RG space
4045: add %l1, 2, %l1
4046: lduha [%l0] ASI_REGMAP, %l4 ! regmap[highva] = regmap[lowva];
4047: stha %l4, [%l1] ASI_REGMAP
1.135 pk 4048: b,a startmap_done
1.34 pk 4049: no_3mmu:
4050: #endif
1.135 pk 4051:
4052: /*
 4053: * Two-level sun4 MMU.
4054: * Double-map by duplicating the required number of segment
4055: * entries corresponding to the kernel's virtual load address.
4056: */
4057: set 1 << 18, %l3 ! segment size in bytes
1.13 deraadt 4058: 0:
4059: lduha [%l0] ASI_SEGMAP, %l4 ! segmap[highva] = segmap[lowva];
4060: stha %l4, [%l1] ASI_SEGMAP
4061: add %l3, %l1, %l1 ! highva += segsiz;
4062: cmp %l1, %l2 ! done?
1.34 pk 4063: blu 0b ! no, loop
1.13 deraadt 4064: add %l3, %l0, %l0 ! (and lowva += segsz)
1.37 pk 4065: b,a startmap_done
1.52 pk 4066: 2:
1.13 deraadt 4067: #endif /* SUN4 */
1.135 pk 4068:
1.159 thorpej 4069: #if defined(SUN4M) || defined(SUN4D)
4070: cmp %g4, CPU_SUN4M
4071: beq 3f
4072: nop
4073: cmp %g4, CPU_SUN4D
1.164 pk 4074: bne 4f
1.13 deraadt 4075:
1.159 thorpej 4076: 3:
1.37 pk 4077: /*
1.38 pk 4078: * The OBP guarantees us a 16MB mapping using a level 1 PTE at
1.135 pk 4079: * the start of the memory bank in which we were loaded. All we
4080: * have to do is copy the entry.
4081: * Also, we must check to see if we have a TI Viking in non-mbus mode,
4082: * and if so do appropriate flipping and turning off traps before
1.38 pk 4083: * we dork with MMU passthrough. -grrr
1.37 pk 4084: */
4085:
1.38 pk 4086: sethi %hi(0x40000000), %o1 ! TI version bit
4087: rd %psr, %o0
4088: andcc %o0, %o1, %g0
4089: be remap_notvik ! is non-TI normal MBUS module
4090: lda [%g0] ASI_SRMMU, %o0 ! load MMU
4091: andcc %o0, 0x800, %g0
4092: bne remap_notvik ! It is a viking MBUS module
4093: nop
4094:
4095: /*
4096: * Ok, we have a non-Mbus TI Viking, a MicroSparc.
 4097: * In this scenario, in order to play with the MMU
4098: * passthrough safely, we need turn off traps, flip
4099: * the AC bit on in the mmu status register, do our
4100: * passthroughs, then restore the mmu reg and %psr
4101: */
4102: rd %psr, %o4 ! saved here till done
4103: andn %o4, 0x20, %o5
4104: wr %o5, 0x0, %psr
4105: nop; nop; nop;
4106: set SRMMU_CXTPTR, %o0
4107: lda [%o0] ASI_SRMMU, %o0 ! get context table ptr
4108: sll %o0, 4, %o0 ! make physical
4109: lda [%g0] ASI_SRMMU, %o3 ! hold mmu-sreg here
4110: /* 0x8000 is AC bit in Viking mmu-ctl reg */
4111: set 0x8000, %o2
4112: or %o3, %o2, %o2
4113: sta %o2, [%g0] ASI_SRMMU ! AC bit on
1.135 pk 4114:
1.38 pk 4115: lda [%o0] ASI_BYPASS, %o1
4116: srl %o1, 4, %o1
4117: sll %o1, 8, %o1 ! get phys addr of l1 entry
4118: lda [%o1] ASI_BYPASS, %l4
4119: srl %l1, 22, %o2 ! note: 22 == RGSHIFT - 2
4120: add %o1, %o2, %o1
4121: sta %l4, [%o1] ASI_BYPASS
1.135 pk 4122:
1.38 pk 4123: sta %o3, [%g0] ASI_SRMMU ! restore mmu-sreg
4124: wr %o4, 0x0, %psr ! restore psr
1.164 pk 4125: b,a startmap_done
1.38 pk 4126:
4127: /*
4128: * The following is generic and should work on all
4129: * Mbus based SRMMU's.
4130: */
4131: remap_notvik:
4132: set SRMMU_CXTPTR, %o0
4133: lda [%o0] ASI_SRMMU, %o0 ! get context table ptr
4134: sll %o0, 4, %o0 ! make physical
4135: lda [%o0] ASI_BYPASS, %o1
4136: srl %o1, 4, %o1
4137: sll %o1, 8, %o1 ! get phys addr of l1 entry
4138: lda [%o1] ASI_BYPASS, %l4
4139: srl %l1, 22, %o2 ! note: 22 == RGSHIFT - 2
4140: add %o1, %o2, %o1
4141: sta %l4, [%o1] ASI_BYPASS
1.52 pk 4142: !b,a startmap_done
1.163 pk 4143: 4:
1.159 thorpej 4144: #endif /* SUN4M || SUN4D */
1.13 deraadt 4145: ! botch! We should blow up.
4146:
4147: startmap_done:
1.1 deraadt 4148: /*
4149: * All set, fix pc and npc. Once we are where we should be,
4150: * we can give ourselves a stack and enable traps.
4151: */
1.9 deraadt 4152: set 1f, %g1
4153: jmp %g1
1.1 deraadt 4154: nop
4155: 1:
1.197 wiz 4156: sethi %hi(_C_LABEL(cputyp)), %o0 ! what type of CPU we are on
1.111 pk 4157: st %g4, [%o0 + %lo(_C_LABEL(cputyp))]
1.9 deraadt 4158:
1.111 pk 4159: sethi %hi(_C_LABEL(pgshift)), %o0 ! pgshift = log2(nbpg)
4160: st %g5, [%o0 + %lo(_C_LABEL(pgshift))]
1.13 deraadt 4161:
4162: mov 1, %o0 ! nbpg = 1 << pgshift
4163: sll %o0, %g5, %g5
1.111 pk 4164: sethi %hi(_C_LABEL(nbpg)), %o0 ! nbpg = bytes in a page
4165: st %g5, [%o0 + %lo(_C_LABEL(nbpg))]
1.13 deraadt 4166:
4167: sub %g5, 1, %g5
1.111 pk 4168: sethi %hi(_C_LABEL(pgofset)), %o0 ! page offset = bytes in a page - 1
4169: st %g5, [%o0 + %lo(_C_LABEL(pgofset))]
1.13 deraadt 4170:
1.9 deraadt 4171: rd %psr, %g3 ! paranoia: make sure ...
4172: andn %g3, PSR_ET, %g3 ! we have traps off
4173: wr %g3, 0, %psr ! so that we can fiddle safely
4174: nop; nop; nop
4175:
4176: wr %g0, 0, %wim ! make sure we can set psr
4177: nop; nop; nop
4178: wr %g0, PSR_S|PSR_PS|PSR_PIL, %psr ! set initial psr
4179: nop; nop; nop
4180:
4181: wr %g0, 2, %wim ! set initial %wim (w1 invalid)
4182: mov 1, %g1 ! set pcb_wim (log2(%wim) = 1)
1.111 pk 4183: sethi %hi(_C_LABEL(u0) + PCB_WIM), %g2
4184: st %g1, [%g2 + %lo(_C_LABEL(u0) + PCB_WIM)]
1.9 deraadt 4185:
1.1 deraadt 4186: set USRSTACK - CCFSZ, %fp ! as if called from user code
4187: set estack0 - CCFSZ - 80, %sp ! via syscall(boot_me_up) or somesuch
4188: rd %psr, %l0
4189: wr %l0, PSR_ET, %psr
1.9 deraadt 4190: nop; nop; nop
1.1 deraadt 4191:
1.52 pk 4192: /* Export actual trapbase */
1.111 pk 4193: sethi %hi(_C_LABEL(trapbase)), %o0
4194: st %g6, [%o0+%lo(_C_LABEL(trapbase))]
1.52 pk 4195:
1.117 christos 4196: #ifdef notdef
1.1 deraadt 4197: /*
4198: * Step 2: clear BSS. This may just be paranoia; the boot
4199: * loader might already do it for us; but what the hell.
4200: */
4201: set _edata, %o0 ! bzero(edata, end - edata)
4202: set _end, %o1
1.111 pk 4203: call _C_LABEL(bzero)
1.1 deraadt 4204: sub %o1, %o0, %o1
1.117 christos 4205: #endif
1.1 deraadt 4206:
4207: /*
4208: * Stash prom vectors now, after bzero, as it lives in bss
4209: * (which we just zeroed).
4210: * This depends on the fact that bzero does not use %g7.
4211: */
1.111 pk 4212: sethi %hi(_C_LABEL(romp)), %l0
4213: st %g7, [%l0 + %lo(_C_LABEL(romp))]
1.1 deraadt 4214:
4215: /*
4216: * Step 3: compute number of windows and set up tables.
4217: * We could do some of this later.
4218: */
4219: save %sp, -64, %sp
4220: rd %psr, %g1
4221: restore
4222: and %g1, 31, %g1 ! want just the CWP bits
4223: add %g1, 1, %o0 ! compute nwindows
1.111 pk 4224: sethi %hi(_C_LABEL(nwindows)), %o1 ! may as well tell everyone
1.1 deraadt 4225: call init_tables
1.111 pk 4226: st %o0, [%o1 + %lo(_C_LABEL(nwindows))]
1.1 deraadt 4227:
1.148 pk 4228: #if defined(SUN4) || defined(SUN4C)
1.29 deraadt 4229: /*
1.148 pk 4230: * Some sun4/sun4c models have fewer than 8 windows. For extra
1.29 deraadt 4231: * speed, we do not need to save/restore those windows
1.196 pk 4232: * The save/restore code has 6 "save"'s followed by 6
1.29 deraadt 4233: * "restore"'s -- we "nop" out the last "save" and first
4234: * "restore"
4235: */
4236: cmp %o0, 8
1.50 pk 4237: be 1f
1.29 deraadt 4238: noplab: nop
1.148 pk 4239: sethi %hi(noplab), %l0
4240: ld [%l0 + %lo(noplab)], %l1
1.29 deraadt 4241: set wb1, %l0
1.173 pk 4242: st %l1, [%l0 + 5*4]
4243: st %l1, [%l0 + 6*4]
1.29 deraadt 4244: 1:
4245: #endif
4246:
1.159 thorpej 4247: #if (defined(SUN4) || defined(SUN4C)) && (defined(SUN4M) || defined(SUN4D))
1.62 pk 4248:
4249: /*
4250: * Patch instructions at specified labels that start
4251: * per-architecture code-paths.
4252: */
4253: Lgandul: nop
4254:
4255: #define MUNGE(label) \
4256: sethi %hi(label), %o0; \
4257: st %l0, [%o0 + %lo(label)]
4258:
4259: sethi %hi(Lgandul), %o0
4260: ld [%o0 + %lo(Lgandul)], %l0 ! %l0 = NOP
4261:
4262: cmp %g4, CPU_SUN4M
1.159 thorpej 4263: beq,a 2f
4264: nop
4265:
4266: cmp %g4, CPU_SUN4D
1.62 pk 4267: bne,a 1f
4268: nop
4269:
1.159 thorpej 4270: 2: ! this should be automated!
1.62 pk 4271: MUNGE(NOP_ON_4M_1)
4272: MUNGE(NOP_ON_4M_2)
4273: MUNGE(NOP_ON_4M_3)
4274: MUNGE(NOP_ON_4M_4)
4275: MUNGE(NOP_ON_4M_5)
4276: MUNGE(NOP_ON_4M_6)
4277: MUNGE(NOP_ON_4M_7)
4278: MUNGE(NOP_ON_4M_8)
4279: MUNGE(NOP_ON_4M_9)
4280: MUNGE(NOP_ON_4M_10)
4281: MUNGE(NOP_ON_4M_11)
4282: MUNGE(NOP_ON_4M_12)
1.152 pk 4283: MUNGE(NOP_ON_4M_15)
1.62 pk 4284: b,a 2f
4285:
4286: 1:
1.68 mycroft 4287: MUNGE(NOP_ON_4_4C_1)
1.62 pk 4288:
4289: 2:
4290:
4291: #undef MUNGE
4292: #endif
4293:
1.1 deraadt 4294: /*
4295: * Step 4: change the trap base register, now that our trap handlers
4296: * will function (they need the tables we just set up).
1.195 pk 4297: * This depends on the fact that memset does not use %g6.
1.1 deraadt 4298: */
1.52 pk 4299: wr %g6, 0, %tbr
1.9 deraadt 4300: nop; nop; nop ! paranoia
1.37 pk 4301:
1.195 pk 4302: /* Clear `cpuinfo': memset(&cpuinfo, 0, sizeof cpuinfo) */
4303: sethi %hi(CPUINFO_VA), %o0
4304: set CPUINFO_STRUCTSIZE, %o2
1.192 jdolecek 4305: call _C_LABEL(memset)
1.194 martin 4306: clr %o1
1.98 pk 4307:
1.131 thorpej 4308: /*
4309: * Initialize `cpuinfo' fields which are needed early. Note
4310: * we make the cpuinfo self-reference at the local VA for now.
4311: * It may be changed to reference a global VA later.
4312: */
1.111 pk 4313: set _C_LABEL(u0), %o0 ! cpuinfo.curpcb = u0;
4314: sethi %hi(cpcb), %l0
4315: st %o0, [%l0 + %lo(cpcb)]
1.98 pk 4316:
1.132 pk 4317: sethi %hi(CPUINFO_VA), %o0 ! cpuinfo.ci_self = &cpuinfo;
1.131 thorpej 4318: sethi %hi(_CISELFP), %l0
4319: st %o0, [%l0 + %lo(_CISELFP)]
4320:
1.111 pk 4321: set _C_LABEL(eintstack), %o0 ! cpuinfo.eintstack= _eintstack;
1.101 pk 4322: sethi %hi(_EINTSTACKP), %l0
4323: st %o0, [%l0 + %lo(_EINTSTACKP)]
1.1 deraadt 4324:
4325: /*
1.11 deraadt 4326: * Ready to run C code; finish bootstrap.
1.1 deraadt 4327: */
1.111 pk 4328: call _C_LABEL(bootstrap)
1.1 deraadt 4329: nop
1.11 deraadt 4330:
4331: /*
4332: * Call main. This returns to us after loading /sbin/init into
4333: * user space. (If the exec fails, main() does not return.)
4334: */
1.111 pk 4335: call _C_LABEL(main)
1.11 deraadt 4336: clr %o0 ! our frame arg is ignored
1.89 pk 4337: /*NOTREACHED*/
1.164 pk 4338:
1.198 pk 4339: /*
 4340: * Openfirmware entry point: openfirmware(void *args)
 4341: *
 4341a: * Tail-call into the OpenFirmware client-interface vector that was
 4341b: * stashed in `romp' during bootstrap.  The argument array stays in
 4341c: * %o0 and %o7 is left untouched, so the PROM routine returns
 4341d: * directly to our caller with its result still in %o0.
 4342: */
 4343: ENTRY(openfirmware)
 4344: sethi %hi(_C_LABEL(romp)), %o1
 4345: ld [%o1 + %lo(_C_LABEL(romp))], %o2
 4346: jmp %o2 ! tail call; no ret here
 4347: nop ! branch delay slot
1.165 pk 4347:
 4348: #if defined(SUN4M) || defined(SUN4D)
 4349: /*
 4350: * V8 multiply and divide routines, to be copied over the code
 4351: * for the V6/V7 routines. Seems a shame to spend the call, but....
 4352: * Note: while .umul and .smul return a 64-bit result in %o1%o0,
 4353: * gcc only really cares about the low 32 bits in %o0. This is
 4354: * really just gcc output, cleaned up a bit.
 4355: *
 4355a: * sparc_v8_muldiv(): patch the compiler-support millicode entry
 4355b: * points (.mul/.umul/.div/.udiv/.rem/.urem) in the text segment
 4355c: * with the hardware V8 implementations below, then flush the
 4355d: * instruction cache over the patched ranges.  Self-modifying
 4355e: * code: must run before the routines are in use on other CPUs.
 4355f: * NOTE(review): the v8_* bodies clobber %g2/%g3 — presumably
 4355g: * acceptable for millicode on this platform; confirm against the
 4355h: * V6/V7 routines being replaced.
 4355: */
1.164 pk 4356: .globl _C_LABEL(sparc_v8_muldiv)
 4357: _C_LABEL(sparc_v8_muldiv):
 4358: save %sp, -CCFSZ, %sp
 4359:
 4359a: ! Copy v8_rtn over rtn, then `flush' the patched text in 8-byte
 4359b: ! (doubleword) strides so stale pre-patch instructions cannot be
 4359c: ! executed from the I-cache.
 4360: #define OVERWRITE(rtn, v8_rtn, len) \
 4361: set v8_rtn, %o0; \
 4362: set rtn, %o1; \
 4363: call _C_LABEL(bcopy); \
 4364: mov len, %o2; \
 4365: /* now flush the insn cache */ \
 4366: set rtn, %o0; \
 4367: mov len, %o1; \
 4368: 0: \
 4369: flush %o0; \
 4370: subcc %o1, 8, %o1; \
 4371: bgu 0b; \
 4372: add %o0, 8, %o0; \
 4373:
1.188 uwe 4374: OVERWRITE(.mul, v8_smul, .Lv8_smul_len)
 4375: OVERWRITE(.umul, v8_umul, .Lv8_umul_len)
 4376: OVERWRITE(.div, v8_sdiv, .Lv8_sdiv_len)
 4377: OVERWRITE(.udiv, v8_udiv, .Lv8_udiv_len)
 4378: OVERWRITE(.rem, v8_srem, .Lv8_srem_len)
 4379: OVERWRITE(.urem, v8_urem, .Lv8_urem_len)
1.164 pk 4380: #undef OVERWRITE
 4381: ret
 4382: restore
 4383:
 4383a: ! Replacement bodies.  The `nop; nop; nop' after each `wr ...,%y'
 4383b: ! satisfies the SPARC V8 three-instruction write-delay on the %y
 4383c: ! register before the subsequent sdiv/udiv may read it.
 4384: v8_smul:
 4385: retl
 4386: smul %o0, %o1, %o0
1.188 uwe 4387: .Lv8_smul_len = .-v8_smul
1.164 pk 4388: v8_umul:
 4389: retl
 4390: umul %o0, %o1, %o0
 4391: !v8_umul_len = 2 * 4
1.188 uwe 4392: .Lv8_umul_len = .-v8_umul
1.164 pk 4393: v8_sdiv:
 4394: sra %o0, 31, %g2 ! %y:%o0 = sign-extended dividend
 4395: wr %g2, 0, %y
 4396: nop; nop; nop
 4397: retl
 4398: sdiv %o0, %o1, %o0
1.188 uwe 4399: .Lv8_sdiv_len = .-v8_sdiv
1.164 pk 4400: v8_udiv:
 4401: wr %g0, 0, %y ! zero-extend dividend into %y
 4402: nop; nop; nop
 4403: retl
 4404: udiv %o0, %o1, %o0
1.188 uwe 4405: .Lv8_udiv_len = .-v8_udiv
1.164 pk 4406: v8_srem:
 4407: sra %o0, 31, %g3
 4408: wr %g3, 0, %y
 4409: nop; nop; nop
 4410: sdiv %o0, %o1, %g2 ! q = a / b;
 4411: smul %g2, %o1, %g2 ! rem = a - q * b
 4412: retl
 4413: sub %o0, %g2, %o0
1.188 uwe 4414: .Lv8_srem_len = .-v8_srem
1.164 pk 4415: v8_urem:
 4416: wr %g0, 0, %y
 4417: nop; nop; nop
 4418: udiv %o0, %o1, %g2 ! q = a / b;
 4419: smul %g2, %o1, %g2 ! rem = a - q * b
 4420: retl
 4421: sub %o0, %g2, %o0
1.188 uwe 4422: .Lv8_urem_len = .-v8_urem
1.164 pk 4423:
1.165 pk 4424: #endif /* SUN4M || SUN4D */
1.89 pk 4425:
1.145 mrg 4426: #if defined(MULTIPROCESSOR)
1.89 pk 4427: /*
 4428: * Entry point for non-boot CPUs in MP systems.
 4428a: *
 4428b: * Sequence: disable traps; set a clean %psr/%wim; load the trap
 4428c: * base from `trapbase'; switch onto this CPU's idle stack
 4428d: * (IDLE_UP); enable traps; call cpu_setup(); drop to spl0; spin
 4428e: * until the boot CPU releases us via `go_smp_cpus'; then set up
 4428f: * the idle-loop register conventions and join the idle loop.
 4429: */
1.111 pk 4430: .globl _C_LABEL(cpu_hatch)
 4431: _C_LABEL(cpu_hatch):
1.89 pk 4432: rd %psr, %g3 ! paranoia: make sure ...
 4433: andn %g3, PSR_ET, %g3 ! we have traps off
 4434: wr %g3, 0, %psr ! so that we can fiddle safely
 4435: nop; nop; nop
 4436:
 4437: wr %g0, 0, %wim ! make sure we can set psr
 4438: nop; nop; nop
 4439: wr %g0, PSR_S|PSR_PS|PSR_PIL, %psr ! set initial psr
 4440: nop; nop; nop
 4441:
 4442: wr %g0, 2, %wim ! set initial %wim (w1 invalid)
 4443:
 4444: /* Initialize Trap Base register */
1.111 pk 4445: sethi %hi(_C_LABEL(trapbase)), %o0
 4446: ld [%o0+%lo(_C_LABEL(trapbase))], %g6
1.89 pk 4447: wr %g6, 0, %tbr
 4448: nop; nop; nop ! paranoia
 4449:
 4450: /* Set up a stack */
 4451: set USRSTACK - CCFSZ, %fp ! as if called from user code
1.182 mrg 4452: sethi %hi(IDLE_UP), %o0
 4453: ld [%o0 + %lo(IDLE_UP)], %o0 ! this CPU's idle u-area
1.102 pk 4454: set USPACE - CCFSZ - 80, %sp
 4455: add %sp, %o0, %sp ! %sp = top of idle stack
1.89 pk 4456:
 4457: /* Enable traps */
 4458: rd %psr, %l0
 4459: wr %l0, PSR_ET, %psr
1.182 mrg 4460: nop; nop
 4461:
 4462: /* Call C code */
1.111 pk 4463: call _C_LABEL(cpu_setup)
1.182 mrg 4464: nop ! 3rd from above
 4465:
1.170 pk 4466: /* Enable interrupts */
 4467: rd %psr, %l0
 4468: andn %l0, PSR_PIL, %l0 ! psr &= ~PSR_PIL;
 4469: wr %l0, 0, %psr ! (void) spl0();
 4470: nop; nop; nop
 4471:
1.145 mrg 4472: /* Wait for go_smp_cpus to go */
 4473: set _C_LABEL(go_smp_cpus), %l1
1.142 mrg 4474: ld [%l1], %l0
 4475: 1:
1.145 mrg 4476: cmp %l0, %g0 ! spin while flag == 0
1.142 mrg 4477: be 1b
 4478: ld [%l1], %l0 ! (reload in delay slot)
 4479:
 4479a: ! Register conventions expected by the idle loop (see the
 4479b: ! REGISTER USAGE comment in switchexit below).
1.173 pk 4480: mov PSR_S|PSR_ET, %l1 ! oldpsr = PSR_S | PSR_ET;
 4481: sethi %hi(_C_LABEL(sched_whichqs)), %l2
 4482: clr %l4 ! lastproc = NULL;
 4483: sethi %hi(cpcb), %l6
 4484: b idle_enter
1.185 thorpej 4485: sethi %hi(curlwp), %l7 ! (delay slot)
1.145 mrg 4486:
 4487: #endif /* MULTIPROCESSOR */
1.1 deraadt 4488:
1.141 mrg 4489: #include "sigcode_state.s"
1.122 christos 4490:
1.111 pk 4491: .globl _C_LABEL(sigcode)
 4492: .globl _C_LABEL(esigcode)
 4492a: !
 4492b: ! Signal trampoline, delimited by sigcode/esigcode.  NOTE(review):
 4492c: ! presumably copied out to user space and entered on signal
 4492d: ! delivery with the handler address in %g1 — confirm against the
 4492e: ! sendsig code in machdep.c.  It calls the handler, restores FP/
 4492f: ! general state, then issues compat_16___sigreturn14(scp); if that
 4492g: ! ever returns, the process exits via SYS_exit.
 4493: _C_LABEL(sigcode):
1.1 deraadt 4494:
1.122 christos 4495: SAVE_STATE
 4496:
1.1 deraadt 4497: ldd [%fp + 64], %o0 ! sig, code
 4498: ld [%fp + 76], %o3 ! arg3
 4499: call %g1 ! (*sa->sa_handler)(sig,code,scp,arg3)
 4500: add %fp, 64 + 16, %o2 ! scp
 4501:
1.122 christos 4502: RESTORE_STATE
1.1 deraadt 4503:
1.92 pk 4504: ! get registers back & set syscall #
1.189 pk 4505: restore %g0, SYS_compat_16___sigreturn14, %g1
1.1 deraadt 4506: add %sp, 64 + 16, %o0 ! compute scp
 4507: t ST_SYSCALL ! sigreturn(scp)
 4508: ! sigreturn does not return unless it fails
 4509: mov SYS_exit, %g1 ! exit(errno)
 4510: t ST_SYSCALL
1.185 thorpej 4511: /* NOTREACHED */
1.111 pk 4512: _C_LABEL(esigcode):
1.1 deraadt 4513:
4514: /*
4515: * Primitives
1.52 pk 4516: */
1.1 deraadt 4517:
1.63 pk 4518: /*
 4519: * General-purpose NULL routine.
 4519a: * Returns immediately; useful as a do-nothing function pointer.
 4520: */
 4521: ENTRY(sparc_noop)
 4522: retl
 4523: nop ! delay slot
1.1 deraadt 4524:
 4525: /*
1.24 deraadt 4526: * getfp() - get stack frame pointer
 4526a: * Returns the caller's %fp in %o0; leaf routine, no window used.
 4527: */
 4528: ENTRY(getfp)
 4529: retl
 4530: mov %fp, %o0 ! delay slot: result = %fp
4531:
 4532: /*
1.1 deraadt 4533: * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
 4534: *
 4535: * Copy a null terminated string from the user address space into
 4536: * the kernel address space.
 4536a: * Returns 0, EFAULT (source not a user address), or ENAMETOOLONG.
 4536b: * Shares its copy loop and exit path with copyoutstr (Lcsdocopy/
 4536c: * Lcsdone below).
 4537: */
 4538: ENTRY(copyinstr)
 4539: ! %o0 = fromaddr, %o1 = toaddr, %o2 = maxlen, %o3 = &lencopied
1.126 chs 4540: mov %o1, %o5 ! save = toaddr;
 4541: tst %o2 ! maxlen == 0?
 4542: beq,a Lcstoolong ! yes, return ENAMETOOLONG
 4543: sethi %hi(cpcb), %o4 ! (annulled: only if taken)
 4544:
1.1 deraadt 4545: set KERNBASE, %o4
 4546: cmp %o0, %o4 ! fromaddr < KERNBASE?
1.126 chs 4547: blu Lcsdocopy ! yes, go do it
 4548: sethi %hi(cpcb), %o4 ! (first instr of copy)
1.1 deraadt 4549:
 4549a: ! NOTE(review): on this path (and the maxlen==0 path) %o4 holds
 4549b: ! only %hi(cpcb), not the loaded pcb pointer, yet Lcsdone's final
 4549c: ! `st %g0, [%o4 + PCB_ONFAULT]' still executes — verify the store
 4549d: ! target is harmless here.
 4550: b Lcsdone ! no, return EFAULT
 4551: mov EFAULT, %o0
4552:
 4553: /*
 4554: * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
 4555: *
 4556: * Copy a null terminated string from the kernel
 4557: * address space to the user address space.
 4557a: * Mirror image of copyinstr: the *destination* must be a user
 4557b: * address.  Returns 0, EFAULT, or ENAMETOOLONG; shares Lcsdocopy/
 4557c: * Lcsdone with copyinstr.
 4558: */
 4559: ENTRY(copyoutstr)
 4560: ! %o0 = fromaddr, %o1 = toaddr, %o2 = maxlen, %o3 = &lencopied
1.126 chs 4561: mov %o1, %o5 ! save = toaddr;
 4562: tst %o2 ! maxlen == 0?
 4563: beq,a Lcstoolong ! yes, return ENAMETOOLONG
 4564: sethi %hi(cpcb), %o4 ! (annulled: only if taken)
 4565:
1.1 deraadt 4566: set KERNBASE, %o4
 4567: cmp %o1, %o4 ! toaddr < KERNBASE?
1.126 chs 4568: blu Lcsdocopy ! yes, go do it
1.111 pk 4569: sethi %hi(cpcb), %o4 ! (first instr of copy)
1.1 deraadt 4570:
 4570a: ! NOTE(review): as in copyinstr, %o4 is only %hi(cpcb) when
 4570b: ! Lcsdone's onfault store runs on this error path — verify.
 4571: b Lcsdone ! no, return EFAULT
 4572: mov EFAULT, %o0
4573:
 4573a: ! Shared byte-at-a-time copy loop and exit path for copyinstr and
 4573b: ! copyoutstr.  On entry %o4 = %hi(cpcb); pcb_onfault is pointed at
 4573c: ! Lcsdone so a fault in the loop resumes there (the fault handler
 4573d: ! presumably supplies the error code in %o0 — verify in trap code).
 4574: Lcsdocopy:
1.111 pk 4575: ! sethi %hi(cpcb), %o4 ! (done earlier)
 4576: ld [%o4 + %lo(cpcb)], %o4 ! catch faults
1.138 chs 4577: set Lcsdone, %g1
1.126 chs 4578: st %g1, [%o4 + PCB_ONFAULT]
1.1 deraadt 4579:
 4580: ! XXX should do this in bigger chunks when possible
 4581: 0: ! loop:
 4582: ldsb [%o0], %g1 ! c = *fromaddr;
 4583: tst %g1
 4584: stb %g1, [%o1] ! *toaddr++ = c;
 4585: be 1f ! if (c == NULL)
 4586: inc %o1 ! goto ok;
 4587: deccc %o2 ! if (--len > 0) {
1.126 chs 4588: bgu 0b ! fromaddr++;
1.1 deraadt 4589: inc %o0 ! goto loop;
 4590: ! }
1.126 chs 4591: Lcstoolong: !
1.1 deraadt 4592: b Lcsdone ! error = ENAMETOOLONG;
 4593: mov ENAMETOOLONG, %o0 ! goto done;
 4594: 1: ! ok:
 4595: clr %o0 ! error = 0;
 4596: Lcsdone: ! done:
 4597: sub %o1, %o5, %o1 ! len = to - save;
 4598: tst %o3 ! if (lencopied)
 4599: bnz,a 3f
 4600: st %o1, [%o3] ! *lencopied = len;
 4601: 3:
 4602: retl ! cpcb->pcb_onfault = 0;
 4603: st %g0, [%o4 + PCB_ONFAULT]! return (error);
4604:
 4605: /*
 4606: * copystr(fromaddr, toaddr, maxlength, &lencopied)
 4607: *
 4608: * Copy a null terminated string from one point to another in
 4609: * the kernel address space. (This is a leaf procedure, but
 4610: * it does not seem that way to the C compiler.)
 4610a: * Kernel-to-kernel only, so no pcb_onfault setup is needed.
 4610b: * Returns 0 or ENAMETOOLONG; length copied (incl. NUL) is stored
 4610c: * through lencopied when non-NULL.
 4611: */
 4612: ENTRY(copystr)
 4613: mov %o1, %o5 ! to0 = to;
1.126 chs 4614: tst %o2 ! if (maxlength == 0)
 4615: beq,a 2f !
 4616: mov ENAMETOOLONG, %o0 ! ret = ENAMETOOLONG; goto done;
 4617:
1.1 deraadt 4618: 0: ! loop:
 4619: ldsb [%o0], %o4 ! c = *from;
 4620: tst %o4
 4621: stb %o4, [%o1] ! *to++ = c;
 4622: be 1f ! if (c == 0)
 4623: inc %o1 ! goto ok;
 4624: deccc %o2 ! if (--len > 0) {
1.126 chs 4625: bgu,a 0b ! from++;
1.1 deraadt 4626: inc %o0 ! goto loop;
 4627: b 2f ! }
 4628: mov ENAMETOOLONG, %o0 ! ret = ENAMETOOLONG; goto done;
 4629: 1: ! ok:
 4630: clr %o0 ! ret = 0;
 4631: 2:
 4632: sub %o1, %o5, %o1 ! len = to - to0;
 4633: tst %o3 ! if (lencopied)
 4634: bnz,a 3f
 4635: st %o1, [%o3] ! *lencopied = len;
 4636: 3:
 4637: retl
 4638: nop ! delay slot
4639:
1.52 pk 4640: /*
1.1 deraadt 4641: * Copyin(src, dst, len)
 4642: *
 4643: * Copy specified amount of data from user space into the kernel.
 4643a: * Validates that src is a user address, then shares Ldocopy (a
 4643b: * fault-protected bcopy) with copyout.  Returns 0 or EFAULT.
 4644: */
 4645: ENTRY(copyin)
 4646: set KERNBASE, %o3
 4647: cmp %o0, %o3 ! src < KERNBASE?
 4648: blu,a Ldocopy ! yes, can try it
1.111 pk 4649: sethi %hi(cpcb), %o3 ! (annulled: only if taken)
1.1 deraadt 4650:
 4651: /* source address points into kernel space: return EFAULT */
 4652: retl
 4653: mov EFAULT, %o0
4654:
 4655: /*
 4656: * Copyout(src, dst, len)
 4657: *
 4658: * Copy specified amount of data from kernel to user space.
 4659: * Just like copyin, except that the `dst' addresses are user space
 4660: * rather than the `src' addresses.
 4660a: * Returns 0 or EFAULT, via the shared Ldocopy path below.
 4661: */
 4662: ENTRY(copyout)
 4663: set KERNBASE, %o3
 4664: cmp %o1, %o3 ! dst < KERNBASE?
 4665: blu,a Ldocopy ! yes, can try it
1.111 pk 4666: sethi %hi(cpcb), %o3 ! (annulled: only if taken)
1.1 deraadt 4667:
 4668: /* destination address points into kernel space: return EFAULT */
 4669: retl
 4670: mov EFAULT, %o0
4671:
 4672: /*
 4673: * ******NOTE****** this depends on bcopy() not using %g7
 4673a: *
 4673b: * Shared body for copyin/copyout: arm pcb_onfault with Lcopyfault,
 4673c: * run bcopy(src, dst, len), then disarm and return 0.  The real
 4673d: * return address is kept in %g7 across the bcopy call.
 4674: */
 4675: Ldocopy:
1.111 pk 4676: ! sethi %hi(cpcb), %o3
 4677: ld [%o3 + %lo(cpcb)], %o3
1.1 deraadt 4678: set Lcopyfault, %o4
 4679: mov %o7, %g7 ! save return address
1.111 pk 4680: call _C_LABEL(bcopy) ! bcopy(src, dst, len)
1.1 deraadt 4681: st %o4, [%o3 + PCB_ONFAULT] ! (delay slot: arm onfault)
 4682:
1.111 pk 4683: sethi %hi(cpcb), %o3
 4684: ld [%o3 + %lo(cpcb)], %o3
1.1 deraadt 4685: st %g0, [%o3 + PCB_ONFAULT] ! disarm onfault
 4686: jmp %g7 + 8 ! return to original caller
 4687: clr %o0 ! return 0
 4688:
 4689: ! Copyin or copyout fault. Clear cpcb->pcb_onfault and return EFAULT.
 4690: ! Note that although we were in bcopy, there is no state to clean up;
 4691: ! the only special thing is that we have to return to [g7 + 8] rather than
 4692: ! [o7 + 8].
 4692a: ! NOTE(review): %o0 is not set here — presumably the fault handler
 4692b: ! loads EFAULT into %o0 before resuming at pcb_onfault; verify.
 4693: Lcopyfault:
1.111 pk 4694: sethi %hi(cpcb), %o3
 4695: ld [%o3 + %lo(cpcb)], %o3
1.1 deraadt 4696: jmp %g7 + 8
1.138 chs 4697: st %g0, [%o3 + PCB_ONFAULT] ! (delay slot: disarm onfault)
1.1 deraadt 4698:
4699:
 4700: /*
 4701: * Write all user windows presently in the CPU back to the user's stack.
 4702: * We just do `save' instructions until pcb_uw == 0.
 4703: *
 4704: * p = cpcb;
 4705: * nsaves = 0;
 4706: * while (p->pcb_uw > 0)
 4707: * save(), nsaves++;
 4708: * while (--nsaves >= 0)
 4709: * restore();
 4709a: *
 4709b: * Each `save' that hits an invalid window traps to the window-
 4709c: * overflow handler, which spills a user window and decrements
 4709d: * cpcb->pcb_uw; %g5 counts how many saves to undo with restores.
 4710: */
 4711: ENTRY(write_user_windows)
1.111 pk 4712: sethi %hi(cpcb), %g6
 4713: ld [%g6 + %lo(cpcb)], %g6
1.1 deraadt 4714: b 2f
 4715: clr %g5 ! (delay slot) nsaves = 0
 4716: 1:
 4717: save %sp, -64, %sp
 4718: 2:
 4719: ld [%g6 + PCB_UW], %g7
 4720: tst %g7
 4721: bg,a 1b ! annulled: inc only if looping
 4722: inc %g5
 4723: 3:
 4724: deccc %g5
 4725: bge,a 3b ! annulled: restore only if looping
 4726: restore
 4727: retl
 4728: nop ! delay slot
4729:
4730:
4731: /*
4732: * Switch statistics (for later tweaking):
4733: * nswitchdiff = p1 => p2 (i.e., chose different process)
1.10 deraadt 4734: * nswitchexit = number of calls to switchexit()
1.111 pk 4735: * cnt.v_swtch = total calls to swtch+swtchexit
1.1 deraadt 4736: */
1.111 pk 4737: .comm _C_LABEL(nswitchdiff), 4
4738: .comm _C_LABEL(nswitchexit), 4
1.1 deraadt 4739:
1.173 pk 4740: /*
 4741: * switchexit is called only from cpu_exit() before the current process
 4742: * has freed its vmspace and kernel stack; we must schedule them to be
1.185 thorpej 4743: * freed. (curlwp is already NULL.)
1.173 pk 4744: *
 4745: * We lay the process to rest by changing to the `idle' kernel stack,
 4746: * and note that the `last loaded process' is nonexistent.
 4746a: *
 4746b: * In: %o0 = proc/lwp to pass to the exit function,
 4746c: * %o1 = pointer to exit2() or lwp_exit2().
 4746d: * Does not return; falls into the idle loop at idle_enter.
 4747: */
 4748: ENTRY(switchexit)
 4749: mov %o0, %g2 ! save proc for exit2() call
1.185 thorpej 4750: mov %o1, %g1 ! exit2() or lwp_exit2()
1.173 pk 4751:
 4752: /*
 4753: * Change pcb to idle u. area, i.e., set %sp to top of stack
 4754: * and %psr to PSR_S|PSR_ET, and set cpcb to point to idle_u.
 4755: * Once we have left the old stack, we can call exit2() to
 4756: * destroy it. Call it any sooner and the register windows
 4757: * go bye-bye.
 4758: */
 4759: #if defined(MULTIPROCESSOR)
 4760: sethi %hi(IDLE_UP), %g5
 4761: ld [%g5 + %lo(IDLE_UP)], %g5 ! per-CPU idle u-area
 4762: #else
 4763: set _C_LABEL(idle_u), %g5
 4764: #endif
 4765: sethi %hi(cpcb), %g6
 4766: mov 1, %g7
 4767: wr %g0, PSR_S, %psr ! change to window 0, traps off
 4768: wr %g0, 2, %wim ! and make window 1 the trap window
 4769: st %g5, [%g6 + %lo(cpcb)] ! cpcb = &idle_u
 4770: st %g7, [%g5 + PCB_WIM] ! idle_u.pcb_wim = log2(2) = 1
 4771: #if defined(MULTIPROCESSOR)
 4772: set USPACE-CCFSZ, %o1 !
 4773: add %g5, %o1, %sp ! set new %sp
 4774: #else
 4775: set _C_LABEL(idle_u) + USPACE-CCFSZ, %sp ! set new %sp
 4776: #endif
 4777:
 4778: #ifdef DEBUG
 4779: mov %g5, %l6 ! %l6 = _idle_u
 4780: SET_SP_REDZONE(%l6, %l5)
 4781: #endif
 4782: wr %g0, PSR_S|PSR_ET, %psr ! and then enable traps
1.176 pk 4783: nop
1.185 thorpej 4784: call %g1 ! {lwp}exit2(p)
1.173 pk 4785: mov %g2, %o0 ! (delay slot: argument)
 4786:
 4787: /*
 4788: * Now fall through to `the last switch'. %l6 was set to
 4789: * %hi(cpcb), but may have been clobbered in exit2(),
 4790: * so all the registers described below will be set here.
 4791: *
 4792: * REGISTER USAGE AT THIS POINT:
 4793: * %l1 = oldpsr (excluding ipl bits)
 4794: * %l2 = %hi(whichqs)
 4795: * %l4 = lastproc
 4796: * %l6 = %hi(cpcb)
1.185 thorpej 4797: * %l7 = %hi(curlwp)
1.173 pk 4798: * %o0 = tmp 1
 4799: * %o1 = tmp 2
 4800: */
 4801:
 4802: INCR(_C_LABEL(nswitchexit)) ! nswitchexit++;
 4803: INCR(_C_LABEL(uvmexp)+V_SWTCH) ! cnt.v_switch++;
 4804:
 4805: mov PSR_S|PSR_ET, %l1 ! oldpsr = PSR_S | PSR_ET;
 4806: sethi %hi(_C_LABEL(sched_whichqs)), %l2
1.180 mrg 4807: #if !defined(MULTIPROCESSOR)
1.173 pk 4808: clr %l4 ! lastproc = NULL;
1.180 mrg 4809: #endif
1.173 pk 4810: sethi %hi(cpcb), %l6
1.185 thorpej 4811: sethi %hi(curlwp), %l7
1.173 pk 4812: b idle_enter
1.185 thorpej 4813: st %g0, [%l7 + %lo(curlwp)] ! curlwp = NULL;
1.173 pk 4814:
4815: /*
4816: * When no processes are on the runq, switch
4817: * idles here waiting for something to come ready.
4818: * The registers are set up as noted above.
1.184 pk 4819: *
4820: * There are three entry points into the idle loop.
4821: * idle_switch: when a switch to the CPU's idle stack is required
4822: * idle: when already on the idle stack, scheduler lock held
4823: * idle_enter: when already on the idle stack, scheduler lock not held
1.173 pk 4824: */
!
! idle_switch: move this CPU onto its private idle stack (idle_u, or the
! per-CPU IDLE_UP on MULTIPROCESSOR kernels) and fall into the idle loop.
! The window/WIM/%sp shuffle below must run with traps disabled; traps
! are re-enabled only after the new %sp is installed.
!
1.184 pk 4825: idle_switch:
4826: #if defined(MULTIPROCESSOR)
4827: sethi %hi(IDLE_UP), %g5
4828: ld [%g5 + %lo(IDLE_UP)], %g5
4829: #else
4830: set _C_LABEL(idle_u), %g5
4831: #endif
4832: mov %l6, %g6 ! save %hi(cpcb) before changing windows
4833: wr %g0, PSR_S|PSR_PIL, %psr! change to window 0, traps off
4834: wr %g0, 2, %wim ! and make window 1 the trap window
4835: mov 1, %o0
4836: st %g5, [%g6 + %lo(cpcb)] ! cpcb = &idle_u
4837: st %o0, [%g5 + PCB_WIM] ! idle_u.pcb_wim = log2(2) = 1
4838: #if defined(MULTIPROCESSOR)
4839: set USPACE-CCFSZ, %o1 !
4840: add %g5, %o1, %sp ! set new %sp
4841: #else
4842: set _C_LABEL(idle_u) + USPACE-CCFSZ, %sp ! set new %sp
4843: #endif
4844: mov %g0, %i6 ! paranoid
4845: mov %g0, %i7 !
4846:
4847: #ifdef DEBUG
4848: mov %g5, %o0 ! %o0 = _idle_u
4849: SET_SP_REDZONE(%o0, %o1)
4850: #endif
4851: ! enable traps and continue at splsched()
4852: wr %g0, PSR_S|PSR_ET|(IPL_SCHED<<8), %psr
4853:
4854: /* now set up the locals in our new window */
4855: mov PSR_S|PSR_ET, %l1 ! oldpsr = PSR_S | PSR_ET;
4856: sethi %hi(_C_LABEL(sched_whichqs)), %l2
4857: clr %l4 ! lastproc = NULL;
4858: sethi %hi(cpcb), %l6
1.185 thorpej 4859: sethi %hi(curlwp), %l7
1.184 pk 4860: /* FALLTHROUGH*/
4861:
! idle: entered already on the idle stack, scheduler lock held.
1.173 pk 4862: idle:
4863: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
4864: ! unlock scheduler lock
4865: call _C_LABEL(sched_unlock_idle)
4866: nop
4867: #endif
4868:
! idle_enter: on the idle stack, scheduler lock not held.  Spin at
! spl0 reading sched_whichqs; zero free pages (uvm_pageidlezero)
! while there is nothing runnable.
4869: idle_enter:
1.180 mrg 4870: #if defined(MULTIPROCESSOR)
4871: clr %l4 ! lastproc = NULL;
4872: #endif
1.173 pk 4873: wr %l1, 0, %psr ! (void) spl0();
4874: 1: ! spin reading whichqs until nonzero
4875: ld [%l2 + %lo(_C_LABEL(sched_whichqs))], %o3
4876: tst %o3
4877: bnz,a idle_leave
4878: wr %l1, (IPL_SCHED << 8), %psr ! (void) splsched();
4879:
4880: ! Check uvm.page_idle_zero
4881: sethi %hi(_C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO), %o3
4882: ld [%o3 + %lo(_C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO)], %o3
4883: tst %o3
4884: bz 1b
4885: nop
4886:
4887: call _C_LABEL(uvm_pageidlezero)
4888: nop
4889: b,a 1b
4890:
1.184 pk 4891: idle_leave:
4892: ! just wrote to %psr; observe psr delay before doing a `save'
4893: ! or loading sched_whichqs.
4894: nop; nop
1.173 pk 4895: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
4896: /* Before we leave the idle loop, detain the scheduler lock */
4897: call _C_LABEL(sched_lock_idle)
4898: nop
4899: #endif
1.184 pk 4900: b Lsw_scan
4901: ld [%l2 + %lo(_C_LABEL(sched_whichqs))], %o3
1.173 pk 4902:
4903: Lsw_panic_rq:
4904: sethi %hi(1f), %o0
4905: call _C_LABEL(panic)
4906: or %lo(1f), %o0, %o0
4907: Lsw_panic_wchan:
4908: sethi %hi(2f), %o0
4909: call _C_LABEL(panic)
4910: or %lo(2f), %o0, %o0
4911: Lsw_panic_srun:
4912: sethi %hi(3f), %o0
4913: call _C_LABEL(panic)
4914: or %lo(3f), %o0, %o0
4915: 1: .asciz "switch rq"
4916: 2: .asciz "switch wchan"
4917: 3: .asciz "switch SRUN"
4918: _ALIGN
4919:
4920: /*
4921: * cpu_switch() picks a process to run and runs it, saving the current
4922: * one away. On the assumption that (since most workstations are
4923: * single user machines) the chances are quite good that the new
4924: * process will turn out to be the current process, we defer saving
4925: * it here until we have found someone to load. If that someone
4926: * is the current process we avoid both store and load.
4927: *
4928: * cpu_switch() is always entered at splsched.
4929: *
4930: * IT MIGHT BE WORTH SAVING BEFORE ENTERING idle TO AVOID HAVING TO
4931: * SAVE LATER WHEN SOMEONE ELSE IS READY ... MUST MEASURE!
4932: */
4933: .globl _C_LABEL(__ffstab)
1.185 thorpej 4934: ENTRY(cpu_switch)
1.184 pk 4935: ENTRY(cpu_switchto)
!
! cpu_switch(): save the outgoing context in cpcb, pick the highest-
! priority runnable LWP from sched_qs and resume it.  cpu_switchto()
! is the same entry point: when %i1 (the new LWP) is non-NULL, the
! run-queue scan is skipped and %i1 is loaded directly (Lsw_load).
! Returns non-zero in %o0 iff a different LWP was chosen (see the
! `restore %g0, %l0, %o0' exits below).
!
1.173 pk 4936: /*
4937: * REGISTER USAGE AT THIS POINT:
4938: * %l1 = oldpsr (excluding ipl bits)
4939: * %l2 = %hi(whichqs)
4940: * %l3(%g3) = p
4941: * %l4(%g4) = lastproc
4942: * %l5 = tmp 0
4943: * %l6 = %hi(cpcb)
1.185 thorpej 4944: * %l7 = %hi(curlwp)
1.173 pk 4945: * %o0 = tmp 1
4946: * %o1 = tmp 2
4947: * %o2 = tmp 3
4948: * %o3 = tmp 4, then at Lsw_scan, whichqs
4949: * %o4 = tmp 5, then at Lsw_scan, which
4950: * %o5 = tmp 6, then at Lsw_scan, q
4951: */
4952: save %sp, -CCFSZ, %sp
4953: mov %i0, %l4 ! save p
4954: sethi %hi(cpcb), %l6
4955: ld [%l6 + %lo(cpcb)], %o0
4956: std %i6, [%o0 + PCB_SP] ! cpcb->pcb_<sp,pc> = <fp,pc>;
4957: rd %psr, %l1 ! oldpsr = %psr;
1.185 thorpej 4958: sethi %hi(curlwp), %l7
1.173 pk 4959: st %l1, [%o0 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
4960: andn %l1, PSR_PIL, %l1 ! oldpsr &= ~PSR_PIL;
1.185 thorpej 4961: st %g0, [%l7 + %lo(curlwp)] ! curlwp = NULL;
1.173 pk 4962: /*
4963: * Save the old process: write back all windows (excluding
4964: * the current one). XXX crude; knows nwindows <= 8
4965: */
4966: #define SAVE save %sp, -64, %sp
4967: wb1: SAVE; SAVE; SAVE; SAVE; SAVE; SAVE; /* 6 of each: */
4968: restore; restore; restore; restore; restore; restore
4969:
1.184 pk 4970: #if defined(MULTIPROCESSOR)
4971: /* flush this process's context from TLB (on SUN4M/4D) */
4972: call _C_LABEL(pmap_deactivate) ! pmap_deactive(lastproc);
4973: mov %i0, %o0
4974: #endif
4975:
1.173 pk 4976: /* If we've been given a process to switch to, skip the rq stuff */
4977: tst %i1
4978: bnz,a Lsw_load
4979: mov %i1, %l3 ! but move into the expected register first
4980:
1.184 pk 4981: /* If nothing on the rq, wait after switching to idle stack */
1.173 pk 4982: sethi %hi(_C_LABEL(sched_whichqs)), %l2
1.184 pk 4983: ld [%l2 + %lo(_C_LABEL(sched_whichqs))], %o3
4984: tst %o3
4985: bz idle_switch
4986: EMPTY
1.173 pk 4987:
4988: Lsw_scan:
4989: /*
1.184 pk 4990: * Enter here with %o3 set to sched_whichqs.
4991: *
1.173 pk 4992: * Optimized inline expansion of `which = ffs(whichqs) - 1';
4993: * branches to idle if ffs(whichqs) was 0.
4994: */
4995: set _C_LABEL(__ffstab), %o2
4996: andcc %o3, 0xff, %o1 ! byte 0 zero?
4997: bz,a 1f ! yes, try byte 1
4998: srl %o3, 8, %o0
4999: b 2f ! ffs = ffstab[byte0]; which = ffs - 1;
5000: ldsb [%o2 + %o1], %o0
5001: 1: andcc %o0, 0xff, %o1 ! byte 1 zero?
5002: bz,a 1f ! yes, try byte 2
5003: srl %o0, 8, %o0
5004: ldsb [%o2 + %o1], %o0 ! which = ffstab[byte1] + 7;
5005: b 3f
5006: add %o0, 7, %o4
5007: 1: andcc %o0, 0xff, %o1 ! byte 2 zero?
5008: bz,a 1f ! yes, try byte 3
5009: srl %o0, 8, %o0
5010: ldsb [%o2 + %o1], %o0 ! which = ffstab[byte2] + 15;
5011: b 3f
5012: add %o0, 15, %o4
5013: 1: ldsb [%o2 + %o0], %o0 ! ffs = ffstab[byte3] + 24
5014: addcc %o0, 24, %o0 ! (note that ffstab[0] == -24)
5015: bz idle ! if answer was 0, go idle
5016: EMPTY
5017: 2: sub %o0, 1, %o4 ! which = ffs(whichqs) - 1
5018: 3: /* end optimized inline expansion */
5019:
5020: /*
5021: * We found a nonempty run queue. Take its first process.
5022: */
5023: set _C_LABEL(sched_qs), %o5 ! q = &qs[which];
5024: sll %o4, 3, %o0
5025: add %o0, %o5, %o5
5026: ld [%o5], %l3 ! p = q->ph_link;
5027: cmp %l3, %o5 ! if (p == q)
5028: be Lsw_panic_rq ! panic("switch rq");
5029: EMPTY
5030: ld [%l3], %o0 ! tmp0 = p->p_forw;
5031: st %o0, [%o5] ! q->ph_link = tmp0;
5032: st %o5, [%o0 + 4] ! tmp0->p_back = q;
5033: cmp %o0, %o5 ! if (tmp0 == q)
5034: bne Lsw_load
5035: EMPTY
5036: mov 1, %o1 ! whichqs &= ~(1 << which);
5037: sll %o1, %o4, %o1
5038: andn %o3, %o1, %o3
5039: st %o3, [%l2 + %lo(_C_LABEL(sched_whichqs))]
5040:
5041: Lsw_load:
5042: /*
5043: * PHASE TWO: NEW REGISTER USAGE:
5044: * %l1 = oldpsr (excluding ipl bits)
5045: * %l2 =
5046: * %l3 = p
5047: * %l4 = lastproc
5048: * %l5 =
5049: * %l6 = %hi(cpcb)
1.185 thorpej 5050: * %l7 = %hi(curlwp)
1.173 pk 5051: * %o0 = tmp 1
5052: * %o1 = tmp 2
5053: * %o2 = tmp 3
5054: * %o3 = vm
5055: */
5056:
5057: /* firewalls */
1.185 thorpej 5058: ld [%l3 + L_WCHAN], %o0 ! if (p->p_wchan)
1.173 pk 5059: tst %o0
5060: bne Lsw_panic_wchan ! panic("switch wchan");
5061: EMPTY
1.185 thorpej 5062: ld [%l3 + L_STAT], %o0 ! if (p->p_stat != LSRUN)
5063: cmp %o0, LSRUN
1.173 pk 5064: bne Lsw_panic_srun ! panic("switch SRUN");
5065: EMPTY
5066:
5067: /*
5068: * Committed to running process p.
5069: * It may be the same as the one we were running before.
5070: */
1.185 thorpej 5071: mov LSONPROC, %o0 ! p->p_stat = LSONPROC;
5072: st %o0, [%l3 + L_STAT]
1.173 pk 5073:
5074: /* p->p_cpu initialized in fork1() for single-processor */
5075: #if defined(MULTIPROCESSOR)
5076: sethi %hi(_CISELFP), %o0 ! p->p_cpu = cpuinfo.ci_self;
5077: ld [%o0 + %lo(_CISELFP)], %o0
1.185 thorpej 5078: st %o0, [%l3 + L_CPU]
1.173 pk 5079: #endif
5080:
1.185 thorpej 5081: ld [%l3 + L_ADDR], %g5 ! newpcb = p->p_addr;
1.173 pk 5082: st %g0, [%l3 + 4] ! p->p_back = NULL;
1.185 thorpej 5083: st %l3, [%l7 + %lo(curlwp)] ! curlwp = p;
1.173 pk 5084:
5085: /*
5086: * Load the new process. To load, we must change stacks and
5087: * and alter cpcb. We must also load the CWP and WIM from the
5088: * new process' PCB, since, when we finally return from
5089: * the trap, the CWP of the trap window must match the
5090: * CWP stored in the trap frame.
5091: *
5092: * Once the new CWP is set below our local registers become
5093: * invalid, so:
5094: *
5095: * PHASE THREE: NEW REGISTER USAGE:
5096: * %g2 = newpsr
5097: * %g3 = p
5098: * %g4 = lastproc
5099: * %g5 = newpcb
1.176 pk 5100: * %l0 = return value
1.173 pk 5101: * %l1 = oldpsr (excluding ipl bits)
5102: * %l6 = %hi(cpcb)
5103: * %o0 = tmp 1
5104: * %o1 = tmp 2
5105: * %o2 = tmp 3
5106: * %o3 = vm
5107: */
5108:
5109: mov %l3, %g3 ! save p and lastproc to globals
5110: mov %l4, %g4 !
5111: ld [%g5 + PCB_PSR], %g2 ! newpsr = newpcb->pcb_psr;
5112:
5113: /* traps off while we switch to the new stack */
5114: wr %l1, (IPL_SCHED << 8) | PSR_ET, %psr ! (wr XORs: %l1 has PSR_ET set, so this clears ET)
5115:
5116: /* set new cpcb */
5117: st %g5, [%l6 + %lo(cpcb)] ! cpcb = newpcb;
5118:
5119: /* compute new wim */
5120: ld [%g5 + PCB_WIM], %o0
5121: mov 1, %o1
5122: sll %o1, %o0, %o0
5123: wr %o0, 0, %wim ! %wim = 1 << newpcb->pcb_wim;
5124: /* now must not change %psr for 3 more instrs */
5125: /* Clear FP & CP enable bits, as well as the PIL field */
5126: /*1,2*/ set PSR_EF|PSR_EC|PSR_PIL, %o0
5127: /*3*/ andn %g2, %o0, %g2 ! newpsr &= ~(PSR_EF|PSR_EC|PSR_PIL);
5128: /* set new psr, but with traps disabled */
5129: wr %g2, (IPL_SCHED << 8)|PSR_ET, %psr ! %psr = newpsr ^ PSR_ET;
5130:
5131: /* load new stack and return address */
5132: ldd [%g5 + PCB_SP], %i6 ! <fp,pc> = newpcb->pcb_<sp,pc>
5133: add %fp, -CCFSZ, %sp ! set stack frame for this window
5134: #ifdef DEBUG
5135: mov %g5, %o0
5136: SET_SP_REDZONE(%o0, %o1)
5137: CHECK_SP_REDZONE(%o0, %o1)
5138: #endif
5139:
5140: /* finally, enable traps and continue at splsched() */
5141: wr %g2, IPL_SCHED << 8 , %psr ! psr = newpsr;
5142:
1.191 pk 5143: mov %g3, %l3 ! restore p and lastproc from globals
5144: mov %g4, %l4 ! (globals will get clobbered by the
5145: ! sched_unlock_idle() below)
5146:
1.184 pk 5147: sethi %hi(_WANT_RESCHED), %o0 ! want_resched = 0;
5148: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
5149: /* Done with the run queues; release the scheduler lock */
5150: call _C_LABEL(sched_unlock_idle)
1.180 mrg 5151: #endif
1.184 pk 5152: st %g0, [%o0 + %lo(_WANT_RESCHED)]! delay slot
1.180 mrg 5153:
1.173 pk 5154: /*
5155: * Now running p. Make sure it has a context so that it
5156: * can talk about user space stuff. (Its pcb_uw is currently
5157: * zero so it is safe to have interrupts going here.)
1.176 pk 5158: *
5159: * On multi-processor machines, the context might have changed
5160: * (e.g. by exec(2)) even if we pick up the same process here.
1.173 pk 5161: */
1.191 pk 5162: subcc %l3, %l4, %l0 ! p == lastproc?
1.176 pk 5163: #if !defined(MULTIPROCESSOR)
1.173 pk 5164: be Lsw_sameproc ! yes, context is still set for p
5165: EMPTY
1.176 pk 5166: #endif
1.173 pk 5167:
1.191 pk 5168: ld [%l3 + L_PROC], %o2 ! p = l->l_proc;
1.173 pk 5169: INCR(_C_LABEL(nswitchdiff)) ! clobbers %o0,%o1
1.185 thorpej 5170: ld [%o2 + P_VMSPACE], %o3 ! vm = p->p_vmspace;
1.173 pk 5171: ld [%o3 + VM_PMAP], %o3 ! pm = vm->vm_map.vm_pmap;
1.180 mrg 5172: #if defined(MULTIPROCESSOR)
1.197 wiz 5173: /* Add this CPU to the pmap's CPU set */
1.180 mrg 5174: sethi %hi(CPUINFO_VA + CPUINFO_CPUNO), %o0
5175: ld [%o0 + %lo(CPUINFO_VA + CPUINFO_CPUNO)], %o1
5176: mov 1, %o2
5177: ld [%o3 + PMAP_CPUSET], %o0
5178: sll %o2, %o1, %o2
5179: or %o0, %o2, %o0 ! pm->pm_cpuset |= cpu_number();
5180: st %o0, [%o3 + PMAP_CPUSET]
5181: #endif
1.173 pk 5182: ld [%o3 + PMAP_CTX], %o0 ! if (pm->pm_ctx != NULL)
5183: tst %o0
5184: bnz,a Lsw_havectx ! goto havecontext;
5185: ld [%o3 + PMAP_CTXNUM], %i1 ! load context number
5186:
5187: /* p does not have a context: call ctx_alloc to get one */
5188: call _C_LABEL(ctx_alloc) ! ctx_alloc(pm);
5189: mov %o3, %o0
5190:
5191: ret
1.176 pk 5192: restore %g0, %l0, %o0 ! return (p != lastproc)
1.173 pk 5193:
5194: /* p does have a context: just switch to it */
5195: Lsw_havectx:
5196: ! context is in %i1
5197: #if defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
5198: NOP_ON_4M_15:
5199: b,a 1f
5200: b,a 2f
5201: #endif
5202: 1:
5203: #if defined(SUN4) || defined(SUN4C)
5204: set AC_CONTEXT, %o1
5205: stba %i1, [%o1] ASI_CONTROL ! setcontext(vm->vm_pmap.pm_ctxnum);
5206: ret
1.176 pk 5207: restore %g0, %l0, %o0 ! return (p != lastproc)
1.173 pk 5208: #endif
5209: 2:
1.176 pk 5210: #if defined(SUN4M) || defined(SUN4D)
1.173 pk 5211: /*
5212: * Flush caches that need to be flushed on context switch.
5213: * We know this is currently only necessary on the sun4m hypersparc.
5214: */
5215: sethi %hi(CPUINFO_VA + CPUINFO_PURE_VCACHE_FLS), %o0
5216: ld [%o0 + %lo(CPUINFO_VA + CPUINFO_PURE_VCACHE_FLS)], %o2
5217: jmpl %o2, %o7 ! call the flush routine; it returns here
5218: set SRMMU_CXR, %i2 ! (delay slot: %i2 = context register)
5219: sta %i1, [%i2] ASI_SRMMU ! setcontext(vm->vm_pmap.pm_ctxnum);
5220: ret
1.176 pk 5221: restore %g0, %l0, %o0 ! return (p != lastproc)
1.173 pk 5222: #endif
5223:
1.176 pk 5224: #if !defined(MULTIPROCESSOR)
1.173 pk 5225: Lsw_sameproc:
5226: /*
5227: * We are resuming the process that was running at the
1.176 pk 5228: * call to switch().
1.173 pk 5229: */
5230: ret
1.176 pk 5231: restore %g0, %g0, %o0 ! return (0)
5232: #endif /* !MULTIPROCESSOR */
1.173 pk 5233:
1.185 thorpej 5234:
1.173 pk 5235: /*
5236: * Snapshot the current process so that stack frames are up to date.
5237: * Only used just before a crash dump.
5238: */
! In: %o0 = pcb to record the current <sp,pc> pair and %psr into.
5239: ENTRY(snapshot)
5240: std %o6, [%o0 + PCB_SP] ! save sp (std stores the %o6/%o7 = <sp,pc> pair)
5241: rd %psr, %o1 ! save psr
5242: st %o1, [%o0 + PCB_PSR]
5243:
5244: /*
5245: * Just like switch(); same XXX comments apply.
5246: * 7 of each. Minor tweak: the 7th restore is
5247: * done after a ret.
5248: */
5249: SAVE; SAVE; SAVE; SAVE; SAVE; SAVE; SAVE
5250: restore; restore; restore; restore; restore; restore; ret; restore
5251:
5252:
5253: /*
5254: * cpu_fork() arrange for proc_trampoline() to run after a process gets
5255: * chosen in switch(). The stack frame will contain a function pointer
5256: * in %l0, and an argument to pass to it in %l1.
5257: *
5258: * If the function *(%l0) returns, we arrange for an immediate return
5259: * to user mode. This happens in two known cases: after execve(2) of init,
5260: * and when returning a child to user mode after a fork(2).
5261: *
5262: * If were setting up a kernel thread, the function *(%l0) will not return.
5263: * If we're setting up a kernel thread, the function *(%l0) will not return.
! In: %l0 = function to call, %l1 = its argument (set up by cpu_fork()).
5264: ENTRY(proc_trampoline)
5265: /*
5266: * Note: cpu_fork() has set up a stack frame for us to run in,
5267: * so we can call other functions from here without using
5268: * `save ... restore'.
5269: */
5270: #ifdef MULTIPROCESSOR
5271: /* Finish setup in SMP environment: acquire locks etc. */
5272: call _C_LABEL(proc_trampoline_mp)
5273: nop
5274: #endif
5275:
5276: /* Reset interrupt level */
1.174 pk 5277: rd %psr, %l2
5278: andn %l2, PSR_PIL, %o0 ! psr &= ~PSR_PIL;
1.173 pk 5279: wr %o0, 0, %psr ! (void) spl0();
5280: nop ! psr delay; the next 2 instructions
5281: ! can safely be made part of the
5282: ! required 3 instructions psr delay
5283: call %l0
5284: mov %l1, %o0
5285:
5286: /*
5287: * Here we finish up as in syscall, but simplified.
5288: * cpu_fork() (or sendsig(), if we took a pending signal
5289: * in child_return()) will have set the user-space return
5290: * address in tf_pc. In both cases, %npc should be %pc + 4.
5291: */
5292: ld [%sp + CCFSZ + 4], %l1 ! pc = tf->tf_pc from cpu_fork()
1.174 pk 5293: and %l2, PSR_CWP, %o1 ! keep current CWP
1.173 pk 5294: or %o1, PSR_S, %l0 ! user psr
5295: b return_from_syscall
5296: add %l1, 4, %l2 ! npc = pc+4
5297:
1.1 deraadt 5298: /*
5299: * {fu,su}{,i}{byte,word}
5300: */
!
! fuword(addr) / fuiword(addr): fetch a word from user space.
! Returns the word in %o0, or -1 if the address is in kernel space,
! misaligned, or faults (fault caught via pcb_onfault = Lfserr).
!
1.111 pk 5301: _ENTRY(fuiword)
1.1 deraadt 5302: ENTRY(fuword)
5303: set KERNBASE, %o2
5304: cmp %o0, %o2 ! if addr >= KERNBASE...
5305: bgeu Lfsbadaddr
5306: EMPTY
5307: btst 3, %o0 ! or has low bits set...
5308: bnz Lfsbadaddr ! go return -1
5309: EMPTY
1.111 pk 5310: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5311: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5312: set Lfserr, %o3
5313: st %o3, [%o2 + PCB_ONFAULT]
5314: ld [%o0], %o0 ! fetch the word
5315: retl ! phew, made it, return the word
1.138 chs 5316: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
1.1 deraadt 5317:
! Common failure exits for the fu*/su* routines above and below:
! Lfserr expects %o2 = cpcb and clears pcb_onfault first;
! Lfsbadaddr just returns -1.
5318: Lfserr:
5319: st %g0, [%o2 + PCB_ONFAULT]! error in r/w, clear pcb_onfault
5320: Lfsbadaddr:
5321: retl ! and return error indicator
1.21 deraadt 5322: mov -1, %o0
1.1 deraadt 5323:
5324: /*
5325: * This is just like Lfserr, but it's a global label that allows
5326: * mem_access_fault() to check to see that we don't want to try to
5327: * page in the fault. It's used by fuswintr() etc.
5328: */
! (Expects %o2 = pcb whose pcb_onfault was set; returns -1.)
1.111 pk 5329: .globl _C_LABEL(Lfsbail)
5330: _C_LABEL(Lfsbail):
1.1 deraadt 5331: st %g0, [%o2 + PCB_ONFAULT]! error in r/w, clear pcb_onfault
5332: retl ! and return error indicator
1.21 deraadt 5333: mov -1, %o0
1.1 deraadt 5334:
5335: /*
5336: * Like fusword but callable from interrupt context.
5337: * Fails if data isn't resident.
5338: */
! fuswintr(addr): fetch a user-space halfword from interrupt context.
! Uses Lfsbail so mem_access_fault() will not page in on a fault;
! returns the halfword or -1.
5339: ENTRY(fuswintr)
5340: set KERNBASE, %o2
5341: cmp %o0, %o2 ! if addr >= KERNBASE
5342: bgeu Lfsbadaddr ! return error
5343: EMPTY
1.111 pk 5344: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfsbail;
5345: ld [%o2 + %lo(cpcb)], %o2
5346: set _C_LABEL(Lfsbail), %o3
1.1 deraadt 5347: st %o3, [%o2 + PCB_ONFAULT]
5348: lduh [%o0], %o0 ! fetch the halfword
5349: retl ! made it
5350: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
5351:
! fusword(addr): fetch a halfword from user space; returns it or -1.
5352: ENTRY(fusword)
5353: set KERNBASE, %o2
5354: cmp %o0, %o2 ! if addr >= KERNBASE
5355: bgeu Lfsbadaddr ! return error
5356: EMPTY
1.111 pk 5357: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5358: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5359: set Lfserr, %o3
5360: st %o3, [%o2 + PCB_ONFAULT]
5361: lduh [%o0], %o0 ! fetch the halfword
5362: retl ! made it
5363: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
5364:
! fubyte(addr) / fuibyte(addr): fetch a byte from user space;
! returns it (zero-extended) or -1.
1.111 pk 5365: _ENTRY(fuibyte)
1.1 deraadt 5366: ENTRY(fubyte)
5367: set KERNBASE, %o2
5368: cmp %o0, %o2 ! if addr >= KERNBASE
5369: bgeu Lfsbadaddr ! return error
5370: EMPTY
1.111 pk 5371: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5372: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5373: set Lfserr, %o3
5374: st %o3, [%o2 + PCB_ONFAULT]
5375: ldub [%o0], %o0 ! fetch the byte
5376: retl ! made it
5377: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
5378:
! suword(addr, val) / suiword(addr, val): store a word into user
! space.  Returns 0 on success, -1 on bad address or fault.
1.111 pk 5379: _ENTRY(suiword)
1.1 deraadt 5380: ENTRY(suword)
5381: set KERNBASE, %o2
5382: cmp %o0, %o2 ! if addr >= KERNBASE ...
5383: bgeu Lfsbadaddr
5384: EMPTY
5385: btst 3, %o0 ! or has low bits set ...
5386: bnz Lfsbadaddr ! go return error
5387: EMPTY
1.111 pk 5388: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5389: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5390: set Lfserr, %o3
5391: st %o3, [%o2 + PCB_ONFAULT]
5392: st %o1, [%o0] ! store the word
5393: st %g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
5394: retl ! and return 0
5395: clr %o0
5396:
! suswintr(addr, val): store a user-space halfword from interrupt
! context; uses Lfsbail so faults are not paged in.  Returns 0 or -1.
5397: ENTRY(suswintr)
5398: set KERNBASE, %o2
5399: cmp %o0, %o2 ! if addr >= KERNBASE
5400: bgeu Lfsbadaddr ! go return error
5401: EMPTY
1.111 pk 5402: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfsbail;
5403: ld [%o2 + %lo(cpcb)], %o2
5404: set _C_LABEL(Lfsbail), %o3
1.1 deraadt 5405: st %o3, [%o2 + PCB_ONFAULT]
5406: sth %o1, [%o0] ! store the halfword
5407: st %g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
5408: retl ! and return 0
5409: clr %o0
5410:
! susword(addr, val): store a halfword into user space; 0 or -1.
5411: ENTRY(susword)
5412: set KERNBASE, %o2
5413: cmp %o0, %o2 ! if addr >= KERNBASE
5414: bgeu Lfsbadaddr ! go return error
5415: EMPTY
1.111 pk 5416: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5417: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5418: set Lfserr, %o3
5419: st %o3, [%o2 + PCB_ONFAULT]
5420: sth %o1, [%o0] ! store the halfword
5421: st %g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
5422: retl ! and return 0
5423: clr %o0
5424:
! subyte(addr, val) / suibyte(addr, val): store a byte into user
! space; returns 0 on success, -1 on bad address or fault.
1.111 pk 5425: _ENTRY(suibyte)
1.1 deraadt 5426: ENTRY(subyte)
5427: set KERNBASE, %o2
5428: cmp %o0, %o2 ! if addr >= KERNBASE
5429: bgeu Lfsbadaddr ! go return error
5430: EMPTY
1.111 pk 5431: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5432: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5433: set Lfserr, %o3
5434: st %o3, [%o2 + PCB_ONFAULT]
5435: stb %o1, [%o0] ! store the byte
5436: st %g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
5437: retl ! and return 0
5438: clr %o0
5439:
5440: /* probeget and probeset are meant to be used during autoconfiguration */
5441:
5442: /*
5443: * probeget(addr, size) caddr_t addr; int size;
5444: *
5445: * Read or write a (byte,word,longword) from the given address.
5446: * Like {fu,su}{byte,halfword,word} but our caller is supposed
5447: * to know what he is doing... the address can be anywhere.
5448: *
5449: * We optimize for space, rather than time, here.
5450: */
! probeget(addr, size): read a (1,2,4)-byte value from an arbitrary
! address under pcb_onfault protection; returns the value, or -1 via
! Lfserr if the access faults.  Annulled branches skip the unused sizes.
5451: ENTRY(probeget)
5452: ! %o0 = addr, %o1 = (1,2,4)
1.111 pk 5453: sethi %hi(cpcb), %o2
5454: ld [%o2 + %lo(cpcb)], %o2 ! cpcb->pcb_onfault = Lfserr;
1.1 deraadt 5455: set Lfserr, %o5
5456: st %o5, [%o2 + PCB_ONFAULT]
5457: btst 1, %o1
5458: bnz,a 0f ! if (len & 1)
5459: ldub [%o0], %o0 ! value = *(char *)addr;
5460: 0: btst 2, %o1
5461: bnz,a 0f ! if (len & 2)
5462: lduh [%o0], %o0 ! value = *(short *)addr;
5463: 0: btst 4, %o1
5464: bnz,a 0f ! if (len & 4)
5465: ld [%o0], %o0 ! value = *(int *)addr;
5466: 0: retl ! made it, clear onfault and return
5467: st %g0, [%o2 + PCB_ONFAULT]
5468:
5469: /*
5470: * probeset(addr, size, val) caddr_t addr; int size, val;
5471: *
5472: * As above, but we return 0 on success.
5473: */
! probeset(addr, size, val): write a (1,2,4)-byte value to an
! arbitrary address under pcb_onfault protection; returns 0 on
! success, -1 via Lfserr on fault.
5474: ENTRY(probeset)
5475: ! %o0 = addr, %o1 = (1,2,4), %o2 = val
1.111 pk 5476: sethi %hi(cpcb), %o3
5477: ld [%o3 + %lo(cpcb)], %o3 ! cpcb->pcb_onfault = Lfserr;
1.1 deraadt 5478: set Lfserr, %o5
1.35 pk 5479: st %o5, [%o3 + PCB_ONFAULT]
1.1 deraadt 5480: btst 1, %o1
5481: bnz,a 0f ! if (len & 1)
5482: stb %o2, [%o0] ! *(char *)addr = value;
5483: 0: btst 2, %o1
5484: bnz,a 0f ! if (len & 2)
5485: sth %o2, [%o0] ! *(short *)addr = value;
5486: 0: btst 4, %o1
5487: bnz,a 0f ! if (len & 4)
5488: st %o2, [%o0] ! *(int *)addr = value;
5489: 0: clr %o0 ! made it, clear onfault and return 0
5490: retl
1.35 pk 5491: st %g0, [%o3 + PCB_ONFAULT]
1.21 deraadt 5492:
5493: /*
1.22 deraadt 5494: * int xldcontrolb(caddr_t, pcb)
5495: * %o0 %o1
1.21 deraadt 5496: *
5497: * read a byte from the specified address in ASI_CONTROL space.
5498: */
! xldcontrolb(addr, pcb): read one byte from ASI_CONTROL space with
! pcb_onfault set to Lfsbail in the caller-supplied pcb (%o1), so a
! fault returns -1 without paging.  Returns the byte on success.
1.22 deraadt 5499: ENTRY(xldcontrolb)
1.111 pk 5500: !sethi %hi(cpcb), %o2
5501: !ld [%o2 + %lo(cpcb)], %o2 ! cpcb->pcb_onfault = Lfsbail;
1.22 deraadt 5502: or %o1, %g0, %o2 ! %o2 = %o1
1.111 pk 5503: set _C_LABEL(Lfsbail), %o5
1.21 deraadt 5504: st %o5, [%o2 + PCB_ONFAULT]
5505: lduba [%o0] ASI_CONTROL, %o0 ! read
5506: 0: retl
1.1 deraadt 5507: st %g0, [%o2 + PCB_ONFAULT]
1.78 pk 5508:
5509: /*
5510: * int fkbyte(caddr_t, pcb)
5511: * %o0 %o1
5512: *
5513: * Just like fubyte(), but for kernel space.
5514: * (currently used to work around unexplained transient bus errors
5515: * when reading the VME interrupt vector)
5516: */
! fkbyte(addr, pcb): fetch one kernel-space byte with pcb_onfault
! set to Lfsbail in the supplied pcb; returns the byte or -1 on fault.
5517: ENTRY(fkbyte)
5518: or %o1, %g0, %o2 ! %o2 = %o1
1.111 pk 5519: set _C_LABEL(Lfsbail), %o5
1.78 pk 5520: st %o5, [%o2 + PCB_ONFAULT]
5521: ldub [%o0], %o0 ! fetch the byte
5522: retl ! made it
5523: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
1.1 deraadt 5524:
5525:
5526: /*
5527: * copywords(src, dst, nbytes)
5528: *
5529: * Copy `nbytes' bytes from src to dst, both of which are word-aligned;
5530: * nbytes is a multiple of four. It may, however, be zero, in which case
5531: * nothing is to be copied.
5532: */
! Copies from high addresses down; nbytes may be 0.
5533: ENTRY(copywords)
5534: ! %o0 = src, %o1 = dst, %o2 = nbytes
5535: b 1f
5536: deccc 4, %o2
5537: 0:
5538: st %o3, [%o1 + %o2]
5539: deccc 4, %o2 ! while ((n -= 4) >= 0)
5540: 1:
5541: bge,a 0b ! *(int *)(dst+n) = *(int *)(src+n);
5542: ld [%o0 + %o2], %o3
5543: retl
5544: nop
5545:
5546: /*
5547: * qcopy(src, dst, nbytes)
5548: *
5549: * (q for `quad' or `quick', as opposed to b for byte/block copy)
5550: *
5551: * Just like copywords, but everything is multiples of 8.
5552: */
! %o0 = src, %o1 = dst, %o2 = nbytes (multiple of 8; may be 0).
! Same downward-copy shape as copywords, but with 8-byte ldd/std.
5553: ENTRY(qcopy)
5554: b 1f
5555: deccc 8, %o2
5556: 0:
5557: std %o4, [%o1 + %o2]
5558: deccc 8, %o2 ! while ((n -= 8) >= 0)
5559: 1:
5560: bge,a 0b ! *(quad *)(dst+n) = *(quad *)(src+n);
5561: ldd [%o0 + %o2], %o4
5562: retl
5563: nop
5564:
5565: /*
5566: * qzero(addr, nbytes)
5567: *
5568: * Zeroes `nbytes' bytes of a quad-aligned virtual address,
5569: * where nbytes is itself a multiple of 8.
5570: */
5571: ENTRY(qzero)
5572: ! %o0 = addr, %o1 = len (in bytes)
5573: clr %g1 ! std %g0 stores the %g0/%g1 register pair, so %g1 must be zero too
5574: 0:
5575: deccc 8, %o1 ! while ((n -= 8) >= 0)
5576: bge,a 0b
5577: std %g0, [%o0 + %o1] ! *(quad *)(addr + n) = 0;
5578: retl
5579: nop
5580:
5581: /*
1.83 mycroft 5582: * kernel bcopy
1.1 deraadt 5583: * Assumes regions do not overlap; has no useful return value.
5584: *
5585: * Must not use %g7 (see copyin/copyout above).
5586: */
5587:
5588: #define BCOPY_SMALL 32 /* if < 32, copy by bytes */
5589:
! bcopy(src, dst, len): %o0 = src, %o1 = dst, %o2 = len.  Small copies
! (< BCOPY_SMALL) go byte-by-byte; larger ones align as far as the low
! address bits allow, then copy by doublewords and mop up the tail.
! N.B. Lbcopy_start is also the forward-copy entry used by ovbcopy().
! (The pseudo-C comments track the logical copy; %o0 is always src.)
5590: ENTRY(bcopy)
5591: cmp %o2, BCOPY_SMALL
5592: Lbcopy_start:
5593: bge,a Lbcopy_fancy ! if >= this many, go be fancy.
5594: btst 7, %o0 ! (part of being fancy)
5595:
5596: /*
5597: * Not much to copy, just do it a byte at a time.
5598: */
5599: deccc %o2 ! while (--len >= 0)
5600: bl 1f
5601: EMPTY
5602: 0:
5603: inc %o0
5604: ldsb [%o0 - 1], %o4 ! (++dst)[-1] = *src++;
5605: stb %o4, [%o1]
5606: deccc %o2
5607: bge 0b
5608: inc %o1
5609: 1:
5610: retl
1.80 mrg 5611: nop
1.1 deraadt 5612: /* NOTREACHED */
5613:
5614: /*
5615: * Plenty of data to copy, so try to do it optimally.
5616: */
5617: Lbcopy_fancy:
5618: ! check for common case first: everything lines up.
5619: ! btst 7, %o0 ! done already
5620: bne 1f
5621: EMPTY
5622: btst 7, %o1
5623: be,a Lbcopy_doubles
5624: dec 8, %o2 ! if all lined up, len -= 8, goto bcopy_doubles
5625:
5626: ! If the low bits match, we can make these line up.
5627: 1:
5628: xor %o0, %o1, %o3 ! t = src ^ dst;
5629: btst 1, %o3 ! if (t & 1) {
5630: be,a 1f
5631: btst 1, %o0 ! [delay slot: if (src & 1)]
5632:
5633: ! low bits do not match, must copy by bytes.
5634: 0:
5635: ldsb [%o0], %o4 ! do {
5636: inc %o0 ! (++dst)[-1] = *src++;
5637: inc %o1
5638: deccc %o2
5639: bnz 0b ! } while (--len != 0);
5640: stb %o4, [%o1 - 1]
5641: retl
1.80 mrg 5642: nop
1.1 deraadt 5643: /* NOTREACHED */
5644:
5645: ! lowest bit matches, so we can copy by words, if nothing else
5646: 1:
5647: be,a 1f ! if (src & 1) {
5648: btst 2, %o3 ! [delay slot: if (t & 2)]
5649:
5650: ! although low bits match, both are 1: must copy 1 byte to align
5651: ldsb [%o0], %o4 ! *dst++ = *src++;
5652: stb %o4, [%o1]
5653: inc %o0
5654: inc %o1
5655: dec %o2 ! len--;
5656: btst 2, %o3 ! } [if (t & 2)]
5657: 1:
5658: be,a 1f ! if (t & 2) {
5659: btst 2, %o0 ! [delay slot: if (src & 2)]
5660: dec 2, %o2 ! len -= 2;
5661: 0:
5662: ldsh [%o0], %o4 ! do {
5663: sth %o4, [%o1] ! *(short *)dst = *(short *)src;
5664: inc 2, %o0 ! dst += 2, src += 2;
5665: deccc 2, %o2 ! } while ((len -= 2) >= 0);
5666: bge 0b
5667: inc 2, %o1
5668: b Lbcopy_mopb ! goto mop_up_byte;
5669: btst 1, %o2 ! } [delay slot: if (len & 1)]
5670: /* NOTREACHED */
5671:
5672: ! low two bits match, so we can copy by longwords
5673: 1:
5674: be,a 1f ! if (src & 2) {
5675: btst 4, %o3 ! [delay slot: if (t & 4)]
5676:
5677: ! although low 2 bits match, they are 10: must copy one short to align
5678: ldsh [%o0], %o4 ! (*short *)dst = *(short *)src;
5679: sth %o4, [%o1]
5680: inc 2, %o0 ! dst += 2;
5681: inc 2, %o1 ! src += 2;
5682: dec 2, %o2 ! len -= 2;
5683: btst 4, %o3 ! } [if (t & 4)]
5684: 1:
5685: be,a 1f ! if (t & 4) {
5686: btst 4, %o0 ! [delay slot: if (src & 4)]
5687: dec 4, %o2 ! len -= 4;
5688: 0:
5689: ld [%o0], %o4 ! do {
5690: st %o4, [%o1] ! *(int *)dst = *(int *)src;
5691: inc 4, %o0 ! dst += 4, src += 4;
5692: deccc 4, %o2 ! } while ((len -= 4) >= 0);
5693: bge 0b
5694: inc 4, %o1
5695: b Lbcopy_mopw ! goto mop_up_word_and_byte;
5696: btst 2, %o2 ! } [delay slot: if (len & 2)]
5697: /* NOTREACHED */
5698:
5699: ! low three bits match, so we can copy by doublewords
5700: 1:
5701: be 1f ! if (src & 4) {
5702: dec 8, %o2 ! [delay slot: len -= 8]
5703: ld [%o0], %o4 ! *(int *)dst = *(int *)src;
5704: st %o4, [%o1]
5705: inc 4, %o0 ! dst += 4, src += 4, len -= 4;
5706: inc 4, %o1
5707: dec 4, %o2 ! }
5708: 1:
5709: Lbcopy_doubles:
5710: ldd [%o0], %o4 ! do {
5711: std %o4, [%o1] ! *(double *)dst = *(double *)src;
5712: inc 8, %o0 ! dst += 8, src += 8;
5713: deccc 8, %o2 ! } while ((len -= 8) >= 0);
5714: bge Lbcopy_doubles
5715: inc 8, %o1
5716:
5717: ! check for a usual case again (save work)
5718: btst 7, %o2 ! if ((len & 7) == 0)
5719: be Lbcopy_done ! goto bcopy_done;
5720:
5721: btst 4, %o2 ! if ((len & 4)) == 0)
5722: be,a Lbcopy_mopw ! goto mop_up_word_and_byte;
5723: btst 2, %o2 ! [delay slot: if (len & 2)]
5724: ld [%o0], %o4 ! *(int *)dst = *(int *)src;
5725: st %o4, [%o1]
5726: inc 4, %o0 ! dst += 4;
5727: inc 4, %o1 ! src += 4;
5728: btst 2, %o2 ! } [if (len & 2)]
5729:
5730: 1:
5731: ! mop up trailing word (if present) and byte (if present).
5732: Lbcopy_mopw:
5733: be Lbcopy_mopb ! no word, go mop up byte
5734: btst 1, %o2 ! [delay slot: if (len & 1)]
5735: ldsh [%o0], %o4 ! *(short *)dst = *(short *)src;
5736: be Lbcopy_done ! if ((len & 1) == 0) goto done;
5737: sth %o4, [%o1]
5738: ldsb [%o0 + 2], %o4 ! dst[2] = src[2];
5739: retl
1.80 mrg 5740: stb %o4, [%o1 + 2]
1.1 deraadt 5741: /* NOTREACHED */
5742:
5743: ! mop up trailing byte (if present).
5744: Lbcopy_mopb:
5745: bne,a 1f
5746: ldsb [%o0], %o4
5747:
5748: Lbcopy_done:
5749: retl
1.80 mrg 5750: nop
1.1 deraadt 5751:
5752: 1:
5753: retl
1.80 mrg 5754: stb %o4,[%o1]
/*
 * ovbcopy(src, dst, len): like bcopy, but regions may overlap.
 *
 * In:	%o0 = src, %o1 = dst, %o2 = len
 * If src >= dst a forward copy is always safe, so tail-branch into the
 * regular bcopy path.  Otherwise copy backwards (highest address first)
 * so an overlapping destination never clobbers source bytes that have
 * not been copied yet.  Clobbers %o3/%o4 only; not affected by faults.
 */
ENTRY(ovbcopy)
	cmp	%o0, %o1	! src < dst?
	bgeu	Lbcopy_start	! no, go copy forwards as via bcopy
	cmp	%o2, BCOPY_SMALL! (check length for doublecopy first)

	/*
	 * Since src comes before dst, and the regions might overlap,
	 * we have to do the copy starting at the end and working backwards.
	 * Note: the cmp above executed in the bgeu delay slot, so the
	 * condition codes for the BCOPY_SMALL test are already set here.
	 */
	add	%o2, %o0, %o0	! src += len
	add	%o2, %o1, %o1	! dst += len
	bge,a	Lback_fancy	! if len >= BCOPY_SMALL, go be fancy
	btst	3, %o0

	/*
	 * Not much to copy, just do it a byte at a time.
	 */
	deccc	%o2		! while (--len >= 0)
	bl	1f
	EMPTY
0:
	dec	%o0		!	*--dst = *--src;
	ldsb	[%o0], %o4
	dec	%o1
	deccc	%o2
	bge	0b
	stb	%o4, [%o1]
1:
	retl
	nop

	/*
	 * Plenty to copy, try to be optimal.
	 * We only bother with word/halfword/byte copies here.
	 */
Lback_fancy:
!	btst	3, %o0		! done already
	bnz	1f		! if ((src & 3) == 0 &&
	btst	3, %o1		!     (dst & 3) == 0)
	bz,a	Lback_words	!	goto words;
	dec	4, %o2		! (done early for word copy)

1:
	/*
	 * See if the low bits match.
	 */
	xor	%o0, %o1, %o3	! t = src ^ dst;
	btst	1, %o3
	bz,a	3f		! if (t & 1) == 0, can do better
	btst	1, %o0

	/*
	 * Nope; gotta do byte copy.
	 */
2:
	dec	%o0		! do {
	ldsb	[%o0], %o4	!	*--dst = *--src;
	dec	%o1
	deccc	%o2		! } while (--len != 0);
	bnz	2b
	stb	%o4, [%o1]
	retl
	nop

3:
	/*
	 * Can do halfword or word copy, but might have to copy 1 byte first.
	 */
!	btst	1, %o0		! done earlier
	bz,a	4f		! if (src & 1) {	/* copy 1 byte */
	btst	2, %o3		! (done early)
	dec	%o0		!	*--dst = *--src;
	ldsb	[%o0], %o4
	dec	%o1
	stb	%o4, [%o1]
	dec	%o2		!	len--;
	btst	2, %o3		! }

4:
	/*
	 * See if we can do a word copy ((t&2) == 0).
	 */
!	btst	2, %o3		! done earlier
	bz,a	6f		! if (t & 2) == 0, can do word copy
	btst	2, %o0		! (src&2, done early)

	/*
	 * Gotta do halfword copy.
	 */
	dec	2, %o2		! len -= 2;
5:
	dec	2, %o0		! do {
	ldsh	[%o0], %o4	!	src -= 2;
	dec	2, %o1		!	dst -= 2;
	deccc	2, %o2		!	*(short *)dst = *(short *)src;
	bge	5b		! } while ((len -= 2) >= 0);
	sth	%o4, [%o1]
	b	Lback_mopb	! goto mop_up_byte;
	btst	1, %o2		! (len&1, done early)

6:
	/*
	 * We can do word copies, but we might have to copy
	 * one halfword first.
	 */
!	btst	2, %o0		! done already
	bz	7f		! if (src & 2) {
	dec	4, %o2		! (len -= 4, done early)
	dec	2, %o0		!	src -= 2, dst -= 2;
	ldsh	[%o0], %o4	!	*(short *)dst = *(short *)src;
	dec	2, %o1
	sth	%o4, [%o1]
	dec	2, %o2		!	len -= 2;
				! }

7:
Lback_words:
	/*
	 * Do word copies (backwards), then mop up trailing halfword
	 * and byte if any.
	 */
!	dec	4, %o2		! len -= 4, done already
0:				! do {
	dec	4, %o0		!	src -= 4;
	dec	4, %o1		!	dst -= 4;
	ld	[%o0], %o4	!	*(int *)dst = *(int *)src;
	deccc	4, %o2		! } while ((len -= 4) >= 0);
	bge	0b
	st	%o4, [%o1]

	/*
	 * Check for trailing shortword.
	 */
	btst	2, %o2		! if (len & 2) {
	bz,a	1f
	btst	1, %o2		! (len&1, done early)
	dec	2, %o0		!	src -= 2, dst -= 2;
	ldsh	[%o0], %o4	!	*(short *)dst = *(short *)src;
	dec	2, %o1
	sth	%o4, [%o1]	! }
	btst	1, %o2

	/*
	 * Check for trailing byte.
	 */
1:
Lback_mopb:
!	btst	1, %o2		! (done already)
	bnz,a	1f		! if (len & 1) {
	ldsb	[%o0 - 1], %o4	!	b = src[-1];
	retl
	nop
1:
	retl			!	dst[-1] = b;
	stb	%o4, [%o1 - 1]	! }
5913:
/*
 * kcopy() is exactly like bcopy except that it sets pcb_onfault such that
 * when a fault occurs, it is able to return -1 to indicate this to the
 * caller.
 *
 * In:	%o0 = src, %o1 = dst, %o2 = len
 * Out:	%o0 = 0 on success; on a fault the trap code resumes at Lkcerr
 *	(NOTE(review): presumably with an error value already in %o0 --
 *	set by the trap handler, not visible here; confirm in trap.c).
 * Register protocol: %o5 holds the cpcb pointer and %g1 the saved
 * pcb_onfault value for the WHOLE routine -- both must stay live across
 * every path, which is why the doubles loop below shuffles len into %o4.
 */
ENTRY(kcopy)
	sethi	%hi(cpcb), %o5		! cpcb->pcb_onfault = Lkcerr;
	ld	[%o5 + %lo(cpcb)], %o5
	set	Lkcerr, %o3
	ld	[%o5 + PCB_ONFAULT], %g1	! save current onfault handler
	st	%o3, [%o5 + PCB_ONFAULT]

	cmp	%o2, BCOPY_SMALL
Lkcopy_start:
	bge,a	Lkcopy_fancy	! if >= this many, go be fancy.
	btst	7, %o0		! (part of being fancy)

	/*
	 * Not much to copy, just do it a byte at a time.
	 */
	deccc	%o2		! while (--len >= 0)
	bl	1f
	EMPTY
0:
	ldsb	[%o0], %o4	!	*dst++ = *src++;
	inc	%o0
	stb	%o4, [%o1]
	deccc	%o2
	bge	0b
	inc	%o1
1:
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

	/*
	 * Plenty of data to copy, so try to do it optimally.
	 * The alignment cascade below is the same as bcopy's:
	 * byte -> halfword -> word -> doubleword, keyed off src^dst.
	 */
Lkcopy_fancy:
	! check for common case first: everything lines up.
!	btst	7, %o0		! done already
	bne	1f
	EMPTY
	btst	7, %o1
	be,a	Lkcopy_doubles
	dec	8, %o2		! if all lined up, len -= 8, goto bcopy_doubles

	! If the low bits match, we can make these line up.
1:
	xor	%o0, %o1, %o3	! t = src ^ dst;
	btst	1, %o3		! if (t & 1) {
	be,a	1f
	btst	1, %o0		! [delay slot: if (src & 1)]

	! low bits do not match, must copy by bytes.
0:
	ldsb	[%o0], %o4	! do {
	inc	%o0		!	*dst++ = *src++;
	stb	%o4, [%o1]
	deccc	%o2
	bnz	0b		! } while (--len != 0);
	inc	%o1
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

	! lowest bit matches, so we can copy by words, if nothing else
1:
	be,a	1f		! if (src & 1) {
	btst	2, %o3		! [delay slot: if (t & 2)]

	! although low bits match, both are 1: must copy 1 byte to align
	ldsb	[%o0], %o4	!	*dst++ = *src++;
	inc	%o0
	stb	%o4, [%o1]
	dec	%o2		!	len--;
	inc	%o1
	btst	2, %o3		! } [if (t & 2)]
1:
	be,a	1f		! if (t & 2) {
	btst	2, %o0		! [delay slot: if (src & 2)]
	dec	2, %o2		!	len -= 2;
0:
	ldsh	[%o0], %o4	!	do {
	inc	2, %o0		!		src += 2, dst += 2;
	sth	%o4, [%o1]	!		*(short *)dst = *(short *)src;
	deccc	2, %o2		!	} while ((len -= 2) >= 0);
	bge	0b
	inc	2, %o1
	b	Lkcopy_mopb	!	goto mop_up_byte;
	btst	1, %o2		! } [delay slot: if (len & 1)]
	/* NOTREACHED */

	! low two bits match, so we can copy by longwords
1:
	be,a	1f		! if (src & 2) {
	btst	4, %o3		! [delay slot: if (t & 4)]

	! although low 2 bits match, they are 10: must copy one short to align
	ldsh	[%o0], %o4	!	*(short *)dst = *(short *)src;
	inc	2, %o0		!	src += 2;
	sth	%o4, [%o1]
	dec	2, %o2		!	len -= 2;
	inc	2, %o1		!	dst += 2;
	btst	4, %o3		! } [if (t & 4)]
1:
	be,a	1f		! if (t & 4) {
	btst	4, %o0		! [delay slot: if (src & 4)]
	dec	4, %o2		!	len -= 4;
0:
	ld	[%o0], %o4	!	do {
	inc	4, %o0		!		src += 4, dst += 4;
	st	%o4, [%o1]	!		*(int *)dst = *(int *)src;
	deccc	4, %o2		!	} while ((len -= 4) >= 0);
	bge	0b
	inc	4, %o1
	b	Lkcopy_mopw	!	goto mop_up_word_and_byte;
	btst	2, %o2		! } [delay slot: if (len & 2)]
	/* NOTREACHED */

	! low three bits match, so we can copy by doublewords
1:
	be	1f		! if (src & 4) {
	dec	8, %o2		! [delay slot: len -= 8]
	ld	[%o0], %o4	!	*(int *)dst = *(int *)src;
	inc	4, %o0		!	src += 4, dst += 4, len -= 4;
	st	%o4, [%o1]
	dec	4, %o2		! }
	inc	4, %o1
1:
Lkcopy_doubles:
	! swap %o4 with %o2 during doubles copy, since %o5 is verboten
	! (%o5 = cpcb pointer, needed to restore onfault; ldd/std need an
	! even/odd register pair, so len parks in %o4 while %o2/%o3 load data)
	mov	%o2, %o4
Lkcopy_doubles2:
	ldd	[%o0], %o2	! do {
	inc	8, %o0		!	src += 8, dst += 8;
	std	%o2, [%o1]	!	*(double *)dst = *(double *)src;
	deccc	8, %o4		! } while ((len -= 8) >= 0);
	bge	Lkcopy_doubles2
	inc	8, %o1
	mov	%o4, %o2	! restore len

	! check for a usual case again (save work)
	btst	7, %o2		! if ((len & 7) == 0)
	be	Lkcopy_done	!	goto bcopy_done;
				! (delay slot is the btst below: harmless
				!  whether or not the branch is taken)
	btst	4, %o2		! if ((len & 4)) == 0)
	be,a	Lkcopy_mopw	!	goto mop_up_word_and_byte;
	btst	2, %o2		! [delay slot: if (len & 2)]
	ld	[%o0], %o4	!	*(int *)dst = *(int *)src;
	inc	4, %o0		!	src += 4;
	st	%o4, [%o1]
	inc	4, %o1		!	dst += 4;
	btst	2, %o2		! } [if (len & 2)]

1:
	! mop up trailing word (if present) and byte (if present).
Lkcopy_mopw:
	be	Lkcopy_mopb	! no word, go mop up byte
	btst	1, %o2		! [delay slot: if (len & 1)]
	ldsh	[%o0], %o4	! *(short *)dst = *(short *)src;
	be	Lkcopy_done	! if ((len & 1) == 0) goto done;
	sth	%o4, [%o1]
	ldsb	[%o0 + 2], %o4	! dst[2] = src[2];
	stb	%o4, [%o1 + 2]
	st	%g1, [%o5 + PCB_ONFAULT]! restore onfault
	retl
	mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

	! mop up trailing byte (if present).
Lkcopy_mopb:
	bne,a	1f
	ldsb	[%o0], %o4

Lkcopy_done:
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

1:
	stb	%o4, [%o1]
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

	! fault landing pad: trap code transfers here when a copy access
	! faults; restore the saved onfault handler and return (%o0 is
	! the trap handler's error return -- not set here).
Lkcerr:
	retl
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	/* NOTREACHED */
1.1 deraadt 6108:
/*
 * savefpstate(f) struct fpstate *f;
 *
 * Store the current FPU state.  The first `st %fsr' may cause a trap;
 * our trap handler knows how to recover (by `returning' to savefpcont).
 *
 * In:	%o0 = f (struct fpstate *)
 * Uses %o1-%o5 as scratch; %o3 counts FP-queue entries, %o5 holds the
 * FSR_QNE mask for the duration.
 */
ENTRY(savefpstate)
	rd	%psr, %o1		! enable FP before we begin
	set	PSR_EF, %o2
	or	%o1, %o2, %o1
	wr	%o1, 0, %psr
	/* do some setup work while we wait for PSR_EF to turn on */
	set	FSR_QNE, %o5		! QNE = 0x2000, too big for immediate
	clr	%o3			! qsize = 0;
	nop				! (still waiting for PSR_EF)
special_fp_store:
	st	%fsr, [%o0 + FS_FSR]	! f->fs_fsr = getfsr();
	/*
	 * Even if the preceding instruction did not trap, the queue
	 * is not necessarily empty: this state save might be happening
	 * because user code tried to store %fsr and took the FPU
	 * from `exception pending' mode to `exception' mode.
	 * So we still have to check the blasted QNE bit.
	 * With any luck it will usually not be set.
	 */
	ld	[%o0 + FS_FSR], %o4	! if (f->fs_fsr & QNE)
	btst	%o5, %o4
	bnz	Lfp_storeq		!	goto storeq;
	std	%f0, [%o0 + FS_REGS + (4*0)]	! f->fs_f0 = etc;
					! (delay slot: %f0/%f1 get stored on
					!  BOTH paths, so Lfp_finish skips them)
Lfp_finish:
	st	%o3, [%o0 + FS_QSIZE]	! f->fs_qsize = qsize;
	std	%f2, [%o0 + FS_REGS + (4*2)]
	std	%f4, [%o0 + FS_REGS + (4*4)]
	std	%f6, [%o0 + FS_REGS + (4*6)]
	std	%f8, [%o0 + FS_REGS + (4*8)]
	std	%f10, [%o0 + FS_REGS + (4*10)]
	std	%f12, [%o0 + FS_REGS + (4*12)]
	std	%f14, [%o0 + FS_REGS + (4*14)]
	std	%f16, [%o0 + FS_REGS + (4*16)]
	std	%f18, [%o0 + FS_REGS + (4*18)]
	std	%f20, [%o0 + FS_REGS + (4*20)]
	std	%f22, [%o0 + FS_REGS + (4*22)]
	std	%f24, [%o0 + FS_REGS + (4*24)]
	std	%f26, [%o0 + FS_REGS + (4*26)]
	std	%f28, [%o0 + FS_REGS + (4*28)]
	retl
	std	%f30, [%o0 + FS_REGS + (4*30)]

/*
 * Store the (now known nonempty) FP queue.
 * We have to reread the fsr each time in order to get the new QNE bit.
 * %o3 advances by 8 (bytes) per entry and is scaled back to an entry
 * count before rejoining Lfp_finish.
 */
Lfp_storeq:
	add	%o0, FS_QUEUE, %o1	! q = &f->fs_queue[0];
1:
	std	%fq, [%o1 + %o3]	! q[qsize++] = fsr_qfront();
	st	%fsr, [%o0 + FS_FSR]	! reread fsr
	ld	[%o0 + FS_FSR], %o4	! if fsr & QNE, loop
	btst	%o5, %o4
	bnz	1b
	inc	8, %o3
	b	Lfp_finish		! set qsize and finish storing fregs
	srl	%o3, 3, %o3		! (but first fix qsize)

/*
 * The fsr store trapped.  Do it again; this time it will not trap.
 * We could just have the trap handler return to the `st %fsr', but
 * if for some reason it *does* trap, that would lock us into a tight
 * loop.  This way we panic instead.  Whoopee.
 */
savefpcont:
	b	special_fp_store + 4	! continue
	st	%fsr, [%o0 + FS_FSR]	! but first finish the %fsr store
6182:
/*
 * Load FPU state.
 *
 * loadfpstate(f) struct fpstate *f;
 * In:	%o0 = f.  Enables the FPU (PSR_EF) and reloads all 32 FP
 * registers plus %fsr from the saved state.  The three nops give the
 * freshly-written PSR_EF time to take effect before the first FP load.
 */
ENTRY(loadfpstate)
	rd	%psr, %o1		! enable FP before we begin
	set	PSR_EF, %o2
	or	%o1, %o2, %o1
	wr	%o1, 0, %psr
	nop; nop; nop			! paranoia
	ldd	[%o0 + FS_REGS + (4*0)], %f0
	ldd	[%o0 + FS_REGS + (4*2)], %f2
	ldd	[%o0 + FS_REGS + (4*4)], %f4
	ldd	[%o0 + FS_REGS + (4*6)], %f6
	ldd	[%o0 + FS_REGS + (4*8)], %f8
	ldd	[%o0 + FS_REGS + (4*10)], %f10
	ldd	[%o0 + FS_REGS + (4*12)], %f12
	ldd	[%o0 + FS_REGS + (4*14)], %f14
	ldd	[%o0 + FS_REGS + (4*16)], %f16
	ldd	[%o0 + FS_REGS + (4*18)], %f18
	ldd	[%o0 + FS_REGS + (4*20)], %f20
	ldd	[%o0 + FS_REGS + (4*22)], %f22
	ldd	[%o0 + FS_REGS + (4*24)], %f24
	ldd	[%o0 + FS_REGS + (4*26)], %f26
	ldd	[%o0 + FS_REGS + (4*28)], %f28
	ldd	[%o0 + FS_REGS + (4*30)], %f30
	retl
	ld	[%o0 + FS_FSR], %fsr	! setfsr(f->fs_fsr);
6211: /*
6212: * ienab_bis(bis) int bis;
6213: * ienab_bic(bic) int bic;
6214: *
1.167 pk 6215: * Set and clear bits in the sun4/sun4c interrupt register.
1.52 pk 6216: */
6217:
6218: #if defined(SUN4) || defined(SUN4C)
6219: /*
1.1 deraadt 6220: * Since there are no read-modify-write instructions for this,
6221: * and one of the interrupts is nonmaskable, we must disable traps.
6222: */
6223: ENTRY(ienab_bis)
6224: ! %o0 = bits to set
6225: rd %psr, %o2
6226: wr %o2, PSR_ET, %psr ! disable traps
6227: nop; nop ! 3-instr delay until ET turns off
1.62 pk 6228: sethi %hi(INTRREG_VA), %o3
6229: ldub [%o3 + %lo(INTRREG_VA)], %o4
6230: or %o4, %o0, %o4 ! *INTRREG_VA |= bis;
6231: stb %o4, [%o3 + %lo(INTRREG_VA)]
1.1 deraadt 6232: wr %o2, 0, %psr ! reenable traps
6233: nop
6234: retl
6235: nop
6236:
6237: ENTRY(ienab_bic)
6238: ! %o0 = bits to clear
6239: rd %psr, %o2
6240: wr %o2, PSR_ET, %psr ! disable traps
6241: nop; nop
1.62 pk 6242: sethi %hi(INTRREG_VA), %o3
6243: ldub [%o3 + %lo(INTRREG_VA)], %o4
6244: andn %o4, %o0, %o4 ! *INTRREG_VA &=~ bic;
6245: stb %o4, [%o3 + %lo(INTRREG_VA)]
1.1 deraadt 6246: wr %o2, 0, %psr ! reenable traps
6247: nop
6248: retl
6249: nop
1.167 pk 6250: #endif /* SUN4 || SUN4C */
1.52 pk 6251:
#if defined(SUN4M)
/*
 * raise(cpu, level)
 *
 * Post a software interrupt at `level' on processor `cpu'.
 * In: %o0 = cpu number, %o1 = interrupt level.
 */
ENTRY(raise)
#if !defined(MSIIEP)	/* normal suns */
	! *(ICR_PI_SET + cpu*_MAXNBPG) = PINTR_SINTRLEV(level)
	sethi	%hi(1 << 16), %o2	! soft-int bits start at bit 16
	sll	%o2, %o1, %o2
	set	ICR_PI_SET, %o1
	set	_MAXNBPG, %o3
1:					! step to this cpu's ICR page:
	subcc	%o0, 1, %o0		! add _MAXNBPG `cpu' times
	bpos,a	1b
	add	%o1, %o3, %o1
	retl
	st	%o2, [%o1]
#else /* MSIIEP - ignore %o0, only one CPU ever */
	mov	1, %o2
	sethi	%hi(MSIIEP_PCIC_VA), %o0
	sll	%o2, %o1, %o2
	retl
	sth	%o2, [%o0 + PCIC_SOFT_INTR_SET_REG]
#endif
1.62 pk 6276:
/*
 * Read Synchronous Fault Status registers.
 * On entry: %l1 == PC, %l3 == fault type, %l4 == storage, %l7 == return address
 * Only use %l5 and %l6.
 * Note: not C callable (called from trap-time code with jmpl; returns
 * via `jmp %l7 + 8').  Writes dump.sfva at [%l4+4] and dump.sfsr at [%l4].
 */
_ENTRY(_C_LABEL(srmmu_get_syncflt))
_ENTRY(_C_LABEL(hypersparc_get_syncflt))
	set	SRMMU_SFAR, %l5
	lda	[%l5] ASI_SRMMU, %l5	! sync virt addr; must be read first
	st	%l5, [%l4 + 4]		! => dump.sfva
	set	SRMMU_SFSR, %l5
	lda	[%l5] ASI_SRMMU, %l5	! get sync fault status register
	jmp	%l7 + 8			! return to caller
	st	%l5, [%l4]		! => dump.sfsr

/*
 * Same contract as above, for modules where a text fault's address is
 * taken from the trapped PC (%l1) rather than from the SFAR.
 */
_ENTRY(_C_LABEL(viking_get_syncflt))
_ENTRY(_C_LABEL(ms1_get_syncflt))
_ENTRY(_C_LABEL(swift_get_syncflt))
_ENTRY(_C_LABEL(turbosparc_get_syncflt))
_ENTRY(_C_LABEL(cypress_get_syncflt))
	cmp	%l3, T_TEXTFAULT
	be,a	1f
	mov	%l1, %l5		! use PC if type == T_TEXTFAULT

	set	SRMMU_SFAR, %l5
	lda	[%l5] ASI_SRMMU, %l5	! sync virt addr; must be read first
1:
	st	%l5, [%l4 + 4]		! => dump.sfva

	set	SRMMU_SFSR, %l5
	lda	[%l5] ASI_SRMMU, %l5	! get sync fault status register
	jmp	%l7 + 8			! return to caller
	st	%l5, [%l4]		! => dump.sfsr
1.62 pk 6311:
#if defined(MULTIPROCESSOR) && 0 /* notyet */
/*
 * Read Synchronous Fault Status registers.
 * On entry: %o0 == &sfsr, %o1 == &sfar
 *
 * NOTE(review): this code is compiled out (`&& 0').  It loads via
 * [%l4 + ...] before %l4 is initialized (the `or' that sets %l4 sits in
 * the jmpl delay slot below), and uses %o4/%o5 across the jmpl -- verify
 * register usage before ever enabling it.
 */
_ENTRY(_C_LABEL(smp_get_syncflt))
	save	%sp, -CCFSZ, %sp

	sethi	%hi(CPUINFO_VA), %o4
	ld	[%l4 + %lo(CPUINFO_VA+CPUINFO_GETSYNCFLT)], %o5
	clr	%l1
	clr	%l3
	jmpl	%o5, %l7		! call per-cpu get_syncflt hook
	or	%o4, %lo(CPUINFO_SYNCFLTDUMP), %l4	! (delay: %l4 = dump)

	! load values out of the dump
	ld	[%o4 + %lo(CPUINFO_VA+CPUINFO_SYNCFLTDUMP)], %o5
	st	%o5, [%i0]
	ld	[%o4 + %lo(CPUINFO_VA+CPUINFO_SYNCFLTDUMP+4)], %o5
	st	%o5, [%i1]
	ret
	restore
#endif /* MULTIPROCESSOR */
1.62 pk 6335:
/*
 * Read Asynchronous Fault Status registers.
 * On entry: %o0 == &afsr, %o1 == &afar
 * Return 0 if async registers are present (values stored), 1 if not.
 */
_ENTRY(_C_LABEL(srmmu_get_asyncflt))
	set	SRMMU_AFAR, %o4
	lda	[%o4] ASI_SRMMU, %o4	! get async fault address
	set	SRMMU_AFSR, %o3	!
	st	%o4, [%o1]
	lda	[%o3] ASI_SRMMU, %o3	! get async fault status
	st	%o3, [%o0]
	retl
	clr	%o0			! return value

/*
 * Cypress/HyperSPARC variant: AFSR must be read before AFAR, and AFAR
 * is read only when AFSR_AFO says the fault address is valid.
 */
_ENTRY(_C_LABEL(cypress_get_asyncflt))
_ENTRY(_C_LABEL(hypersparc_get_asyncflt))
	set	SRMMU_AFSR, %o3	! must read status before fault on HS
	lda	[%o3] ASI_SRMMU, %o3	! get async fault status
	st	%o3, [%o0]
	btst	AFSR_AFO, %o3	! and only read fault address
	bz	1f		! if valid.
	set	SRMMU_AFAR, %o4	! (delay slot: harmless either way)
	lda	[%o4] ASI_SRMMU, %o4	! get async fault address
	clr	%o0		! return value
	retl
	st	%o4, [%o1]
1:
	retl
	clr	%o0		! return value

! stub for modules with no async fault registers at all
_ENTRY(_C_LABEL(no_asyncflt_regs))
	retl
	mov	1, %o0		! return value
1.86 pk 6370:
_ENTRY(_C_LABEL(hypersparc_pure_vcache_flush))
	/*
	 * Flush entire on-chip instruction cache, which is
	 * a pure virtually-indexed/virtually-tagged cache.
	 * A single alternate-space store to ASI_HICACHECLR does it all.
	 */
	retl
	sta	%g0, [%g0] ASI_HICACHECLR

#endif /* SUN4M */
1.1 deraadt 6380:
#if !defined(MSIIEP)		/* normal suns */
/*
 * void lo_microtime(struct timeval *tv)
 *
 * LBL's sparc bsd 'microtime': We don't need to spl (so this routine
 * can be a leaf routine) and we don't keep a 'last' timeval (there
 * can't be two calls to this routine in a microsecond).  This seems to
 * be about 20 times faster than the Sun code on an SS-2. - vj
 *
 * Read time values from slowest-changing to fastest-changing,
 * then re-read out to slowest.  If the values read before
 * the innermost match those read after, the innermost value
 * is consistent with the outer values.  If not, it may not
 * be and we must retry.  Typically this loop runs only once;
 * occasionally it runs twice, and only rarely does it run longer.
 *
 * Register roles: %g3 = timer counter address, %o2/%o3 = snapshot of
 * time.tv_sec/tv_usec, %o4 = raw counter, %g4/%g5 = scratch.
 */
#if defined(SUN4)
ENTRY(lo_microtime)
#else
ENTRY(microtime)
#endif
	sethi	%hi(_C_LABEL(time)), %g2

/* pick the usec-counter address for the kernel's target machines */
#if defined(SUN4M) && !(defined(SUN4C) || defined(SUN4))
	sethi	%hi(TIMERREG_VA+4), %g3
	or	%g3, %lo(TIMERREG_VA+4), %g3
#elif (defined(SUN4C) || defined(SUN4)) && !defined(SUN4M)
	sethi	%hi(TIMERREG_VA), %g3
	or	%g3, %lo(TIMERREG_VA), %g3
#else
	sethi	%hi(TIMERREG_VA), %g3
	or	%g3, %lo(TIMERREG_VA), %g3
NOP_ON_4_4C_1:				! patched to a nop on 4/4c;
	add	%g3, 4, %g3		! 4m needs the +4 offset
#endif

2:
	ldd	[%g2+%lo(_C_LABEL(time))], %o2	! time.tv_sec & time.tv_usec
	ld	[%g3], %o4			! usec counter
	ldd	[%g2+%lo(_C_LABEL(time))], %g4	! see if time values changed
	cmp	%g4, %o2
	bne	2b			! if time.tv_sec changed
	cmp	%g5, %o3
	bne	2b			! if time.tv_usec changed
	tst	%o4			! (sign bit = counter limit flag)

	bpos	3f			! reached limit?
	srl	%o4, TMR_SHIFT, %o4	! convert counter to usec
	sethi	%hi(_C_LABEL(tick)), %g4	! bump usec by 1 tick
	ld	[%g4+%lo(_C_LABEL(tick))], %o1
	set	TMR_MASK, %g5
	add	%o1, %o3, %o3
	and	%o4, %g5, %o4
3:
	add	%o4, %o3, %o3
	set	1000000, %g5		! normalize usec value
	cmp	%o3, %g5
	bl,a	4f
	st	%o2, [%o0]
	add	%o2, 1, %o2		! overflow
	sub	%o3, %g5, %o3
	st	%o2, [%o0]
4:
	retl
	st	%o3, [%o0+4]
1.149 uwe 6446:
#else /* MSIIEP */
/* XXX: uwe: can be merged with 4c/4m version above */
/*
 * ms-IIep version of
 * void microtime(struct timeval *tv)
 *
 * This is similar to 4c/4m microtime.   The difference is that
 * counter uses 31 bits and ticks every 4 CPU cycles (CPU is @100MHz)
 * the magic to divide by 25 is stolen from gcc
 */
ENTRY(microtime)
	sethi	%hi(_C_LABEL(time)), %g2

	sethi	%hi(MSIIEP_PCIC_VA), %g3
	or	%g3, PCIC_SCCR_REG, %g3

2:	! consistent-snapshot loop, same protocol as the 4c/4m version
	ldd	[%g2+%lo(_C_LABEL(time))], %o2	! time.tv_sec & time.tv_usec
	ld	[%g3], %o4			! system (timer) counter
	ldd	[%g2+%lo(_C_LABEL(time))], %g4	! see if time values changed
	cmp	%g4, %o2
	bne	2b			! if time.tv_sec changed
	cmp	%g5, %o3
	bne	2b			! if time.tv_usec changed
	tst	%o4
	!! %o2 - time.tv_sec;  %o3 - time.tv_usec;  %o4 - timer counter

	!!! BEGIN ms-IIep specific code
	bpos	3f			! if limit not reached yet
	clr	%g4			!    then use timer as is

	set	0x80000000, %g5
	sethi	%hi(_C_LABEL(tick)), %g4
	bclr	%g5, %o4		! clear limit reached flag
	ld	[%g4+%lo(_C_LABEL(tick))], %g4

	!! %g4 - either 0 or tick (if timer has hit the limit)
3:
	inc	-1, %o4			! timer is 1-based, adjust
	!! divide by 25 magic stolen from a gcc output
	set	1374389535, %g5		! 2^37 / 100, rounded up
	umul	%o4, %g5, %g0
	rd	%y, %o4			! high 32 bits of the product
	srl	%o4, 3, %o4		! >> 3: net effect is %o4 / 25
	add	%o4, %g4, %o4		! maybe bump usec by tick
	!!! END ms-IIep specific code

	add	%o3, %o4, %o3		! add timer to time.tv_usec
	set	1000000, %g5		! normalize usec value
	cmp	%o3, %g5
	bl,a	4f
	st	%o2, [%o0]
	inc	%o2			! overflow into tv_sec
	sub	%o3, %g5, %o3
	st	%o2, [%o0]
4:	retl
	st	%o3, [%o0 + 4]
#endif /* MSIIEP */
1.1 deraadt 6505:
/*
 * delay function
 *
 * void delay(N)  -- delay N microseconds
 *
 * Register usage: %o0 = "N" number of usecs to go (counts down to zero)
 *		   %o1 = "timerblurb" (stays constant)
 *		   %o2 = counter for 1 usec (counts down from %o1 to zero)
 *
 * `timerblurb' is the boot-time calibrated number of inner-loop
 * iterations per microsecond.
 */

ENTRY(delay)			! %o0 = n
	subcc	%o0, %g0, %g0	! n == 0?
	be	2f		! yes: return immediately
				! (sethi below runs in the delay slot;
				!  harmless when the branch is taken)
	sethi	%hi(_C_LABEL(timerblurb)), %o1
	ld	[%o1 + %lo(_C_LABEL(timerblurb))], %o1	! %o1 = timerblurb

	addcc	%o1, %g0, %o2	! %o2 = cntr (start @ %o1), clear CCs
				! first time through only

				! delay 1 usec
1:	bne	1b		! come back here if not done
	subcc	%o2, 1, %o2	! %o2 = %o2 - 1 [delay slot]

	subcc	%o0, 1, %o0	! %o0 = %o0 - 1
	bne	1b		! done yet?
	addcc	%o1, %g0, %o2	! reinit %o2 and CCs [delay slot]
				! harmless if not branching
2:
	retl			! return
	nop			! [delay slot]
1.53 pk 6538:
#if defined(KGDB) || defined(DDB) || defined(DIAGNOSTIC)
/*
 * Write all windows (user or otherwise), except the current one.
 * Done by saving down through all windows (forcing each to spill to
 * its stack frame) and then restoring back to where we started.
 *
 * THIS COULD BE DONE IN USER CODE
 */
ENTRY(write_all_windows)
	/*
	 * g2 = g1 = nwindows - 1;
	 * while (--g1 > 0) save();
	 * while (--g2 > 0) restore();
	 */
	sethi	%hi(_C_LABEL(nwindows)), %g1
	ld	[%g1 + %lo(_C_LABEL(nwindows))], %g1
	dec	%g1
	mov	%g1, %g2

1:	deccc	%g1
	bg,a	1b
	save	%sp, -64, %sp	! annulled when the loop exits

2:	deccc	%g2
	bg,a	2b
	restore

	retl
	nop
#endif	/* KGDB || DDB || DIAGNOSTIC */
6567:
/*
 * setjmp(env): save %sp, return pc (%o7) and %fp into env; return 0.
 * The std of %sp stores the %o6/%o7 register pair in one go.
 */
ENTRY(setjmp)
	std	%sp, [%o0+0]	! stack pointer & return pc
	st	%fp, [%o0+8]	! frame pointer
	retl
	clr	%o0
6573:
Lpanic_ljmp:
	.asciz	"longjmp botch"
	_ALIGN

/*
 * longjmp(env, v): unwind register windows back to the frame recorded
 * by setjmp(env), then return v (or 1 if v == 0) from that context.
 * Uses globals %g1/%g6/%g7 so the values survive the `restore' loop.
 * Panics if the saved frame cannot be reached or %sp would decrease.
 */
ENTRY(longjmp)
	addcc	%o1, %g0, %g6	! compute v ? v : 1 in a global register
	be,a	0f
	mov	1, %g6
0:
	mov	%o0, %g1	! save a in another global register
	ld	[%g1+8], %g7	/* get caller's frame */
1:
	cmp	%fp, %g7	! compare against desired frame
	bl,a	1b		! if below,
	restore			!    pop frame and loop
	be,a	2f		! if there,
	ldd	[%g1+0], %o2	!    fetch return %sp and pc, and get out

Llongjmpbotch:
	! otherwise, went too far; bomb out
	save	%sp, -CCFSZ, %sp	/* preserve current window */
	sethi	%hi(Lpanic_ljmp), %o0
	call	_C_LABEL(panic)
	or	%o0, %lo(Lpanic_ljmp), %o0;
	unimp	0

2:
	cmp	%o2, %sp	! %sp must not decrease
	bge,a	3f
	mov	%o2, %sp	! it is OK, put it in place
	b,a	Llongjmpbotch
3:
	jmp	%o3 + 8		! success, return %g6
	mov	%g6, %o0
6608:
	.data
	! first free VA past the loaded kernel image (filled in at boot)
	.globl	_C_LABEL(kernel_top)
_C_LABEL(kernel_top):
	.word	0
	! pointer to the bootloader-supplied bootinfo block
	.globl	_C_LABEL(bootinfo)
_C_LABEL(bootinfo):
	.word	0

	.globl	_C_LABEL(proc0paddr)
_C_LABEL(proc0paddr):
	.word	_C_LABEL(u0)		! KVA of proc0 uarea

/* interrupt counters	XXX THESE BELONG ELSEWHERE (if anywhere) */
	! intrnames: one nul-terminated name per interrupt level 0..14;
	! intrcnt: one 32-bit counter per name (15 entries, see .skip).
	.globl	_C_LABEL(intrcnt), _C_LABEL(eintrcnt)
	.globl	_C_LABEL(intrnames), _C_LABEL(eintrnames)
_C_LABEL(intrnames):
	.asciz	"spur"
	.asciz	"lev1"
	.asciz	"lev2"
	.asciz	"lev3"
	.asciz	"lev4"
	.asciz	"lev5"
	.asciz	"lev6"
	.asciz	"lev7"
	.asciz	"lev8"
	.asciz	"lev9"
	.asciz	"clock"
	.asciz	"lev11"
	.asciz	"lev12"
	.asciz	"lev13"
	.asciz	"prof"
_C_LABEL(eintrnames):
	_ALIGN
_C_LABEL(intrcnt):
	.skip	4*15
_C_LABEL(eintrcnt):

	.comm	_C_LABEL(nwindows), 4	! number of register windows (probed)
	.comm	_C_LABEL(romp), 4	! PROM vector pointer
CVSweb <webmaster@jp.NetBSD.org>