Annotation of src/sys/arch/sparc/sparc/locore.s, Revision 1.215
1.215 ! pk 1: /* $NetBSD: locore.s,v 1.214 2004/07/04 09:54:20 pk Exp $ */
1.70 mrg 2:
1.1 deraadt 3: /*
1.52 pk 4: * Copyright (c) 1996 Paul Kranenburg
5: * Copyright (c) 1996
1.55 abrown 6: * The President and Fellows of Harvard College. All rights reserved.
1.1 deraadt 7: * Copyright (c) 1992, 1993
8: * The Regents of the University of California. All rights reserved.
9: *
10: * This software was developed by the Computer Systems Engineering group
11: * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
12: * contributed to Berkeley.
13: *
14: * All advertising materials mentioning features or use of this software
15: * must display the following acknowledgement:
16: * This product includes software developed by the University of
17: * California, Lawrence Berkeley Laboratory.
1.52 pk 18: * This product includes software developed by Harvard University.
1.1 deraadt 19: *
20: * Redistribution and use in source and binary forms, with or without
21: * modification, are permitted provided that the following conditions
22: * are met:
23: * 1. Redistributions of source code must retain the above copyright
24: * notice, this list of conditions and the following disclaimer.
25: * 2. Redistributions in binary form must reproduce the above copyright
26: * notice, this list of conditions and the following disclaimer in the
27: * documentation and/or other materials provided with the distribution.
28: * 3. All advertising materials mentioning features or use of this software
29: * must display the following acknowledgement:
30: * This product includes software developed by the University of
31: * California, Berkeley and its contributors.
1.52 pk 32: * This product includes software developed by Harvard University.
33: * This product includes software developed by Paul Kranenburg.
1.1 deraadt 34: * 4. Neither the name of the University nor the names of its contributors
35: * may be used to endorse or promote products derived from this software
36: * without specific prior written permission.
37: *
38: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48: * SUCH DAMAGE.
49: *
1.10 deraadt 50: * @(#)locore.s 8.4 (Berkeley) 12/10/93
1.1 deraadt 51: */
52:
1.85 jonathan 53: #include "opt_ddb.h"
1.140 pk 54: #include "opt_kgdb.h"
1.84 thorpej 55: #include "opt_compat_svr4.h"
1.116 christos 56: #include "opt_compat_sunos.h"
1.97 pk 57: #include "opt_multiprocessor.h"
1.134 pk 58: #include "opt_lockdebug.h"
1.80 mrg 59:
1.47 mycroft 60: #include "assym.h"
1.52 pk 61: #include <machine/param.h>
1.111 pk 62: #include <machine/asm.h>
1.1 deraadt 63: #include <sparc/sparc/intreg.h>
64: #include <sparc/sparc/timerreg.h>
1.52 pk 65: #include <sparc/sparc/vaddrs.h>
1.1 deraadt 66: #ifdef notyet
67: #include <sparc/dev/zsreg.h>
68: #endif
69: #include <machine/ctlreg.h>
1.173 pk 70: #include <machine/intr.h>
1.1 deraadt 71: #include <machine/psl.h>
72: #include <machine/signal.h>
73: #include <machine/trap.h>
1.92 pk 74: #include <sys/syscall.h>
1.1 deraadt 75:
76: /*
77: * GNU assembler does not understand `.empty' directive; Sun assembler
78: * gripes about labels without it. To allow cross-compilation using
79: * the Sun assembler, and because .empty directives are useful documentation,
80: * we use this trick.
81: */
82: #ifdef SUN_AS
83: #define EMPTY .empty
84: #else
85: #define EMPTY /* .empty */
86: #endif
87:
88: /* use as needed to align things on longword boundaries */
1.52 pk 89: #define _ALIGN .align 4
1.1 deraadt 90:
91: /*
92: * CCFSZ (C Compiler Frame SiZe) is the size of a stack frame required if
93: * a function is to call C code. It should be just 64, but Sun defined
94: * their frame with space to hold arguments 0 through 5 (plus some junk),
1.63 pk 95: * and varargs routines (such as printf) demand this, and gcc uses this
1.1 deraadt 96: * area at times anyway.
97: */
98: #define CCFSZ 96
99:
1.195 pk 100: /* We rely on the fact that %lo(CPUINFO_VA) is zero */
/*
 * Build-time sanity check: CPUINFO_VA must be 8K-aligned (low 13
 * bits zero).  If it is not, the `.if' below assembles the token
 * `BARF' -- not a real mnemonic or defined symbol -- which forces
 * an assembler error, failing the build instead of producing a
 * kernel with a misaligned per-CPU cpuinfo mapping.
 */
101: .if CPUINFO_VA & 0x1fff
102: BARF ! deliberate assembly error when CPUINFO_VA is misaligned
103: .endif
104:
1.1 deraadt 105: /*
106: * A handy macro for maintaining instrumentation counters.
107: * Note that this clobbers %o0 and %o1. Normal usage is
108: * something like:
109: * foointr:
110: * TRAP_SETUP(...) ! makes %o registers safe
1.111 pk 111: * INCR(cnt+V_FOO) ! count a foo
1.1 deraadt 112: */
113: #define INCR(what) \
114: sethi %hi(what), %o0; \
115: ld [%o0 + %lo(what)], %o1; \
116: inc %o1; \
117: st %o1, [%o0 + %lo(what)]
118:
119: /*
120: * Another handy macro: load one register window, given `base' address.
121: * This can be either a simple register (e.g., %sp) or include an initial
122: * offset (e.g., %g6 + PCB_RW).
123: */
124: #define LOADWIN(addr) \
125: ldd [addr], %l0; \
126: ldd [addr + 8], %l2; \
127: ldd [addr + 16], %l4; \
128: ldd [addr + 24], %l6; \
129: ldd [addr + 32], %i0; \
130: ldd [addr + 40], %i2; \
131: ldd [addr + 48], %i4; \
132: ldd [addr + 56], %i6
133:
134: /*
135: * To return from trap we need the two-instruction sequence
136: * `jmp %l1; rett %l2', which is defined here for convenience.
137: */
138: #define RETT jmp %l1; rett %l2
139:
140: .data
141: /*
142: * The interrupt stack.
143: *
144: * This is the very first thing in the data segment, and therefore has
145: * the lowest kernel stack address. We count on this in the interrupt
146: * trap-frame setup code, since we may need to switch from the kernel
147: * stack to the interrupt stack (iff we are not already on the interrupt
148: * stack). One sethi+cmp is all we need since this is so carefully
149: * arranged.
1.98 pk 150: *
151: * In SMP kernels, each CPU has its own interrupt stack and the computation
152: * to determine whether we're already on the interrupt stack is slightly
153: * more time consuming (see INTR_SETUP() below).
1.1 deraadt 154: */
1.111 pk 155: .globl _C_LABEL(intstack)
156: .globl _C_LABEL(eintstack)
157: _C_LABEL(intstack):
1.98 pk 158: .skip INT_STACK_SIZE ! 16k = 128 128-byte stack frames
1.111 pk 159: _C_LABEL(eintstack):
1.1 deraadt 160:
1.101 pk 161: _EINTSTACKP = CPUINFO_VA + CPUINFO_EINTSTACK
162:
1.1 deraadt 163: /*
1.131 thorpej 164: * CPUINFO_VA is a CPU-local virtual address; cpi->ci_self is a global
165: * virtual address for the same structure. It must be stored in p->p_cpu
166: * upon context switch.
167: */
1.179 pk 168: _CISELFP = CPUINFO_VA + CPUINFO_SELF
169: _CIFLAGS = CPUINFO_VA + CPUINFO_FLAGS
170:
1.197 wiz 171: /* Per-CPU AST and reschedule requests */
1.179 pk 172: _WANT_AST = CPUINFO_VA + CPUINFO_WANT_AST
173: _WANT_RESCHED = CPUINFO_VA + CPUINFO_WANT_RESCHED
1.131 thorpej 174:
175: /*
1.1 deraadt 176: * When a process exits and its u. area goes away, we set cpcb to point
177: * to this `u.', leaving us with something to use for an interrupt stack,
178: * and letting all the register save code have a pcb_uw to examine.
179: * This is also carefully arranged (to come just before u0, so that
180: * process 0's kernel stack can quietly overrun into it during bootup, if
181: * we feel like doing that).
182: */
1.111 pk 183: .globl _C_LABEL(idle_u)
184: _C_LABEL(idle_u):
1.13 deraadt 185: .skip USPACE
1.99 pk 186: /*
187: * On SMP kernels, there's an idle u-area for each CPU and we must
188: * read its location from cpuinfo.
189: */
1.111 pk 190: IDLE_UP = CPUINFO_VA + CPUINFO_IDLE_U
1.1 deraadt 191:
192: /*
193: * Process 0's u.
194: *
195: * This must be aligned on an 8 byte boundary.
196: */
1.111 pk 197: .globl _C_LABEL(u0)
198: _C_LABEL(u0): .skip USPACE
1.1 deraadt 199: estack0:
200:
201: #ifdef KGDB
202: /*
203: * Another item that must be aligned, easiest to put it here.
204: */
205: KGDB_STACK_SIZE = 2048
1.111 pk 206: .globl _C_LABEL(kgdb_stack)
207: _C_LABEL(kgdb_stack):
1.1 deraadt 208: .skip KGDB_STACK_SIZE ! hope this is enough
209: #endif
210:
211: /*
1.111 pk 212: * cpcb points to the current pcb (and hence u. area).
1.1 deraadt 213: * Initially this is the special one.
214: */
1.111 pk 215: cpcb = CPUINFO_VA + CPUINFO_CURPCB
1.1 deraadt 216:
1.185 thorpej 217: /* curlwp points to the current LWP that has the CPU */
218: curlwp = CPUINFO_VA + CPUINFO_CURLWP
1.104 pk 219:
1.52 pk 220: /*
1.197 wiz 221: * cputyp is the current CPU type, used to distinguish between
1.13 deraadt 222: * the many variations of different sun4* machines. It contains
223: * the value CPU_SUN4, CPU_SUN4C, or CPU_SUN4M.
1.9 deraadt 224: */
1.111 pk 225: .globl _C_LABEL(cputyp)
226: _C_LABEL(cputyp):
1.9 deraadt 227: .word 1
1.52 pk 228:
1.18 deraadt 229: #if defined(SUN4C) || defined(SUN4M)
1.111 pk 230: cputypval:
1.18 deraadt 231: .asciz "sun4c"
232: .ascii " "
1.111 pk 233: cputypvar:
1.37 pk 234: .asciz "compatible"
1.52 pk 235: _ALIGN
1.18 deraadt 236: #endif
237:
1.13 deraadt 238: /*
239: * These variables are pointed to by the cpp symbols PGSHIFT, NBPG,
240: * and PGOFSET.
241: */
1.111 pk 242: .globl _C_LABEL(pgshift), _C_LABEL(nbpg), _C_LABEL(pgofset)
243: _C_LABEL(pgshift):
1.52 pk 244: .word 0
1.111 pk 245: _C_LABEL(nbpg):
1.52 pk 246: .word 0
1.111 pk 247: _C_LABEL(pgofset):
1.52 pk 248: .word 0
249:
1.111 pk 250: .globl _C_LABEL(trapbase)
251: _C_LABEL(trapbase):
1.52 pk 252: .word 0
1.9 deraadt 253:
1.75 pk 254: #if 0
1.9 deraadt 255: #if defined(SUN4M)
256: _mapme:
257: .asciz "0 0 f8000000 15c6a0 map-pages"
258: #endif
1.75 pk 259: #endif
1.9 deraadt 260:
1.158 thorpej 261: #if !defined(SUN4D)
262: sun4d_notsup:
263: .asciz "cr .( NetBSD/sparc: this kernel does not support the sun4d) cr"
264: #endif
1.9 deraadt 265: #if !defined(SUN4M)
266: sun4m_notsup:
1.20 deraadt 267: .asciz "cr .( NetBSD/sparc: this kernel does not support the sun4m) cr"
1.9 deraadt 268: #endif
1.13 deraadt 269: #if !defined(SUN4C)
1.9 deraadt 270: sun4c_notsup:
1.20 deraadt 271: .asciz "cr .( NetBSD/sparc: this kernel does not support the sun4c) cr"
1.13 deraadt 272: #endif
273: #if !defined(SUN4)
274: sun4_notsup:
1.20 deraadt 275: ! the extra characters at the end are to ensure the zs fifo drains
276: ! before we halt. Sick, eh?
277: .asciz "NetBSD/sparc: this kernel does not support the sun4\n\r \b"
1.9 deraadt 278: #endif
1.52 pk 279: _ALIGN
1.9 deraadt 280:
1.1 deraadt 281: .text
282:
283: /*
1.26 deraadt 284: * The first thing in the real text segment is the trap vector table,
285: * which must be aligned on a 4096 byte boundary. The text segment
286: * starts beyond page 0 of KERNBASE so that there is a red zone
287: * between user and kernel space. Since the boot ROM loads us at
1.119 christos 288: * PROM_LOADADDR, it is far easier to start at KERNBASE+PROM_LOADADDR than to
1.26 deraadt 289: * buck the trend. This is two or four pages in (depending on whether
290: * pagesize is 8192 or 4096). We place two items in this area:
1.75 pk 291: * the message buffer (phys addr 0) and the cpu_softc structure for
292: * the first processor in the system (phys addr 0x2000).
293: * Because the message buffer is in our "red zone" between user and
1.26 deraadt 294: * kernel space we remap it in configure() to another location and
295: * invalidate the mapping at KERNBASE.
296: */
297:
1.1 deraadt 298: /*
299: * Each trap has room for four instructions, of which one perforce must
300: * be a branch. On entry the hardware has copied pc and npc to %l1 and
301: * %l2 respectively. We use two more to read the psr into %l0, and to
302: * put the trap type value into %l3 (with a few exceptions below).
303: * We could read the trap type field of %tbr later in the code instead,
304: * but there is no need, and that would require more instructions
305: * (read+mask, vs 1 `mov' here).
306: *
307: * I used to generate these numbers by address arithmetic, but gas's
308: * expression evaluator has about as much sense as your average slug
309: * (oddly enough, the code looks about as slimy too). Thus, all the
310: * trap numbers are given as arguments to the trap macros. This means
311: * there is one line per trap. Sigh.
312: *
313: * Note that only the local registers may be used, since the trap
314: * window is potentially the last window. Its `in' registers are
315: * the previous window's outs (as usual), but more important, its
316: * `out' registers may be in use as the `topmost' window's `in' registers.
317: * The global registers are of course verboten (well, until we save
318: * them away).
319: *
320: * Hardware interrupt vectors can be `linked'---the linkage is to regular
321: * C code---or rewired to fast in-window handlers. The latter are good
322: * for unbuffered hardware like the Zilog serial chip and the AMD audio
323: * chip, where many interrupts can be handled trivially with pseudo-DMA or
324: * similar. Only one `fast' interrupt can be used per level, however, and
325: * direct and `fast' interrupts are incompatible. Routines in intr.c
326: * handle setting these, with optional paranoia.
327: */
328:
329: /* regular vectored traps */
330: #define VTRAP(type, label) \
331: mov (type), %l3; b label; mov %psr, %l0; nop
332:
333: /* hardware interrupts (can be linked or made `fast') */
1.52 pk 334: #define HARDINT44C(lev) \
1.111 pk 335: mov (lev), %l3; b _C_LABEL(sparc_interrupt44c); mov %psr, %l0; nop
1.52 pk 336:
337: /* hardware interrupts (can be linked or made `fast') */
338: #define HARDINT4M(lev) \
1.111 pk 339: mov (lev), %l3; b _C_LABEL(sparc_interrupt4m); mov %psr, %l0; nop
1.1 deraadt 340:
341: /* software interrupts (may not be made direct, sorry---but you
342: should not be using them trivially anyway) */
1.52 pk 343: #define SOFTINT44C(lev, bit) \
344: mov (lev), %l3; mov (bit), %l4; b softintr_sun44c; mov %psr, %l0
345:
346: /* There's no SOFTINT4M(): both hard and soft vector the same way */
1.1 deraadt 347:
348: /* traps that just call trap() */
349: #define TRAP(type) VTRAP(type, slowtrap)
350:
351: /* architecturally undefined traps (cause panic) */
352: #define UTRAP(type) VTRAP(type, slowtrap)
353:
354: /* software undefined traps (may be replaced) */
355: #define STRAP(type) VTRAP(type, slowtrap)
356:
357: /* breakpoint acts differently under kgdb */
358: #ifdef KGDB
359: #define BPT VTRAP(T_BREAKPOINT, bpt)
1.52 pk 360: #define BPT_KGDB_EXEC VTRAP(T_KGDB_EXEC, bpt)
361: #else
362: #define BPT TRAP(T_BREAKPOINT)
363: #define BPT_KGDB_EXEC TRAP(T_KGDB_EXEC)
364: #endif
365:
366: /* special high-speed 1-instruction-shaved-off traps (get nothing in %l3) */
1.122 christos 367: #define SYSCALL b _C_LABEL(_syscall); mov %psr, %l0; nop; nop
1.52 pk 368: #define WINDOW_OF b window_of; mov %psr, %l0; nop; nop
369: #define WINDOW_UF b window_uf; mov %psr, %l0; nop; nop
370: #ifdef notyet
371: #define ZS_INTERRUPT b zshard; mov %psr, %l0; nop; nop
372: #else
373: #define ZS_INTERRUPT44C HARDINT44C(12)
374: #define ZS_INTERRUPT4M HARDINT4M(12)
375: #endif
376:
1.173 pk 377: #ifdef DEBUG
/*
 * Trap tracing (DEBUG kernels only): record the most recent trap
 * type in the CPU-local cpuinfo field at CPUINFO_VA + CPUINFO_TT,
 * so it can later be inspected for debugging.
 *
 * TRAP_TRACE(tt, tmp):          store register `tt'; clobbers `tmp'.
 * TRAP_TRACE2(tt, tmp1, tmp2):  copy value `tt' into `tmp1' first,
 *                               then store it; clobbers tmp1, tmp2.
 * In non-DEBUG kernels both macros expand to nothing.
 */
378: #define TRAP_TRACE(tt, tmp) \
379: sethi %hi(CPUINFO_VA + CPUINFO_TT), tmp; \
380: st tt, [tmp + %lo(CPUINFO_VA + CPUINFO_TT)];
381: #define TRAP_TRACE2(tt, tmp1, tmp2) \
382: mov tt, tmp1; \
383: TRAP_TRACE(tmp1, tmp2)
384: #else /* DEBUG */
385: #define TRAP_TRACE(tt,tmp) /**/
386: #define TRAP_TRACE2(tt,tmp1,tmp2) /**/
387: #endif /* DEBUG */
388:
1.111 pk 389: .globl _ASM_LABEL(start), _C_LABEL(kernel_text)
390: _C_LABEL(kernel_text) = start ! for kvm_mkdb(8)
391: _ASM_LABEL(start):
1.52 pk 392: /*
393: * Put sun4 traptable first, since it needs the most stringent alignment (8192)
394: */
395: #if defined(SUN4)
396: trapbase_sun4:
397: /* trap 0 is special since we cannot receive it */
398: b dostart; nop; nop; nop ! 00 = reset (fake)
399: VTRAP(T_TEXTFAULT, memfault_sun4) ! 01 = instr. fetch fault
400: TRAP(T_ILLINST) ! 02 = illegal instruction
401: TRAP(T_PRIVINST) ! 03 = privileged instruction
402: TRAP(T_FPDISABLED) ! 04 = fp instr, but EF bit off in psr
403: WINDOW_OF ! 05 = window overflow
404: WINDOW_UF ! 06 = window underflow
405: TRAP(T_ALIGN) ! 07 = address alignment error
406: VTRAP(T_FPE, fp_exception) ! 08 = fp exception
407: VTRAP(T_DATAFAULT, memfault_sun4) ! 09 = data fetch fault
408: TRAP(T_TAGOF) ! 0a = tag overflow
409: UTRAP(0x0b)
410: UTRAP(0x0c)
411: UTRAP(0x0d)
412: UTRAP(0x0e)
413: UTRAP(0x0f)
414: UTRAP(0x10)
415: SOFTINT44C(1, IE_L1) ! 11 = level 1 interrupt
416: HARDINT44C(2) ! 12 = level 2 interrupt
417: HARDINT44C(3) ! 13 = level 3 interrupt
418: SOFTINT44C(4, IE_L4) ! 14 = level 4 interrupt
419: HARDINT44C(5) ! 15 = level 5 interrupt
420: SOFTINT44C(6, IE_L6) ! 16 = level 6 interrupt
421: HARDINT44C(7) ! 17 = level 7 interrupt
422: HARDINT44C(8) ! 18 = level 8 interrupt
423: HARDINT44C(9) ! 19 = level 9 interrupt
424: HARDINT44C(10) ! 1a = level 10 interrupt
425: HARDINT44C(11) ! 1b = level 11 interrupt
426: ZS_INTERRUPT44C ! 1c = level 12 (zs) interrupt
427: HARDINT44C(13) ! 1d = level 13 interrupt
428: HARDINT44C(14) ! 1e = level 14 interrupt
429: VTRAP(15, nmi_sun4) ! 1f = nonmaskable interrupt
430: UTRAP(0x20)
431: UTRAP(0x21)
432: UTRAP(0x22)
433: UTRAP(0x23)
434: TRAP(T_CPDISABLED) ! 24 = coprocessor instr, EC bit off in psr
435: UTRAP(0x25)
436: UTRAP(0x26)
437: UTRAP(0x27)
438: TRAP(T_CPEXCEPTION) ! 28 = coprocessor exception
439: UTRAP(0x29)
440: UTRAP(0x2a)
441: UTRAP(0x2b)
442: UTRAP(0x2c)
443: UTRAP(0x2d)
444: UTRAP(0x2e)
445: UTRAP(0x2f)
446: UTRAP(0x30)
447: UTRAP(0x31)
448: UTRAP(0x32)
449: UTRAP(0x33)
450: UTRAP(0x34)
451: UTRAP(0x35)
452: UTRAP(0x36)
453: UTRAP(0x37)
454: UTRAP(0x38)
455: UTRAP(0x39)
456: UTRAP(0x3a)
457: UTRAP(0x3b)
458: UTRAP(0x3c)
459: UTRAP(0x3d)
460: UTRAP(0x3e)
461: UTRAP(0x3f)
462: UTRAP(0x40)
463: UTRAP(0x41)
464: UTRAP(0x42)
465: UTRAP(0x43)
466: UTRAP(0x44)
467: UTRAP(0x45)
468: UTRAP(0x46)
469: UTRAP(0x47)
470: UTRAP(0x48)
471: UTRAP(0x49)
472: UTRAP(0x4a)
473: UTRAP(0x4b)
474: UTRAP(0x4c)
475: UTRAP(0x4d)
476: UTRAP(0x4e)
477: UTRAP(0x4f)
478: UTRAP(0x50)
479: UTRAP(0x51)
480: UTRAP(0x52)
481: UTRAP(0x53)
482: UTRAP(0x54)
483: UTRAP(0x55)
484: UTRAP(0x56)
485: UTRAP(0x57)
486: UTRAP(0x58)
487: UTRAP(0x59)
488: UTRAP(0x5a)
489: UTRAP(0x5b)
490: UTRAP(0x5c)
491: UTRAP(0x5d)
492: UTRAP(0x5e)
493: UTRAP(0x5f)
494: UTRAP(0x60)
495: UTRAP(0x61)
496: UTRAP(0x62)
497: UTRAP(0x63)
498: UTRAP(0x64)
499: UTRAP(0x65)
500: UTRAP(0x66)
501: UTRAP(0x67)
502: UTRAP(0x68)
503: UTRAP(0x69)
504: UTRAP(0x6a)
505: UTRAP(0x6b)
506: UTRAP(0x6c)
507: UTRAP(0x6d)
508: UTRAP(0x6e)
509: UTRAP(0x6f)
510: UTRAP(0x70)
511: UTRAP(0x71)
512: UTRAP(0x72)
513: UTRAP(0x73)
514: UTRAP(0x74)
515: UTRAP(0x75)
516: UTRAP(0x76)
517: UTRAP(0x77)
518: UTRAP(0x78)
519: UTRAP(0x79)
520: UTRAP(0x7a)
521: UTRAP(0x7b)
522: UTRAP(0x7c)
523: UTRAP(0x7d)
524: UTRAP(0x7e)
525: UTRAP(0x7f)
526: SYSCALL ! 80 = sun syscall
527: BPT ! 81 = pseudo breakpoint instruction
528: TRAP(T_DIV0) ! 82 = divide by zero
529: TRAP(T_FLUSHWIN) ! 83 = flush windows
530: TRAP(T_CLEANWIN) ! 84 = provide clean windows
531: TRAP(T_RANGECHECK) ! 85 = ???
532: TRAP(T_FIXALIGN) ! 86 = fix up unaligned accesses
533: TRAP(T_INTOF) ! 87 = integer overflow
534: SYSCALL ! 88 = svr4 syscall
535: SYSCALL ! 89 = bsd syscall
536: BPT_KGDB_EXEC ! 8a = enter kernel gdb on kernel startup
537: STRAP(0x8b)
538: STRAP(0x8c)
539: STRAP(0x8d)
540: STRAP(0x8e)
541: STRAP(0x8f)
542: STRAP(0x90)
543: STRAP(0x91)
544: STRAP(0x92)
545: STRAP(0x93)
546: STRAP(0x94)
547: STRAP(0x95)
548: STRAP(0x96)
549: STRAP(0x97)
550: STRAP(0x98)
551: STRAP(0x99)
552: STRAP(0x9a)
553: STRAP(0x9b)
554: STRAP(0x9c)
555: STRAP(0x9d)
556: STRAP(0x9e)
557: STRAP(0x9f)
558: STRAP(0xa0)
559: STRAP(0xa1)
560: STRAP(0xa2)
561: STRAP(0xa3)
562: STRAP(0xa4)
563: STRAP(0xa5)
564: STRAP(0xa6)
565: STRAP(0xa7)
566: STRAP(0xa8)
567: STRAP(0xa9)
568: STRAP(0xaa)
569: STRAP(0xab)
570: STRAP(0xac)
571: STRAP(0xad)
572: STRAP(0xae)
573: STRAP(0xaf)
574: STRAP(0xb0)
575: STRAP(0xb1)
576: STRAP(0xb2)
577: STRAP(0xb3)
578: STRAP(0xb4)
579: STRAP(0xb5)
580: STRAP(0xb6)
581: STRAP(0xb7)
582: STRAP(0xb8)
583: STRAP(0xb9)
584: STRAP(0xba)
585: STRAP(0xbb)
586: STRAP(0xbc)
587: STRAP(0xbd)
588: STRAP(0xbe)
589: STRAP(0xbf)
590: STRAP(0xc0)
591: STRAP(0xc1)
592: STRAP(0xc2)
593: STRAP(0xc3)
594: STRAP(0xc4)
595: STRAP(0xc5)
596: STRAP(0xc6)
597: STRAP(0xc7)
598: STRAP(0xc8)
599: STRAP(0xc9)
600: STRAP(0xca)
601: STRAP(0xcb)
602: STRAP(0xcc)
603: STRAP(0xcd)
604: STRAP(0xce)
605: STRAP(0xcf)
606: STRAP(0xd0)
607: STRAP(0xd1)
608: STRAP(0xd2)
609: STRAP(0xd3)
610: STRAP(0xd4)
611: STRAP(0xd5)
612: STRAP(0xd6)
613: STRAP(0xd7)
614: STRAP(0xd8)
615: STRAP(0xd9)
616: STRAP(0xda)
617: STRAP(0xdb)
618: STRAP(0xdc)
619: STRAP(0xdd)
620: STRAP(0xde)
621: STRAP(0xdf)
622: STRAP(0xe0)
623: STRAP(0xe1)
624: STRAP(0xe2)
625: STRAP(0xe3)
626: STRAP(0xe4)
627: STRAP(0xe5)
628: STRAP(0xe6)
629: STRAP(0xe7)
630: STRAP(0xe8)
631: STRAP(0xe9)
632: STRAP(0xea)
633: STRAP(0xeb)
634: STRAP(0xec)
635: STRAP(0xed)
636: STRAP(0xee)
637: STRAP(0xef)
638: STRAP(0xf0)
639: STRAP(0xf1)
640: STRAP(0xf2)
641: STRAP(0xf3)
642: STRAP(0xf4)
643: STRAP(0xf5)
644: STRAP(0xf6)
645: STRAP(0xf7)
646: STRAP(0xf8)
647: STRAP(0xf9)
648: STRAP(0xfa)
649: STRAP(0xfb)
650: STRAP(0xfc)
651: STRAP(0xfd)
652: STRAP(0xfe)
653: STRAP(0xff)
654: #endif
655:
656: #if defined(SUN4C)
657: trapbase_sun4c:
658: /* trap 0 is special since we cannot receive it */
659: b dostart; nop; nop; nop ! 00 = reset (fake)
660: VTRAP(T_TEXTFAULT, memfault_sun4c) ! 01 = instr. fetch fault
661: TRAP(T_ILLINST) ! 02 = illegal instruction
662: TRAP(T_PRIVINST) ! 03 = privileged instruction
663: TRAP(T_FPDISABLED) ! 04 = fp instr, but EF bit off in psr
664: WINDOW_OF ! 05 = window overflow
665: WINDOW_UF ! 06 = window underflow
666: TRAP(T_ALIGN) ! 07 = address alignment error
667: VTRAP(T_FPE, fp_exception) ! 08 = fp exception
668: VTRAP(T_DATAFAULT, memfault_sun4c) ! 09 = data fetch fault
669: TRAP(T_TAGOF) ! 0a = tag overflow
670: UTRAP(0x0b)
671: UTRAP(0x0c)
672: UTRAP(0x0d)
673: UTRAP(0x0e)
674: UTRAP(0x0f)
675: UTRAP(0x10)
676: SOFTINT44C(1, IE_L1) ! 11 = level 1 interrupt
677: HARDINT44C(2) ! 12 = level 2 interrupt
678: HARDINT44C(3) ! 13 = level 3 interrupt
679: SOFTINT44C(4, IE_L4) ! 14 = level 4 interrupt
680: HARDINT44C(5) ! 15 = level 5 interrupt
681: SOFTINT44C(6, IE_L6) ! 16 = level 6 interrupt
682: HARDINT44C(7) ! 17 = level 7 interrupt
683: HARDINT44C(8) ! 18 = level 8 interrupt
684: HARDINT44C(9) ! 19 = level 9 interrupt
685: HARDINT44C(10) ! 1a = level 10 interrupt
686: HARDINT44C(11) ! 1b = level 11 interrupt
687: ZS_INTERRUPT44C ! 1c = level 12 (zs) interrupt
688: HARDINT44C(13) ! 1d = level 13 interrupt
689: HARDINT44C(14) ! 1e = level 14 interrupt
690: VTRAP(15, nmi_sun4c) ! 1f = nonmaskable interrupt
691: UTRAP(0x20)
692: UTRAP(0x21)
693: UTRAP(0x22)
694: UTRAP(0x23)
695: TRAP(T_CPDISABLED) ! 24 = coprocessor instr, EC bit off in psr
696: UTRAP(0x25)
697: UTRAP(0x26)
698: UTRAP(0x27)
699: TRAP(T_CPEXCEPTION) ! 28 = coprocessor exception
700: UTRAP(0x29)
701: UTRAP(0x2a)
702: UTRAP(0x2b)
703: UTRAP(0x2c)
704: UTRAP(0x2d)
705: UTRAP(0x2e)
706: UTRAP(0x2f)
707: UTRAP(0x30)
708: UTRAP(0x31)
709: UTRAP(0x32)
710: UTRAP(0x33)
711: UTRAP(0x34)
712: UTRAP(0x35)
713: UTRAP(0x36)
714: UTRAP(0x37)
715: UTRAP(0x38)
716: UTRAP(0x39)
717: UTRAP(0x3a)
718: UTRAP(0x3b)
719: UTRAP(0x3c)
720: UTRAP(0x3d)
721: UTRAP(0x3e)
722: UTRAP(0x3f)
723: UTRAP(0x40)
724: UTRAP(0x41)
725: UTRAP(0x42)
726: UTRAP(0x43)
727: UTRAP(0x44)
728: UTRAP(0x45)
729: UTRAP(0x46)
730: UTRAP(0x47)
731: UTRAP(0x48)
732: UTRAP(0x49)
733: UTRAP(0x4a)
734: UTRAP(0x4b)
735: UTRAP(0x4c)
736: UTRAP(0x4d)
737: UTRAP(0x4e)
738: UTRAP(0x4f)
739: UTRAP(0x50)
740: UTRAP(0x51)
741: UTRAP(0x52)
742: UTRAP(0x53)
743: UTRAP(0x54)
744: UTRAP(0x55)
745: UTRAP(0x56)
746: UTRAP(0x57)
747: UTRAP(0x58)
748: UTRAP(0x59)
749: UTRAP(0x5a)
750: UTRAP(0x5b)
751: UTRAP(0x5c)
752: UTRAP(0x5d)
753: UTRAP(0x5e)
754: UTRAP(0x5f)
755: UTRAP(0x60)
756: UTRAP(0x61)
757: UTRAP(0x62)
758: UTRAP(0x63)
759: UTRAP(0x64)
760: UTRAP(0x65)
761: UTRAP(0x66)
762: UTRAP(0x67)
763: UTRAP(0x68)
764: UTRAP(0x69)
765: UTRAP(0x6a)
766: UTRAP(0x6b)
767: UTRAP(0x6c)
768: UTRAP(0x6d)
769: UTRAP(0x6e)
770: UTRAP(0x6f)
771: UTRAP(0x70)
772: UTRAP(0x71)
773: UTRAP(0x72)
774: UTRAP(0x73)
775: UTRAP(0x74)
776: UTRAP(0x75)
777: UTRAP(0x76)
778: UTRAP(0x77)
779: UTRAP(0x78)
780: UTRAP(0x79)
781: UTRAP(0x7a)
782: UTRAP(0x7b)
783: UTRAP(0x7c)
784: UTRAP(0x7d)
785: UTRAP(0x7e)
786: UTRAP(0x7f)
787: SYSCALL ! 80 = sun syscall
788: BPT ! 81 = pseudo breakpoint instruction
789: TRAP(T_DIV0) ! 82 = divide by zero
790: TRAP(T_FLUSHWIN) ! 83 = flush windows
791: TRAP(T_CLEANWIN) ! 84 = provide clean windows
792: TRAP(T_RANGECHECK) ! 85 = ???
793: TRAP(T_FIXALIGN) ! 86 = fix up unaligned accesses
794: TRAP(T_INTOF) ! 87 = integer overflow
795: SYSCALL ! 88 = svr4 syscall
796: SYSCALL ! 89 = bsd syscall
797: BPT_KGDB_EXEC ! 8a = enter kernel gdb on kernel startup
798: STRAP(0x8b)
799: STRAP(0x8c)
800: STRAP(0x8d)
801: STRAP(0x8e)
802: STRAP(0x8f)
803: STRAP(0x90)
804: STRAP(0x91)
805: STRAP(0x92)
806: STRAP(0x93)
807: STRAP(0x94)
808: STRAP(0x95)
809: STRAP(0x96)
810: STRAP(0x97)
811: STRAP(0x98)
812: STRAP(0x99)
813: STRAP(0x9a)
814: STRAP(0x9b)
815: STRAP(0x9c)
816: STRAP(0x9d)
817: STRAP(0x9e)
818: STRAP(0x9f)
819: STRAP(0xa0)
820: STRAP(0xa1)
821: STRAP(0xa2)
822: STRAP(0xa3)
823: STRAP(0xa4)
824: STRAP(0xa5)
825: STRAP(0xa6)
826: STRAP(0xa7)
827: STRAP(0xa8)
828: STRAP(0xa9)
829: STRAP(0xaa)
830: STRAP(0xab)
831: STRAP(0xac)
832: STRAP(0xad)
833: STRAP(0xae)
834: STRAP(0xaf)
835: STRAP(0xb0)
836: STRAP(0xb1)
837: STRAP(0xb2)
838: STRAP(0xb3)
839: STRAP(0xb4)
840: STRAP(0xb5)
841: STRAP(0xb6)
842: STRAP(0xb7)
843: STRAP(0xb8)
844: STRAP(0xb9)
845: STRAP(0xba)
846: STRAP(0xbb)
847: STRAP(0xbc)
848: STRAP(0xbd)
849: STRAP(0xbe)
850: STRAP(0xbf)
851: STRAP(0xc0)
852: STRAP(0xc1)
853: STRAP(0xc2)
854: STRAP(0xc3)
855: STRAP(0xc4)
856: STRAP(0xc5)
857: STRAP(0xc6)
858: STRAP(0xc7)
859: STRAP(0xc8)
860: STRAP(0xc9)
861: STRAP(0xca)
862: STRAP(0xcb)
863: STRAP(0xcc)
864: STRAP(0xcd)
865: STRAP(0xce)
866: STRAP(0xcf)
867: STRAP(0xd0)
868: STRAP(0xd1)
869: STRAP(0xd2)
870: STRAP(0xd3)
871: STRAP(0xd4)
872: STRAP(0xd5)
873: STRAP(0xd6)
874: STRAP(0xd7)
875: STRAP(0xd8)
876: STRAP(0xd9)
877: STRAP(0xda)
878: STRAP(0xdb)
879: STRAP(0xdc)
880: STRAP(0xdd)
881: STRAP(0xde)
882: STRAP(0xdf)
883: STRAP(0xe0)
884: STRAP(0xe1)
885: STRAP(0xe2)
886: STRAP(0xe3)
887: STRAP(0xe4)
888: STRAP(0xe5)
889: STRAP(0xe6)
890: STRAP(0xe7)
891: STRAP(0xe8)
892: STRAP(0xe9)
893: STRAP(0xea)
894: STRAP(0xeb)
895: STRAP(0xec)
896: STRAP(0xed)
897: STRAP(0xee)
898: STRAP(0xef)
899: STRAP(0xf0)
900: STRAP(0xf1)
901: STRAP(0xf2)
902: STRAP(0xf3)
903: STRAP(0xf4)
904: STRAP(0xf5)
905: STRAP(0xf6)
906: STRAP(0xf7)
907: STRAP(0xf8)
908: STRAP(0xf9)
909: STRAP(0xfa)
910: STRAP(0xfb)
911: STRAP(0xfc)
912: STRAP(0xfd)
913: STRAP(0xfe)
914: STRAP(0xff)
1.1 deraadt 915: #endif
916:
1.52 pk 917: #if defined(SUN4M)
918: trapbase_sun4m:
1.1 deraadt 919: /* trap 0 is special since we cannot receive it */
920: b dostart; nop; nop; nop ! 00 = reset (fake)
1.52 pk 921: VTRAP(T_TEXTFAULT, memfault_sun4m) ! 01 = instr. fetch fault
1.208 pk 922: VTRAP(T_ILLINST, illinst4m) ! 02 = illegal instruction
1.1 deraadt 923: TRAP(T_PRIVINST) ! 03 = privileged instruction
924: TRAP(T_FPDISABLED) ! 04 = fp instr, but EF bit off in psr
925: WINDOW_OF ! 05 = window overflow
926: WINDOW_UF ! 06 = window underflow
927: TRAP(T_ALIGN) ! 07 = address alignment error
928: VTRAP(T_FPE, fp_exception) ! 08 = fp exception
1.52 pk 929: VTRAP(T_DATAFAULT, memfault_sun4m) ! 09 = data fetch fault
1.1 deraadt 930: TRAP(T_TAGOF) ! 0a = tag overflow
931: UTRAP(0x0b)
932: UTRAP(0x0c)
933: UTRAP(0x0d)
934: UTRAP(0x0e)
935: UTRAP(0x0f)
936: UTRAP(0x10)
1.52 pk 937: HARDINT4M(1) ! 11 = level 1 interrupt
938: HARDINT4M(2) ! 12 = level 2 interrupt
939: HARDINT4M(3) ! 13 = level 3 interrupt
940: HARDINT4M(4) ! 14 = level 4 interrupt
941: HARDINT4M(5) ! 15 = level 5 interrupt
942: HARDINT4M(6) ! 16 = level 6 interrupt
943: HARDINT4M(7) ! 17 = level 7 interrupt
944: HARDINT4M(8) ! 18 = level 8 interrupt
945: HARDINT4M(9) ! 19 = level 9 interrupt
946: HARDINT4M(10) ! 1a = level 10 interrupt
947: HARDINT4M(11) ! 1b = level 11 interrupt
948: ZS_INTERRUPT4M ! 1c = level 12 (zs) interrupt
949: HARDINT4M(13) ! 1d = level 13 interrupt
950: HARDINT4M(14) ! 1e = level 14 interrupt
951: VTRAP(15, nmi_sun4m) ! 1f = nonmaskable interrupt
1.1 deraadt 952: UTRAP(0x20)
1.190 pk 953: VTRAP(T_TEXTERROR, memfault_sun4m) ! 21 = instr. fetch error
1.1 deraadt 954: UTRAP(0x22)
955: UTRAP(0x23)
1.25 deraadt 956: TRAP(T_CPDISABLED) ! 24 = coprocessor instr, EC bit off in psr
1.1 deraadt 957: UTRAP(0x25)
958: UTRAP(0x26)
959: UTRAP(0x27)
1.25 deraadt 960: TRAP(T_CPEXCEPTION) ! 28 = coprocessor exception
1.190 pk 961: VTRAP(T_DATAERROR, memfault_sun4m) ! 29 = data fetch error
1.1 deraadt 962: UTRAP(0x2a)
1.52 pk 963: VTRAP(T_STOREBUFFAULT, memfault_sun4m) ! 2b = SuperSPARC store buffer fault
1.1 deraadt 964: UTRAP(0x2c)
965: UTRAP(0x2d)
966: UTRAP(0x2e)
967: UTRAP(0x2f)
968: UTRAP(0x30)
969: UTRAP(0x31)
970: UTRAP(0x32)
971: UTRAP(0x33)
972: UTRAP(0x34)
973: UTRAP(0x35)
1.25 deraadt 974: UTRAP(0x36)
1.1 deraadt 975: UTRAP(0x37)
976: UTRAP(0x38)
977: UTRAP(0x39)
978: UTRAP(0x3a)
979: UTRAP(0x3b)
980: UTRAP(0x3c)
981: UTRAP(0x3d)
982: UTRAP(0x3e)
983: UTRAP(0x3f)
1.25 deraadt 984: UTRAP(0x40)
1.1 deraadt 985: UTRAP(0x41)
986: UTRAP(0x42)
987: UTRAP(0x43)
988: UTRAP(0x44)
989: UTRAP(0x45)
990: UTRAP(0x46)
991: UTRAP(0x47)
992: UTRAP(0x48)
993: UTRAP(0x49)
994: UTRAP(0x4a)
995: UTRAP(0x4b)
996: UTRAP(0x4c)
997: UTRAP(0x4d)
998: UTRAP(0x4e)
999: UTRAP(0x4f)
1000: UTRAP(0x50)
1001: UTRAP(0x51)
1002: UTRAP(0x52)
1003: UTRAP(0x53)
1004: UTRAP(0x54)
1005: UTRAP(0x55)
1006: UTRAP(0x56)
1007: UTRAP(0x57)
1008: UTRAP(0x58)
1009: UTRAP(0x59)
1010: UTRAP(0x5a)
1011: UTRAP(0x5b)
1012: UTRAP(0x5c)
1013: UTRAP(0x5d)
1014: UTRAP(0x5e)
1015: UTRAP(0x5f)
1016: UTRAP(0x60)
1017: UTRAP(0x61)
1018: UTRAP(0x62)
1019: UTRAP(0x63)
1020: UTRAP(0x64)
1021: UTRAP(0x65)
1022: UTRAP(0x66)
1023: UTRAP(0x67)
1024: UTRAP(0x68)
1025: UTRAP(0x69)
1026: UTRAP(0x6a)
1027: UTRAP(0x6b)
1028: UTRAP(0x6c)
1029: UTRAP(0x6d)
1030: UTRAP(0x6e)
1031: UTRAP(0x6f)
1032: UTRAP(0x70)
1033: UTRAP(0x71)
1034: UTRAP(0x72)
1035: UTRAP(0x73)
1036: UTRAP(0x74)
1037: UTRAP(0x75)
1038: UTRAP(0x76)
1039: UTRAP(0x77)
1040: UTRAP(0x78)
1041: UTRAP(0x79)
1042: UTRAP(0x7a)
1043: UTRAP(0x7b)
1044: UTRAP(0x7c)
1045: UTRAP(0x7d)
1046: UTRAP(0x7e)
1047: UTRAP(0x7f)
1.3 deraadt 1048: SYSCALL ! 80 = sun syscall
1.1 deraadt 1049: BPT ! 81 = pseudo breakpoint instruction
1050: TRAP(T_DIV0) ! 82 = divide by zero
1051: TRAP(T_FLUSHWIN) ! 83 = flush windows
1052: TRAP(T_CLEANWIN) ! 84 = provide clean windows
1053: TRAP(T_RANGECHECK) ! 85 = ???
1054: TRAP(T_FIXALIGN) ! 86 = fix up unaligned accesses
1055: TRAP(T_INTOF) ! 87 = integer overflow
1.33 christos 1056: SYSCALL ! 88 = svr4 syscall
1.1 deraadt 1057: SYSCALL ! 89 = bsd syscall
1.33 christos 1058: BPT_KGDB_EXEC ! 8a = enter kernel gdb on kernel startup
1.171 pk 1059: TRAP(T_DBPAUSE) ! 8b = hold CPU for kernel debugger
1.1 deraadt 1060: STRAP(0x8c)
1061: STRAP(0x8d)
1062: STRAP(0x8e)
1063: STRAP(0x8f)
1064: STRAP(0x90)
1065: STRAP(0x91)
1066: STRAP(0x92)
1067: STRAP(0x93)
1068: STRAP(0x94)
1069: STRAP(0x95)
1070: STRAP(0x96)
1071: STRAP(0x97)
1072: STRAP(0x98)
1073: STRAP(0x99)
1074: STRAP(0x9a)
1075: STRAP(0x9b)
1076: STRAP(0x9c)
1077: STRAP(0x9d)
1078: STRAP(0x9e)
1079: STRAP(0x9f)
1080: STRAP(0xa0)
1081: STRAP(0xa1)
1082: STRAP(0xa2)
1083: STRAP(0xa3)
1084: STRAP(0xa4)
1085: STRAP(0xa5)
1086: STRAP(0xa6)
1087: STRAP(0xa7)
1088: STRAP(0xa8)
1089: STRAP(0xa9)
1090: STRAP(0xaa)
1091: STRAP(0xab)
1092: STRAP(0xac)
1093: STRAP(0xad)
1094: STRAP(0xae)
1095: STRAP(0xaf)
1096: STRAP(0xb0)
1097: STRAP(0xb1)
1098: STRAP(0xb2)
1099: STRAP(0xb3)
1100: STRAP(0xb4)
1101: STRAP(0xb5)
1102: STRAP(0xb6)
1103: STRAP(0xb7)
1104: STRAP(0xb8)
1105: STRAP(0xb9)
1106: STRAP(0xba)
1107: STRAP(0xbb)
1108: STRAP(0xbc)
1109: STRAP(0xbd)
1110: STRAP(0xbe)
1111: STRAP(0xbf)
1112: STRAP(0xc0)
1113: STRAP(0xc1)
1114: STRAP(0xc2)
1115: STRAP(0xc3)
1116: STRAP(0xc4)
1117: STRAP(0xc5)
1118: STRAP(0xc6)
1119: STRAP(0xc7)
1120: STRAP(0xc8)
1121: STRAP(0xc9)
1122: STRAP(0xca)
1123: STRAP(0xcb)
1124: STRAP(0xcc)
1125: STRAP(0xcd)
1126: STRAP(0xce)
1127: STRAP(0xcf)
1128: STRAP(0xd0)
1129: STRAP(0xd1)
1130: STRAP(0xd2)
1131: STRAP(0xd3)
1132: STRAP(0xd4)
1133: STRAP(0xd5)
1134: STRAP(0xd6)
1135: STRAP(0xd7)
1136: STRAP(0xd8)
1137: STRAP(0xd9)
1138: STRAP(0xda)
1139: STRAP(0xdb)
1140: STRAP(0xdc)
1141: STRAP(0xdd)
1142: STRAP(0xde)
1143: STRAP(0xdf)
1144: STRAP(0xe0)
1145: STRAP(0xe1)
1146: STRAP(0xe2)
1147: STRAP(0xe3)
1148: STRAP(0xe4)
1149: STRAP(0xe5)
1150: STRAP(0xe6)
1151: STRAP(0xe7)
1152: STRAP(0xe8)
1153: STRAP(0xe9)
1154: STRAP(0xea)
1155: STRAP(0xeb)
1156: STRAP(0xec)
1157: STRAP(0xed)
1158: STRAP(0xee)
1159: STRAP(0xef)
1160: STRAP(0xf0)
1161: STRAP(0xf1)
1162: STRAP(0xf2)
1163: STRAP(0xf3)
1164: STRAP(0xf4)
1165: STRAP(0xf5)
1166: STRAP(0xf6)
1167: STRAP(0xf7)
1168: STRAP(0xf8)
1169: STRAP(0xf9)
1170: STRAP(0xfa)
1171: STRAP(0xfb)
1172: STRAP(0xfc)
1173: STRAP(0xfd)
1174: STRAP(0xfe)
1175: STRAP(0xff)
1.52 pk 1176: #endif
1.1 deraadt 1177:
1.20 deraadt 1178: /*
1.52 pk 1179: * Pad the trap table to max page size.
1180: * Trap table size is 0x100 * 4instr * 4byte/instr = 4096 bytes;
1181: * need to .skip 4096 to pad to page size iff. the number of trap tables
1182: * defined above is odd.
1.20 deraadt 1183: */
1.65 mycroft 1184: #if (defined(SUN4) + defined(SUN4C) + defined(SUN4M)) % 2 == 1
1.20 deraadt 1185: .skip 4096
1.52 pk 1186: #endif
1.20 deraadt 1187:
1.173 pk 1188: /* redzones don't work currently in multi-processor mode */
 1189: #if defined(DEBUG) && !defined(MULTIPROCESSOR)
1.1 deraadt 1190: /*
 1191: * A hardware red zone is impossible. We simulate one in software by
 1192: * keeping a `red zone' pointer; if %sp becomes less than this, we panic.
 1193: * This is expensive and is only enabled when debugging.
 1194: */
1.97 pk 1195:
1.99 pk 1196: /* `redzone' is located in the per-CPU information structure */
1.97 pk 1197: _redzone = CPUINFO_VA + CPUINFO_REDZONE
 1198: .data
1.1 deraadt 1199: #define REDSTACK 2048 /* size of `panic: stack overflow' region */
/* Private stack region used to run panic() once the real stack has overflowed. */
 1200: _redstack:
 1201: .skip REDSTACK
 1202: .text
 1203: Lpanic_red:
 1204: .asciz "stack overflow"
1.52 pk 1205: _ALIGN
1.1 deraadt 1206:
 1207: /* set stack pointer redzone to base+minstack; alters base */
 1208: #define SET_SP_REDZONE(base, tmp) \
 1209: add base, REDSIZE, base; \
 1210: sethi %hi(_redzone), tmp; \
 1211: st base, [tmp + %lo(_redzone)]
 1212:
 1213: /* variant with a constant */
 1214: #define SET_SP_REDZONE_CONST(const, tmp1, tmp2) \
 1215: set (const) + REDSIZE, tmp1; \
 1216: sethi %hi(_redzone), tmp2; \
 1217: st tmp1, [tmp2 + %lo(_redzone)]
 1218:
1.97 pk 1219: /* variant with a variable & offset */
 1220: #define SET_SP_REDZONE_VAR(var, offset, tmp1, tmp2) \
 1221: sethi %hi(var), tmp1; \
 1222: ld [tmp1 + %lo(var)], tmp1; \
 1223: sethi %hi(offset), tmp2; \
 1224: add tmp1, tmp2, tmp1; \
 1225: SET_SP_REDZONE(tmp1, tmp2)
 1226:
1.1 deraadt 1227: /* check stack pointer against redzone (uses two temps) */
/*
 * On overflow: disarm the redzone (clear _redzone so the panic path cannot
 * re-trigger it), switch %sp to the private _redstack, pre-set `panicstr'
 * to keep panic() from lowering the IPL, raise PSR_PIL, re-enable traps,
 * and call panic("stack overflow").  Falls through at 7: when %sp is OK.
 */
 1228: #define CHECK_SP_REDZONE(t1, t2) \
 1229: sethi %hi(_redzone), t1; \
 1230: ld [t1 + %lo(_redzone)], t2; \
 1231: cmp %sp, t2; /* if sp >= t2, not in red zone */ \
 1232: bgeu 7f; nop; /* and can continue normally */ \
 1233: /* move to panic stack */ \
 1234: st %g0, [t1 + %lo(_redzone)]; \
 1235: set _redstack + REDSTACK - 96, %sp; \
 1236: /* prevent panic() from lowering ipl */ \
1.213 pk 1237: sethi %hi(_C_LABEL(panicstr)), t1; \
1.1 deraadt 1238: set Lpanic_red, t2; \
1.121 christos 1239: st t2, [t1 + %lo(_C_LABEL(panicstr))]; \
1.1 deraadt 1240: rd %psr, t1; /* t1 = splhigh() */ \
 1241: or t1, PSR_PIL, t2; \
 1242: wr t2, 0, %psr; \
 1243: wr t2, PSR_ET, %psr; /* turn on traps */ \
 1244: nop; nop; nop; \
1.4 deraadt 1245: save %sp, -CCFSZ, %sp; /* preserve current window */ \
1.1 deraadt 1246: sethi %hi(Lpanic_red), %o0; \
1.121 christos 1247: call _C_LABEL(panic); or %o0, %lo(Lpanic_red), %o0; \
1.1 deraadt 1248: 7:
 1249:
 1250: #else
 1251:
/* Non-DEBUG (or MULTIPROCESSOR) kernels: redzone checks compile away to nothing. */
 1252: #define SET_SP_REDZONE(base, tmp)
 1253: #define SET_SP_REDZONE_CONST(const, t1, t2)
1.98 pk 1254: #define SET_SP_REDZONE_VAR(var, offset, t1, t2)
1.1 deraadt 1255: #define CHECK_SP_REDZONE(t1, t2)
1.97 pk 1256: #endif /* DEBUG */
1.1 deraadt 1257:
1258: /*
1259: * The window code must verify user stack addresses before using them.
1260: * A user stack pointer is invalid if:
1261: * - it is not on an 8 byte boundary;
1262: * - its pages (a register window, being 64 bytes, can occupy
1263: * two pages) are not readable or writable.
1264: * We define three separate macros here for testing user stack addresses.
1265: *
1266: * PTE_OF_ADDR locates a PTE, branching to a `bad address'
1267: * handler if the stack pointer points into the hole in the
1268: * address space (i.e., top 3 bits are not either all 1 or all 0);
1269: * CMP_PTE_USER_READ compares the located PTE against `user read' mode;
1270: * CMP_PTE_USER_WRITE compares the located PTE against `user write' mode.
1271: * The compares give `equal' if read or write is OK.
1272: *
1273: * Note that the user stack pointer usually points into high addresses
1274: * (top 3 bits all 1), so that is what we check first.
1275: *
1276: * The code below also assumes that PTE_OF_ADDR is safe in a delay
 1277: * slot; it is, as it merely sets its `pte' register to a temporary value.
1278: */
1.52 pk 1279: #if defined(SUN4) || defined(SUN4C)
1.1 deraadt 1280: /* input: addr, output: pte; aux: bad address label */
/*
 * Branches to `bad' if addr lies in the sun4/sun4c VA hole (top 3 bits
 * of the address neither all 0 nor all 1 -- see the comment above);
 * otherwise leaves the page-aligned address in `pte' for the ASI_PTE probe.
 */
1.52 pk 1281: #define PTE_OF_ADDR4_4C(addr, pte, bad, page_offset) \
1.1 deraadt 1282: sra addr, PG_VSHIFT, pte; \
 1283: cmp pte, -1; \
1.13 deraadt 1284: be,a 1f; andn addr, page_offset, pte; \
1.1 deraadt 1285: tst pte; \
 1286: bne bad; EMPTY; \
1.13 deraadt 1287: andn addr, page_offset, pte; \
1.1 deraadt 1288: 1:
 1289:
 1290: /* input: pte; output: condition codes */
/* Z set iff the page is readable from user mode (the write bit is masked off). */
1.52 pk 1291: #define CMP_PTE_USER_READ4_4C(pte) \
1.1 deraadt 1292: lda [pte] ASI_PTE, pte; \
 1293: srl pte, PG_PROTSHIFT, pte; \
 1294: andn pte, (PG_W >> PG_PROTSHIFT), pte; \
 1295: cmp pte, PG_PROTUREAD
 1296:
 1297: /* input: pte; output: condition codes */
/* Z set iff the page is writable from user mode. */
1.52 pk 1298: #define CMP_PTE_USER_WRITE4_4C(pte) \
1.1 deraadt 1299: lda [pte] ASI_PTE, pte; \
 1300: srl pte, PG_PROTSHIFT, pte; \
 1301: cmp pte, PG_PROTUWRITE
1.9 deraadt 1302: #endif
1.1 deraadt 1303:
 1304: /*
1.52 pk 1305: * The Sun4M does not have the memory hole that the 4C does. Thus all
 1306: * we need to do here is clear the page offset from addr.
 1307: */
 1308: #if defined(SUN4M)
 1309: #define PTE_OF_ADDR4M(addr, pte, bad, page_offset) \
 1310: andn addr, page_offset, pte
 1311:
1.94 pk 1312: /*
 1313: * After obtaining the PTE through ASI_SRMMUFP, we read the Sync Fault
 1314: * Status register. This is necessary on Hypersparcs which stores and
 1315: * locks the fault address and status registers if the translation
 1316: * fails (thanks to Chris Torek for finding this quirk).
 1317: */
/* Z set iff the PTE is valid and user-readable (R_R, RW_RW, RX_RX, RWX_RWX or R_RW). */
 1318: #define CMP_PTE_USER_READ4M(pte, tmp) \
1.205 pk 1319: /*or pte, ASI_SRMMUFP_L3, pte; -- ASI_SRMMUFP_L3 == 0 */ \
1.52 pk 1320: lda [pte] ASI_SRMMUFP, pte; \
1.94 pk 1321: set SRMMU_SFSR, tmp; \
1.200 pk 1322: lda [tmp] ASI_SRMMU, %g0; \
 1323: and pte, SRMMU_TETYPE, tmp; \
 1324: /* Check for valid pte */ \
 1325: cmp tmp, SRMMU_TEPTE; \
 1326: bnz 8f; \
 1327: and pte, SRMMU_PROT_MASK, pte; \
 1328: /* check for one of: R_R, RW_RW, RX_RX and RWX_RWX */ \
 1329: cmp pte, PPROT_X_X; \
 1330: bcs,a 8f; \
 1331: /* Now we have carry set if OK; turn it into Z bit */ \
 1332: subxcc %g0, -1, %g0; \
 1333: /* One more case to check: R_RW */ \
 1334: cmp pte, PPROT_R_RW; \
1.59 pk 1335: 8:
1.52 pk 1336:
1.58 pk 1337:
 1338: /* note: PTE bit 4 set implies no user writes */
/*
 * Z set iff the PTE is valid and user-writable.  The leading `or' with
 * ASI_SRMMUFP_L3 is effectively a no-op since ASI_SRMMUFP_L3 == 0
 * (cf. the commented-out line in CMP_PTE_USER_READ4M above).
 */
1.94 pk 1339: #define CMP_PTE_USER_WRITE4M(pte, tmp) \
1.52 pk 1340: or pte, ASI_SRMMUFP_L3, pte; \
 1341: lda [pte] ASI_SRMMUFP, pte; \
1.94 pk 1342: set SRMMU_SFSR, tmp; \
 1343: lda [tmp] ASI_SRMMU, %g0; \
1.58 pk 1344: and pte, (SRMMU_TETYPE | 0x14), pte; \
 1345: cmp pte, (SRMMU_TEPTE | PPROT_WRITE)
1.52 pk 1346: #endif /* 4m */
1347:
/*
 * Compile-time dispatch for the generic PTE macros.  When only one MMU
 * flavour is configured, the generic names map directly onto that
 * flavour.  When both are configured, each expansion begins with a
 * branch at a caller-supplied label (the NOP_ON_4M_* names used by
 * ctw_user), presumably patched at startup once the CPU type is known
 * -- TODO confirm against the boot-time patch code, not visible here.
 */
 1348: #if defined(SUN4M) && !(defined(SUN4C) || defined(SUN4))
1.64 pk 1349:
1.62 pk 1350: #define PTE_OF_ADDR(addr, pte, bad, page_offset, label) \
 1351: PTE_OF_ADDR4M(addr, pte, bad, page_offset)
1.94 pk 1352: #define CMP_PTE_USER_WRITE(pte, tmp, label) CMP_PTE_USER_WRITE4M(pte,tmp)
 1353: #define CMP_PTE_USER_READ(pte, tmp, label) CMP_PTE_USER_READ4M(pte,tmp)
1.64 pk 1354:
1.52 pk 1355: #elif (defined(SUN4C) || defined(SUN4)) && !defined(SUN4M)
1.64 pk 1356:
1.62 pk 1357: #define PTE_OF_ADDR(addr, pte, bad, page_offset,label) \
 1358: PTE_OF_ADDR4_4C(addr, pte, bad, page_offset)
 1359: #define CMP_PTE_USER_WRITE(pte, tmp, label) CMP_PTE_USER_WRITE4_4C(pte)
 1360: #define CMP_PTE_USER_READ(pte, tmp, label) CMP_PTE_USER_READ4_4C(pte)
1.64 pk 1361:
1.52 pk 1362: #else /* both defined, ugh */
1.64 pk 1363:
1.62 pk 1364: #define PTE_OF_ADDR(addr, pte, bad, page_offset, label) \
 1365: label: b,a 2f; \
 1366: PTE_OF_ADDR4M(addr, pte, bad, page_offset); \
 1367: b,a 3f; \
 1368: 2: \
 1369: PTE_OF_ADDR4_4C(addr, pte, bad, page_offset); \
 1370: 3:
1.52 pk 1371:
1.62 pk 1372: #define CMP_PTE_USER_READ(pte, tmp, label) \
 1373: label: b,a 1f; \
1.94 pk 1374: CMP_PTE_USER_READ4M(pte,tmp); \
1.62 pk 1375: b,a 2f; \
 1376: 1: \
 1377: CMP_PTE_USER_READ4_4C(pte); \
 1378: 2:
1.52 pk 1379:
1.62 pk 1380: #define CMP_PTE_USER_WRITE(pte, tmp, label) \
 1381: label: b,a 1f; \
1.94 pk 1382: CMP_PTE_USER_WRITE4M(pte,tmp); \
1.62 pk 1383: b,a 2f; \
 1384: 1: \
 1385: CMP_PTE_USER_WRITE4_4C(pte); \
 1386: 2:
1.52 pk 1387: #endif
1388:
1389:
1390: /*
1.1 deraadt 1391: * The calculations in PTE_OF_ADDR and CMP_PTE_USER_* are rather slow:
1392: * in particular, according to Gordon Irlam of the University of Adelaide
1393: * in Australia, these consume at least 18 cycles on an SS1 and 37 on an
1394: * SS2. Hence, we try to avoid them in the common case.
1395: *
1396: * A chunk of 64 bytes is on a single page if and only if:
1397: *
1.13 deraadt 1398: * ((base + 64 - 1) & ~(NBPG-1)) == (base & ~(NBPG-1))
1.1 deraadt 1399: *
1400: * Equivalently (and faster to test), the low order bits (base & 4095) must
1401: * be small enough so that the sum (base + 63) does not carry out into the
1402: * upper page-address bits, i.e.,
1403: *
1.13 deraadt 1404: * (base & (NBPG-1)) < (NBPG - 63)
1.1 deraadt 1405: *
1406: * so we allow testing that here. This macro is also assumed to be safe
1407: * in a delay slot (modulo overwriting its temporary).
1408: */
/*
 * Sets the `less than' condition iff the 64-byte register window at
 * addr fits on a single page (see derivation in the comment above).
 * Side effect: decrements page_offset by 62 -- callers that need
 * `pgofset' again afterwards must restore it (see ctw_user).
 */
1.13 deraadt 1409: #define SLT_IF_1PAGE_RW(addr, tmp, page_offset) \
 1410: and addr, page_offset, tmp; \
 1411: sub page_offset, 62, page_offset; \
 1412: cmp tmp, page_offset
1.1 deraadt 1413:
1414: /*
1415: * Every trap that enables traps must set up stack space.
1416: * If the trap is from user mode, this involves switching to the kernel
1417: * stack for the current process, and we must also set cpcb->pcb_uw
1418: * so that the window overflow handler can tell user windows from kernel
1419: * windows.
1420: *
1421: * The number of user windows is:
1422: *
1423: * cpcb->pcb_uw = (cpcb->pcb_wim - 1 - CWP) % nwindows
1424: *
1425: * (where pcb_wim = log2(current %wim) and CWP = low 5 bits of %psr).
1426: * We compute this expression by table lookup in uwtab[CWP - pcb_wim],
1427: * which has been set up as:
1428: *
1429: * for i in [-nwin+1 .. nwin-1]
1430: * uwtab[i] = (nwin - 1 - i) % nwin;
1431: *
1432: * (If you do not believe this works, try it for yourself.)
1433: *
1434: * We also keep one or two more tables:
1435: *
1436: * for i in 0..nwin-1
1437: * wmask[i] = 1 << ((i + 1) % nwindows);
1438: *
1439: * wmask[CWP] tells whether a `rett' would return into the invalid window.
1440: */
 1441: .data
/*
 * Lookup tables described in the comment above; presumably filled in
 * during startup -- the initialization code is not in this view.
 */
 1442: .skip 32 ! alignment byte & negative indices
 1443: uwtab: .skip 32 ! u_char uwtab[-31..31];
 1444: wmask: .skip 32 ! u_char wmask[0..31];
 1445:
 1446: .text
1447: /*
1448: * Things begin to grow uglier....
1449: *
1450: * Each trap handler may (always) be running in the trap window.
1451: * If this is the case, it cannot enable further traps until it writes
1452: * the register windows into the stack (or, if the stack is no good,
1453: * the current pcb).
1454: *
1455: * ASSUMPTIONS: TRAP_SETUP() is called with:
1456: * %l0 = %psr
1457: * %l1 = return pc
1458: * %l2 = return npc
1459: * %l3 = (some value that must not be altered)
1460: * which means we have 4 registers to work with.
1461: *
1462: * The `stackspace' argument is the number of stack bytes to allocate
1463: * for register-saving, and must be at least -64 (and typically more,
1464: * for global registers and %y).
1465: *
1466: * Trapframes should use -CCFSZ-80. (80 = sizeof(struct trapframe);
1467: * see trap.h. This basically means EVERYONE. Interrupt frames could
1468: * get away with less, but currently do not.)
1469: *
1470: * The basic outline here is:
1471: *
1472: * if (trap came from kernel mode) {
1473: * if (we are in the trap window)
1474: * save it away;
1475: * %sp = %fp - stackspace;
1476: * } else {
1477: * compute the number of user windows;
1478: * if (we are in the trap window)
1479: * save it away;
1480: * %sp = (top of kernel stack) - stackspace;
1481: * }
1482: *
1483: * Again, the number of user windows is:
1484: *
1485: * cpcb->pcb_uw = (cpcb->pcb_wim - 1 - CWP) % nwindows
1486: *
1487: * (where pcb_wim = log2(current %wim) and CWP is the low 5 bits of %psr),
1488: * and this is computed as `uwtab[CWP - pcb_wim]'.
1489: *
1490: * NOTE: if you change this code, you will have to look carefully
1491: * at the window overflow and underflow handlers and make sure they
1492: * have similar changes made as needed.
1493: */
/*
 * Call clean_trap_window: the jmpl leaves its own address in %l4
 * (the callee returns via `jmp %l4 + 8') and the delay slot parks
 * %g7 in %l7, which clean_trap_window restores before returning.
 */
 1494: #define CALL_CLEAN_TRAP_WINDOW \
 1495: sethi %hi(clean_trap_window), %l7; \
 1496: jmpl %l7 + %lo(clean_trap_window), %l4; \
 1497: mov %g7, %l7 /* save %g7 in %l7 for clean_trap_window */
 1498:
/*
 * See the block comment above for the contract: on entry %l0=%psr,
 * %l1=pc, %l2=npc, %l3 preserved.  Kernel-mode traps continue on the
 * current stack (cleaning the trap window first if needed); user-mode
 * traps record the user-window count in cpcb->pcb_uw for the window
 * overflow handler and switch %sp to the top of the kernel stack.
 */
 1499: #define TRAP_SETUP(stackspace) \
1.173 pk 1500: TRAP_TRACE(%l3,%l5); \
1.1 deraadt 1501: rd %wim, %l4; \
 1502: mov 1, %l5; \
 1503: sll %l5, %l0, %l5; \
 1504: btst PSR_PS, %l0; \
 1505: bz 1f; \
 1506: btst %l5, %l4; \
 1507: /* came from kernel mode; cond codes indicate trap window */ \
 1508: bz,a 3f; \
 1509: add %fp, stackspace, %sp; /* want to just set %sp */ \
 1510: CALL_CLEAN_TRAP_WINDOW; /* but maybe need to clean first */ \
 1511: b 3f; \
 1512: add %fp, stackspace, %sp; \
 1513: 1: \
 1514: /* came from user mode: compute pcb_nw */ \
1.111 pk 1515: sethi %hi(cpcb), %l6; \
 1516: ld [%l6 + %lo(cpcb)], %l6; \
1.1 deraadt 1517: ld [%l6 + PCB_WIM], %l5; \
 1518: and %l0, 31, %l4; \
 1519: sub %l4, %l5, %l5; \
 1520: set uwtab, %l4; \
 1521: ldub [%l4 + %l5], %l5; \
 1522: st %l5, [%l6 + PCB_UW]; \
 1523: /* cond codes still indicate whether in trap window */ \
 1524: bz,a 2f; \
1.13 deraadt 1525: sethi %hi(USPACE+(stackspace)), %l5; \
1.1 deraadt 1526: /* yes, in trap window; must clean it */ \
 1527: CALL_CLEAN_TRAP_WINDOW; \
1.111 pk 1528: sethi %hi(cpcb), %l6; \
 1529: ld [%l6 + %lo(cpcb)], %l6; \
1.13 deraadt 1530: sethi %hi(USPACE+(stackspace)), %l5; \
1.1 deraadt 1531: 2: \
 1532: /* trap window is (now) clean: set %sp */ \
1.13 deraadt 1533: or %l5, %lo(USPACE+(stackspace)), %l5; \
1.1 deraadt 1534: add %l6, %l5, %sp; \
 1535: SET_SP_REDZONE(%l6, %l5); \
 1536: 3: \
 1537: CHECK_SP_REDZONE(%l6, %l5)
1538:
1539: /*
1540: * Interrupt setup is almost exactly like trap setup, but we need to
1541: * go to the interrupt stack if (a) we came from user mode or (b) we
1542: * came from kernel mode on the kernel stack.
1543: */
1.142 mrg 1544: #if defined(MULTIPROCESSOR)
1.98 pk 1545: /*
 1546: * SMP kernels: read `eintstack' from cpuinfo structure. Since the
 1547: * location of the interrupt stack is not known in advance, we need
 1548: * to check the current %fp against both ends of the stack space.
 1549: */
1.97 pk 1550: #define INTR_SETUP(stackspace) \
1.173 pk 1551: TRAP_TRACE(%l3,%l5); \
1.97 pk 1552: rd %wim, %l4; \
 1553: mov 1, %l5; \
 1554: sll %l5, %l0, %l5; \
 1555: btst PSR_PS, %l0; \
 1556: bz 1f; \
 1557: btst %l5, %l4; \
 1558: /* came from kernel mode; cond codes still indicate trap window */ \
 1559: bz,a 0f; \
1.101 pk 1560: sethi %hi(_EINTSTACKP), %l7; \
1.97 pk 1561: CALL_CLEAN_TRAP_WINDOW; \
1.101 pk 1562: sethi %hi(_EINTSTACKP), %l7; \
1.97 pk 1563: 0: /* now if not intstack > %fp >= eintstack, we were on the kernel stack */ \
1.101 pk 1564: ld [%l7 + %lo(_EINTSTACKP)], %l7; \
1.97 pk 1565: cmp %fp, %l7; \
 1566: bge,a 3f; /* %fp >= eintstack */ \
 1567: add %l7, stackspace, %sp; /* so switch to intstack */ \
 1568: sethi %hi(INT_STACK_SIZE), %l6; \
1.98 pk 1569: sub %l7, %l6, %l6; \
 1570: cmp %fp, %l6; \
1.97 pk 1571: blu,a 3f; /* %fp < intstack */ \
 1572: add %l7, stackspace, %sp; /* so switch to intstack */ \
 1573: b 4f; \
 1574: add %fp, stackspace, %sp; /* else stay on intstack */ \
 1575: 1: \
 1576: /* came from user mode: compute pcb_nw */ \
1.111 pk 1577: sethi %hi(cpcb), %l6; \
 1578: ld [%l6 + %lo(cpcb)], %l6; \
1.97 pk 1579: ld [%l6 + PCB_WIM], %l5; \
 1580: and %l0, 31, %l4; \
 1581: sub %l4, %l5, %l5; \
 1582: set uwtab, %l4; \
 1583: ldub [%l4 + %l5], %l5; \
 1584: st %l5, [%l6 + PCB_UW]; \
 1585: /* cond codes still indicate whether in trap window */ \
 1586: bz,a 2f; \
1.101 pk 1587: sethi %hi(_EINTSTACKP), %l7; \
1.97 pk 1588: /* yes, in trap window; must save regs */ \
 1589: CALL_CLEAN_TRAP_WINDOW; \
1.101 pk 1590: sethi %hi(_EINTSTACKP), %l7; \
1.97 pk 1591: 2: \
1.101 pk 1592: ld [%l7 + %lo(_EINTSTACKP)], %l7; \
1.97 pk 1593: add %l7, stackspace, %sp; \
 1594: 3: \
1.101 pk 1595: SET_SP_REDZONE_VAR(_EINTSTACKP, -INT_STACK_SIZE, %l6, %l5); \
1.97 pk 1596: 4: \
 1597: CHECK_SP_REDZONE(%l6, %l5)
1.98 pk 1598:
1.97 pk 1599: #else /* MULTIPROCESSOR */
1.98 pk 1600:
/*
 * Uniprocessor: `eintstack' is a link-time constant, so a single
 * compare against %fp decides kernel stack vs. interrupt stack.
 */
1.1 deraadt 1601: #define INTR_SETUP(stackspace) \
1.173 pk 1602: TRAP_TRACE(%l3,%l5); \
1.1 deraadt 1603: rd %wim, %l4; \
 1604: mov 1, %l5; \
 1605: sll %l5, %l0, %l5; \
 1606: btst PSR_PS, %l0; \
 1607: bz 1f; \
 1608: btst %l5, %l4; \
 1609: /* came from kernel mode; cond codes still indicate trap window */ \
 1610: bz,a 0f; \
1.111 pk 1611: sethi %hi(_C_LABEL(eintstack)), %l7; \
1.1 deraadt 1612: CALL_CLEAN_TRAP_WINDOW; \
1.111 pk 1613: sethi %hi(_C_LABEL(eintstack)), %l7; \
1.1 deraadt 1614: 0: /* now if %fp >= eintstack, we were on the kernel stack */ \
 1615: cmp %fp, %l7; \
 1616: bge,a 3f; \
 1617: add %l7, stackspace, %sp; /* so switch to intstack */ \
 1618: b 4f; \
 1619: add %fp, stackspace, %sp; /* else stay on intstack */ \
 1620: 1: \
 1621: /* came from user mode: compute pcb_nw */ \
1.111 pk 1622: sethi %hi(cpcb), %l6; \
 1623: ld [%l6 + %lo(cpcb)], %l6; \
1.1 deraadt 1624: ld [%l6 + PCB_WIM], %l5; \
 1625: and %l0, 31, %l4; \
 1626: sub %l4, %l5, %l5; \
 1627: set uwtab, %l4; \
 1628: ldub [%l4 + %l5], %l5; \
 1629: st %l5, [%l6 + PCB_UW]; \
 1630: /* cond codes still indicate whether in trap window */ \
 1631: bz,a 2f; \
1.111 pk 1632: sethi %hi(_C_LABEL(eintstack)), %l7; \
1.1 deraadt 1633: /* yes, in trap window; must save regs */ \
 1634: CALL_CLEAN_TRAP_WINDOW; \
1.111 pk 1635: sethi %hi(_C_LABEL(eintstack)), %l7; \
1.1 deraadt 1636: 2: \
 1637: add %l7, stackspace, %sp; \
 1638: 3: \
1.111 pk 1639: SET_SP_REDZONE_CONST(_C_LABEL(intstack), %l6, %l5); \
1.1 deraadt 1640: 4: \
 1641: CHECK_SP_REDZONE(%l6, %l5)
1.97 pk 1642: #endif /* MULTIPROCESSOR */
1.1 deraadt 1643:
1644: /*
1645: * Handler for making the trap window shiny clean.
1646: *
1647: * On entry:
1648: * cpcb->pcb_nw = number of user windows
1649: * %l0 = %psr
1650: * %l1 must not be clobbered
1651: * %l2 must not be clobbered
1652: * %l3 must not be clobbered
1653: * %l4 = address for `return'
1654: * %l7 = saved %g7 (we put this in a delay slot above, to save work)
1655: *
1656: * On return:
1657: * %wim has changed, along with cpcb->pcb_wim
1658: * %g7 has been restored
1659: *
1660: * Normally, we push only one window.
1661: */
 1662: clean_trap_window:
 1663: mov %g5, %l5 ! save %g5
 1664: mov %g6, %l6 ! ... and %g6
 1665: /* mov %g7, %l7 ! ... and %g7 (already done for us) */
1.111 pk 1666: sethi %hi(cpcb), %g6 ! get current pcb
 1667: ld [%g6 + %lo(cpcb)], %g6
1.1 deraadt 1668:
 1669: /* Figure out whether it is a user window (cpcb->pcb_uw > 0). */
 1670: ld [%g6 + PCB_UW], %g7
 1671: deccc %g7
 1672: bge ctw_user
 1673: save %g0, %g0, %g0 ! in any case, enter window to save
 1674:
 1675: /* The window to be pushed is a kernel window. */
 1676: std %l0, [%sp + (0*8)]
! Common store path: the user-window path joins here (after validating
! %sp) with %l0/%l1 already stored; dump the rest of the window to its
! stack frame, then advance %wim past the window just pushed.
 1677: ctw_merge:
 1678: std %l2, [%sp + (1*8)]
 1679: std %l4, [%sp + (2*8)]
 1680: std %l6, [%sp + (3*8)]
 1681: std %i0, [%sp + (4*8)]
 1682: std %i2, [%sp + (5*8)]
 1683: std %i4, [%sp + (6*8)]
 1684: std %i6, [%sp + (7*8)]
 1685:
 1686: /* Set up new window invalid mask, and update cpcb->pcb_wim. */
 1687: rd %psr, %g7 ! g7 = (junk << 5) + new_cwp
 1688: mov 1, %g5 ! g5 = 1 << new_cwp;
 1689: sll %g5, %g7, %g5
 1690: wr %g5, 0, %wim ! setwim(g5);
 1691: and %g7, 31, %g7 ! cpcb->pcb_wim = g7 & 31;
1.111 pk 1692: sethi %hi(cpcb), %g6 ! re-get current pcb
 1693: ld [%g6 + %lo(cpcb)], %g6
1.1 deraadt 1694: st %g7, [%g6 + PCB_WIM]
 1695: nop
 1696: restore ! back to trap window
 1697:
 1698: mov %l5, %g5 ! restore g5
 1699: mov %l6, %g6 ! ... and g6
 1700: jmp %l4 + 8 ! return to caller
 1701: mov %l7, %g7 ! ... and g7
 1702: /* NOTREACHED */
 1703:
 1704: ctw_user:
 1705: /*
 1706: * The window to be pushed is a user window.
 1707: * We must verify the stack pointer (alignment & permissions).
 1708: * See comments above definition of PTE_OF_ADDR.
 1709: */
 1710: st %g7, [%g6 + PCB_UW] ! cpcb->pcb_uw--;
 1711: btst 7, %sp ! if not aligned,
 1712: bne ctw_invalid ! choke on it
 1713: EMPTY
1.13 deraadt 1714:
1.111 pk 1715: sethi %hi(_C_LABEL(pgofset)), %g6 ! trash %g6=curpcb
 1716: ld [%g6 + %lo(_C_LABEL(pgofset))], %g6
1.62 pk 1717: PTE_OF_ADDR(%sp, %g7, ctw_invalid, %g6, NOP_ON_4M_1)
 1718: CMP_PTE_USER_WRITE(%g7, %g5, NOP_ON_4M_2) ! likewise if not writable
1.1 deraadt 1719: bne ctw_invalid
 1720: EMPTY
1.52 pk 1721: /* Note side-effect of SLT_IF_1PAGE_RW: decrements %g6 by 62 */
1.13 deraadt 1722: SLT_IF_1PAGE_RW(%sp, %g7, %g6)
1.1 deraadt 1723: bl,a ctw_merge ! all ok if only 1
 1724: std %l0, [%sp]
! Window straddles a page boundary: validate the last address as well.
 1725: add %sp, 7*8, %g5 ! check last addr too
1.154 thorpej 1726: add %g6, 62, %g6 /* restore %g6 to `pgofset' */
1.62 pk 1727: PTE_OF_ADDR(%g5, %g7, ctw_invalid, %g6, NOP_ON_4M_3)
 1728: CMP_PTE_USER_WRITE(%g7, %g6, NOP_ON_4M_4)
1.1 deraadt 1729: be,a ctw_merge ! all ok: store <l0,l1> and merge
 1730: std %l0, [%sp]
 1731:
 1732: /*
 1733: * The window we wanted to push could not be pushed.
 1734: * Instead, save ALL user windows into the pcb.
 1735: * We will notice later that we did this, when we
 1736: * get ready to return from our trap or syscall.
 1737: *
 1738: * The code here is run rarely and need not be optimal.
 1739: */
 1740: ctw_invalid:
 1741: /*
 1742: * Reread cpcb->pcb_uw. We decremented this earlier,
 1743: * so it is off by one.
 1744: */
1.111 pk 1745: sethi %hi(cpcb), %g6 ! re-get current pcb
 1746: ld [%g6 + %lo(cpcb)], %g6
1.13 deraadt 1747:
1.1 deraadt 1748: ld [%g6 + PCB_UW], %g7 ! (number of user windows) - 1
 1749: add %g6, PCB_RW, %g5
 1750:
 1751: /* save g7+1 windows, starting with the current one */
 1752: 1: ! do {
 1753: std %l0, [%g5 + (0*8)] ! rw->rw_local[0] = l0;
 1754: std %l2, [%g5 + (1*8)] ! ...
 1755: std %l4, [%g5 + (2*8)]
 1756: std %l6, [%g5 + (3*8)]
 1757: std %i0, [%g5 + (4*8)]
 1758: std %i2, [%g5 + (5*8)]
 1759: std %i4, [%g5 + (6*8)]
 1760: std %i6, [%g5 + (7*8)]
 1761: deccc %g7 ! if (n > 0) save(), rw++;
 1762: bge,a 1b ! } while (--n >= 0);
 1763: save %g5, 64, %g5
 1764:
 1765: /* stash sp for bottommost window */
 1766: st %sp, [%g5 + 64 + (7*8)]
 1767:
 1768: /* set up new wim */
 1769: rd %psr, %g7 ! g7 = (junk << 5) + new_cwp;
 1770: mov 1, %g5 ! g5 = 1 << new_cwp;
 1771: sll %g5, %g7, %g5
 1772: wr %g5, 0, %wim ! wim = g5;
 1773: and %g7, 31, %g7
 1774: st %g7, [%g6 + PCB_WIM] ! cpcb->pcb_wim = new_cwp;
 1775:
 1776: /* fix up pcb fields */
 1777: ld [%g6 + PCB_UW], %g7 ! n = cpcb->pcb_uw;
 1778: add %g7, 1, %g5
 1779: st %g5, [%g6 + PCB_NSAVED] ! cpcb->pcb_nsaved = n + 1;
 1780: st %g0, [%g6 + PCB_UW] ! cpcb->pcb_uw = 0;
 1781:
 1782: /* return to trap window */
 1783: 1: deccc %g7 ! do {
 1784: bge 1b ! restore();
 1785: restore ! } while (--n >= 0);
 1786:
 1787: mov %l5, %g5 ! restore g5, g6, & g7, and return
 1788: mov %l6, %g6
 1789: jmp %l4 + 8
 1790: mov %l7, %g7
 1791: /* NOTREACHED */
1792:
1793:
1794: /*
1795: * Each memory access (text or data) fault, from user or kernel mode,
1796: * comes here. We read the error register and figure out what has
1797: * happened.
1798: *
1799: * This cannot be done from C code since we must not enable traps (and
1800: * hence may not use the `save' instruction) until we have decided that
1801: * the error is or is not an asynchronous one that showed up after a
1802: * synchronous error, but which must be handled before the sync err.
1803: *
1804: * Most memory faults are user mode text or data faults, which can cause
1805: * signal delivery or ptracing, for which we must build a full trapframe.
1806: * It does not seem worthwhile to work to avoid this in the other cases,
1807: * so we store all the %g registers on the stack immediately.
1808: *
1809: * On entry:
1810: * %l0 = %psr
1811: * %l1 = return pc
1812: * %l2 = return npc
1813: * %l3 = T_TEXTFAULT or T_DATAFAULT
1814: *
1815: * Internal:
1816: * %l4 = %y, until we call mem_access_fault (then onto trapframe)
1817: * %l5 = IE_reg_addr, if async mem error
1818: *
1819: */
1.52 pk 1820:
 1821: #if defined(SUN4)
 1822: memfault_sun4:
! Sun4 memory-fault entry: distinguish a parity/memory error (fatal for
! now -- memerr() then prom_halt()) from an ordinary MMU fault, then
! fake up sun4c conventions (SER in %o1, fault VA in %o2, synthesizing
! SER_WRITE from the faulting instruction) and join normal_mem_fault.
1.1 deraadt 1823: TRAP_SETUP(-CCFSZ-80)
1.111 pk 1824: INCR(_C_LABEL(uvmexp)+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1)
1.1 deraadt 1825:
 1826: st %g1, [%sp + CCFSZ + 20] ! save g1
 1827: rd %y, %l4 ! save y
 1828:
1.19 deraadt 1829: /*
 1830: * registers:
 1831: * memerr.ctrl = memory error control reg., error if 0x80 set
 1832: * memerr.vaddr = address of memory error
 1833: * buserr = basically just like sun4c sync error reg but
 1834: * no SER_WRITE bit (have to figure out from code).
 1835: */
1.111 pk 1836: set _C_LABEL(par_err_reg), %o0 ! memerr ctrl addr -- XXX mapped?
1.20 deraadt 1837: ld [%o0], %o0 ! get it
1.19 deraadt 1838: std %g2, [%sp + CCFSZ + 24] ! save g2, g3
 1839: ld [%o0], %o1 ! memerr ctrl register
 1840: inc 4, %o0 ! now VA of memerr vaddr register
 1841: std %g4, [%sp + CCFSZ + 32] ! (sneak g4,g5 in here)
 1842: ld [%o0], %o2 ! memerr virt addr
 1843: st %g0, [%o0] ! NOTE: this clears latching!!!
 1844: btst ME_REG_IERR, %o1 ! memory error?
 1845: ! XXX this value may not be correct
 1846: ! as I got some parity errors and the
 1847: ! correct bits were not on?
 1848: std %g6, [%sp + CCFSZ + 40]
1.52 pk 1849: bz,a 0f ! no, just a regular fault
1.19 deraadt 1850: wr %l0, PSR_ET, %psr ! (and reenable traps)
 1851:
 1852: /* memory error = death for now XXX */
 1853: clr %o3
 1854: clr %o4
1.111 pk 1855: call _C_LABEL(memerr4_4c) ! memerr(0, ser, sva, 0, 0)
1.19 deraadt 1856: clr %o0
1.111 pk 1857: call _C_LABEL(prom_halt)
1.19 deraadt 1858: nop
 1859:
1.52 pk 1860: 0:
1.19 deraadt 1861: /*
 1862: * have to make SUN4 emulate SUN4C. 4C code expects
 1863: * SER in %o1 and the offending VA in %o2, everything else is ok.
 1864: * (must figure out if SER_WRITE should be set)
 1865: */
 1866: set AC_BUS_ERR, %o0 ! bus error register
 1867: cmp %l3, T_TEXTFAULT ! text fault always on PC
1.50 pk 1868: be normal_mem_fault ! go
1.21 deraadt 1869: lduba [%o0] ASI_CONTROL, %o1 ! get its value
1.19 deraadt 1870:
 1871: #define STORE_BIT 21 /* bit that indicates a store instruction for sparc */
 1872: ld [%l1], %o3 ! offending instruction in %o3 [l1=pc]
 1873: srl %o3, STORE_BIT, %o3 ! get load/store bit (wont fit simm13)
 1874: btst 1, %o3 ! test for store operation
 1875:
 1876: bz normal_mem_fault ! if (z) is a load (so branch)
 1877: sethi %hi(SER_WRITE), %o5 ! damn SER_WRITE wont fit simm13
 1878: ! or %lo(SER_WRITE), %o5, %o5! not necessary since %lo is zero
 1879: or %o5, %o1, %o1 ! set SER_WRITE
 1880: #if defined(SUN4C) || defined(SUN4M)
1.52 pk 1881: ba,a normal_mem_fault
 1882: !!nop ! XXX make efficient later
1.19 deraadt 1883: #endif /* SUN4C || SUN4M */
 1884: #endif /* SUN4 */
1.52 pk 1885:
 1886: memfault_sun4c:
! Sun4c memory-fault entry: read the sync error reg/VA; plain MMU faults
! go to normal_mem_fault.  For memory errors, check whether a pending
! async error (from a buffered store) must be handled first -- the two
! cases differ iff the sync and async fault VAs differ.
 1887: #if defined(SUN4C)
 1888: TRAP_SETUP(-CCFSZ-80)
1.111 pk 1889: INCR(_C_LABEL(uvmexp)+V_FAULTS) ! cnt.v_faults++ (clobbers %o0,%o1)
1.52 pk 1890:
 1891: st %g1, [%sp + CCFSZ + 20] ! save g1
 1892: rd %y, %l4 ! save y
 1893:
 1894: /*
 1895: * We know about the layout of the error registers here.
 1896: * addr reg
 1897: * ---- ---
 1898: * a AC_SYNC_ERR
 1899: * a+4 AC_SYNC_VA
 1900: * a+8 AC_ASYNC_ERR
 1901: * a+12 AC_ASYNC_VA
 1902: */
1.19 deraadt 1903:
1.1 deraadt 1904: #if AC_SYNC_ERR + 4 != AC_SYNC_VA || \
 1905: AC_SYNC_ERR + 8 != AC_ASYNC_ERR || AC_SYNC_ERR + 12 != AC_ASYNC_VA
 1906: help help help ! I, I, I wanna be a lifeguard
 1907: #endif
 1908: set AC_SYNC_ERR, %o0
 1909: std %g2, [%sp + CCFSZ + 24] ! save g2, g3
 1910: lda [%o0] ASI_CONTROL, %o1 ! sync err reg
 1911: inc 4, %o0
 1912: std %g4, [%sp + CCFSZ + 32] ! (sneak g4,g5 in here)
 1913: lda [%o0] ASI_CONTROL, %o2 ! sync virt addr
 1914: btst SER_MEMERR, %o1 ! memory error?
 1915: std %g6, [%sp + CCFSZ + 40]
 1916: bz,a normal_mem_fault ! no, just a regular fault
 1917: wr %l0, PSR_ET, %psr ! (and reenable traps)
 1918:
 1919: /*
 1920: * We got a synchronous memory error. It could be one that
 1921: * happened because there were two stores in a row, and the
 1922: * first went into the write buffer, and the second caused this
 1923: * synchronous trap; so there could now be a pending async error.
 1924: * This is in fact the case iff the two va's differ.
 1925: */
 1926: inc 4, %o0
 1927: lda [%o0] ASI_CONTROL, %o3 ! async err reg
 1928: inc 4, %o0
 1929: lda [%o0] ASI_CONTROL, %o4 ! async virt addr
 1930: cmp %o2, %o4
 1931: be,a 1f ! no, not an async err
 1932: wr %l0, PSR_ET, %psr ! (and reenable traps)
 1933:
 1934: /*
 1935: * Handle the async error; ignore the sync error for now
 1936: * (we may end up getting it again, but so what?).
 1937: * This code is essentially the same as that at `nmi' below,
 1938: * but the register usage is different and we cannot merge.
 1939: */
1.62 pk 1940: sethi %hi(INTRREG_VA), %l5 ! ienab_bic(IE_ALLIE);
 1941: ldub [%l5 + %lo(INTRREG_VA)], %o0
1.1 deraadt 1942: andn %o0, IE_ALLIE, %o0
1.62 pk 1943: stb %o0, [%l5 + %lo(INTRREG_VA)]
1.1 deraadt 1944:
 1945: /*
 1946: * Now reenable traps and call C code.
 1947: * %o1 through %o4 still hold the error reg contents.
 1948: * If memerr() returns, return from the trap.
 1949: */
 1950: wr %l0, PSR_ET, %psr
1.111 pk 1951: call _C_LABEL(memerr4_4c) ! memerr(0, ser, sva, aer, ava)
1.1 deraadt 1952: clr %o0
 1953:
 1954: ld [%sp + CCFSZ + 20], %g1 ! restore g1 through g7
 1955: wr %l0, 0, %psr ! and disable traps, 3 instr delay
 1956: ldd [%sp + CCFSZ + 24], %g2
 1957: ldd [%sp + CCFSZ + 32], %g4
 1958: ldd [%sp + CCFSZ + 40], %g6
 1959: /* now safe to set IE_ALLIE again */
1.62 pk 1960: ldub [%l5 + %lo(INTRREG_VA)], %o1
1.1 deraadt 1961: or %o1, IE_ALLIE, %o1
1.62 pk 1962: stb %o1, [%l5 + %lo(INTRREG_VA)]
1.1 deraadt 1963: b return_from_trap
 1964: wr %l4, 0, %y ! restore y
 1965:
 1966: /*
 1967: * Trap was a synchronous memory error.
 1968: * %o1 through %o4 still hold the error reg contents.
 1969: */
 1970: 1:
1.111 pk 1971: call _C_LABEL(memerr4_4c) ! memerr(1, ser, sva, aer, ava)
1.1 deraadt 1972: mov 1, %o0
 1973:
 1974: ld [%sp + CCFSZ + 20], %g1 ! restore g1 through g7
 1975: ldd [%sp + CCFSZ + 24], %g2
 1976: ldd [%sp + CCFSZ + 32], %g4
 1977: ldd [%sp + CCFSZ + 40], %g6
 1978: wr %l4, 0, %y ! restore y
 1979: b return_from_trap
 1980: wr %l0, 0, %psr
 1981: /* NOTREACHED */
1.52 pk 1982: #endif /* SUN4C */
1983:
 1984: #if defined(SUN4M)
/*
 * memfault_sun4m: synchronous memory fault entry for SRMMU (sun4m) CPUs.
 *
 * Entered in the trap window with:
 *	%l0 = %psr at trap time, %l1 = pc, %l2 = npc, %l3 = trap type
 *
 * First tail-calls the CPU-specific "get sync fault" routine (through the
 * CPUINFO_GETSYNCFLT function pointer, return address in %l7) which dumps
 * fault status/address into CPUINFO_SYNCFLTDUMP; then builds a full trap
 * frame and calls mem_access_fault4m(type, sfsr, sfva, &tf).
 */
 1985: memfault_sun4m:
1.94 pk 1986: 	sethi	%hi(CPUINFO_VA), %l4
 1987: 	ld	[%l4 + %lo(CPUINFO_VA+CPUINFO_GETSYNCFLT)], %l5
 1988: 	jmpl	%l5, %l7	! call CPU-specific sync fault fetch routine
 1989: 	 or	%l4, %lo(CPUINFO_SYNCFLTDUMP), %l4	! (delay) arg: dump area
1.52 pk 1990: 	TRAP_SETUP(-CCFSZ-80)
1.111 pk 1991: 	INCR(_C_LABEL(uvmexp)+V_FAULTS)	! cnt.v_faults++ (clobbers %o0,%o1)
1.52 pk 1992: 
 1993: 	st	%g1, [%sp + CCFSZ + 20]	! save g1
 1994: 	rd	%y, %l4			! save y
 1995: 
 1996: 	std	%g2, [%sp + CCFSZ + 24]	! save g2, g3
1.62 pk 1997: 	std	%g4, [%sp + CCFSZ + 32]	! save g4, g5
1.94 pk 1998: 	std	%g6, [%sp + CCFSZ + 40]	! sneak in g6, g7
1.52 pk 1999: 
1.94 pk 2000: 	! retrieve sync fault status/address (stored by routine called above)
 2001: 	sethi	%hi(CPUINFO_VA+CPUINFO_SYNCFLTDUMP), %o0
 2002: 	ld	[%o0 + %lo(CPUINFO_VA+CPUINFO_SYNCFLTDUMP)], %o1	! (arg: sfsr)
 2003: 	ld	[%o0 + %lo(CPUINFO_VA+CPUINFO_SYNCFLTDUMP+4)], %o2	! (arg: sfva)
1.52 pk 2004: 
 2005: 	wr	%l0, PSR_ET, %psr	! reenable traps
 2006: 
 2007: 	/* Finish stackframe, call C trap handler */
 2008: 	std	%l0, [%sp + CCFSZ + 0]	! set tf.tf_psr, tf.tf_pc
 2009: 	mov	%l3, %o0		! (argument: type)
 2010: 	st	%l2, [%sp + CCFSZ + 8]	! set tf.tf_npc
 2011: 	st	%l4, [%sp + CCFSZ + 12]	! set tf.tf_y
 2012: 	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_out[0], etc
 2013: 	std	%i2, [%sp + CCFSZ + 56]
 2014: 	std	%i4, [%sp + CCFSZ + 64]
 2015: 	std	%i6, [%sp + CCFSZ + 72]
1.111 pk 2016: 	! mem_access_fault(type,sfsr,sfva,&tf);
 2017: 	call	_C_LABEL(mem_access_fault4m)
1.94 pk 2018: 	 add	%sp, CCFSZ, %o3		! (argument: &tf)
1.52 pk 2019: 
	! C handler returned: reload possibly-modified trapframe and return
 2020: 	ldd	[%sp + CCFSZ + 0], %l0	! load new values
 2021: 	ldd	[%sp + CCFSZ + 8], %l2
 2022: 	wr	%l3, 0, %y
 2023: 	ld	[%sp + CCFSZ + 20], %g1
 2024: 	ldd	[%sp + CCFSZ + 24], %g2
 2025: 	ldd	[%sp + CCFSZ + 32], %g4
 2026: 	ldd	[%sp + CCFSZ + 40], %g6
 2027: 	ldd	[%sp + CCFSZ + 48], %i0
 2028: 	ldd	[%sp + CCFSZ + 56], %i2
 2029: 	ldd	[%sp + CCFSZ + 64], %i4
 2030: 	ldd	[%sp + CCFSZ + 72], %i6
 2031: 
 2032: 	b	return_from_trap	! go return
 2033: 	 wr	%l0, 0, %psr		! (but first disable traps again)
 2034: #endif /* SUN4M */
1.1 deraadt 2035:
 2036: normal_mem_fault:
 2037: 	/*
 2038: 	 * Trap was some other error; call C code to deal with it.
 2039: 	 * Must finish trap frame (psr,pc,npc,%y,%o0..%o7) in case
 2040: 	 * we decide to deliver a signal or ptrace the process.
 2041: 	 * %g1..%g7 were already set up above.
	 *
	 * Register state on entry (set up by the memfault prologue above,
	 * which is outside this block):
	 *	%l0 = %psr, %l1 = pc, %l2 = npc, %l3 = trap type, %l4 = %y
	 *	%o1/%o2 presumably already hold ser/sva for the C call
	 *	(they are passed through untouched here) -- see caller.
 2042: 	 */
 2043: 	std	%l0, [%sp + CCFSZ + 0]	! set tf.tf_psr, tf.tf_pc
 2044: 	mov	%l3, %o0		! (argument: type)
 2045: 	st	%l2, [%sp + CCFSZ + 8]	! set tf.tf_npc
 2046: 	st	%l4, [%sp + CCFSZ + 12]	! set tf.tf_y
 2047: 	mov	%l1, %o3		! (argument: pc)
 2048: 	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_out[0], etc
 2049: 	std	%i2, [%sp + CCFSZ + 56]
 2050: 	mov	%l0, %o4		! (argument: psr)
 2051: 	std	%i4, [%sp + CCFSZ + 64]
 2052: 	std	%i6, [%sp + CCFSZ + 72]
1.111 pk 2053: 	call	_C_LABEL(mem_access_fault)! mem_access_fault(type, ser, sva,
1.1 deraadt 2054: 					!		pc, psr, &tf);
 2055: 	 add	%sp, CCFSZ, %o5	! (delay; argument: &tf)
 2056: 
	! reload trapframe values the C handler may have changed
 2057: 	ldd	[%sp + CCFSZ + 0], %l0	! load new values
 2058: 	ldd	[%sp + CCFSZ + 8], %l2
 2059: 	wr	%l3, 0, %y
 2060: 	ld	[%sp + CCFSZ + 20], %g1
 2061: 	ldd	[%sp + CCFSZ + 24], %g2
 2062: 	ldd	[%sp + CCFSZ + 32], %g4
 2063: 	ldd	[%sp + CCFSZ + 40], %g6
 2064: 	ldd	[%sp + CCFSZ + 48], %i0
 2065: 	ldd	[%sp + CCFSZ + 56], %i2
 2066: 	ldd	[%sp + CCFSZ + 64], %i4
 2067: 	ldd	[%sp + CCFSZ + 72], %i6
 2068: 
 2069: 	b	return_from_trap	! go return
 2070: 	 wr	%l0, 0, %psr		! (but first disable traps again)
2071:
1.208 pk 2072: illinst4m:
 2073: 	/*
 2074: 	 * Cypress CPUs like to generate an Illegal Instruction trap
 2075: 	 * for FLUSH instructions.  Since we turn FLUSHes into no-ops
 2076: 	 * (see also trap.c/emul.c), we check for this case here in
 2077: 	 * the trap window, saving the overhead of a slow trap.
 2078: 	 *
 2079: 	 * We have to be careful not to incur a trap while probing
 2080: 	 * for the instruction in user space. Use the Inhibit Fault
 2081: 	 * bit in the PCR register to prevent that.
	 *
	 * Entered in the trap window; uses only %l4-%l7 as scratch.
	 * %l0 = %psr, %l1 = pc (address of the faulting insn), %l2 = npc.
 2082: 	 */
 2083: 
 2084: 	btst	PSR_PS, %l0	! slowtrap() if from kernel
 2085: 	bnz	slowtrap
 2086: 	 EMPTY
 2087: 
 2088: 	! clear fault status (SFSR is read-to-clear)
 2089: 	set	SRMMU_SFSR, %l7
 2090: 	lda	[%l7]ASI_SRMMU, %g0
 2091: 
 2092: 	! turn on the fault inhibit in PCR
 2093: 	!set	SRMMU_PCR, reg	- SRMMU_PCR == 0, so use %g0
 2094: 	lda	[%g0]ASI_SRMMU, %l4	! %l4 = original PCR
 2095: 	or	%l4, SRMMU_PCR_NF, %l5
 2096: 	sta	%l5, [%g0]ASI_SRMMU
 2097: 
 2098: 	! load the insn word as if user insn fetch
	! (NF set above: a miss records fault status instead of trapping)
 2099: 	lda	[%l1]ASI_USERI, %l5
 2100: 
 2101: 	sta	%l4, [%g0]ASI_SRMMU	! restore PCR
 2102: 
 2103: 	! check fault status; if we have a fault, take a regular trap
 2104: 	set	SRMMU_SFAR, %l6
 2105: 	lda	[%l6]ASI_SRMMU, %g0	! fault VA; must be read first
 2106: 	lda	[%l7]ASI_SRMMU, %l6	! fault status
 2107: 	andcc	%l6, SFSR_FAV, %l6	! get fault status bits
 2108: 	bnz	slowtrap		! fault address valid => probe faulted
 2109: 	 EMPTY
 2110: 
 2111: 	! we got the insn; check whether it was a FLUSH
 2112: 	! instruction format: op=2, op3=0x3b (see also instr.h)
 2113: 	set	((3 << 30) | (0x3f << 19)), %l7	! extract op & op3 fields
 2114: 	and	%l5, %l7, %l6
 2115: 	set	((2 << 30) | (0x3b << 19)), %l7	! any FLUSH opcode
 2116: 	cmp	%l6, %l7
 2117: 	bne	slowtrap		! not FLUSH: handle as real illegal insn
 2118: 	 nop
 2119: 
	! FLUSH becomes a no-op: skip the instruction and return
 2120: 	mov	%l2, %l1	! ADVANCE <pc,npc>
 2121: 	mov	%l0, %psr	! and return from trap
 2122: 	add	%l2, 4, %l2
 2123: 	RETT
 2124: 
2124:
1.1 deraadt 2125:
2126: /*
2127: * fp_exception has to check to see if we are trying to save
2128: * the FP state, and if so, continue to save the FP state.
2129: *
2130: * We do not even bother checking to see if we were in kernel mode,
2131: * since users have no access to the special_fp_store instruction.
2132: *
2133: * This whole idea was stolen from Sprite.
2134: */
2135: fp_exception:
2136: set special_fp_store, %l4 ! see if we came from the special one
2137: cmp %l1, %l4 ! pc == special_fp_store?
2138: bne slowtrap ! no, go handle per usual
2139: EMPTY
2140: sethi %hi(savefpcont), %l4 ! yes, "return" to the special code
2141: or %lo(savefpcont), %l4, %l4
2142: jmp %l4
2143: rett %l4 + 4
2144:
2145: /*
2146: * slowtrap() builds a trap frame and calls trap().
2147: * This is called `slowtrap' because it *is*....
2148: * We have to build a full frame for ptrace(), for instance.
2149: *
2150: * Registers:
2151: * %l0 = %psr
2152: * %l1 = return pc
2153: * %l2 = return npc
2154: * %l3 = trap code
2155: */
2156: slowtrap:
2157: TRAP_SETUP(-CCFSZ-80)
2158: /*
2159: * Phew, ready to enable traps and call C code.
2160: */
2161: mov %l3, %o0 ! put type in %o0 for later
2162: Lslowtrap_reenter:
2163: wr %l0, PSR_ET, %psr ! traps on again
2164: std %l0, [%sp + CCFSZ] ! tf.tf_psr = psr; tf.tf_pc = ret_pc;
2165: rd %y, %l3
2166: std %l2, [%sp + CCFSZ + 8] ! tf.tf_npc = return_npc; tf.tf_y = %y;
2167: st %g1, [%sp + CCFSZ + 20]
2168: std %g2, [%sp + CCFSZ + 24]
2169: std %g4, [%sp + CCFSZ + 32]
2170: std %g6, [%sp + CCFSZ + 40]
2171: std %i0, [%sp + CCFSZ + 48]
2172: mov %l0, %o1 ! (psr)
2173: std %i2, [%sp + CCFSZ + 56]
2174: mov %l1, %o2 ! (pc)
2175: std %i4, [%sp + CCFSZ + 64]
2176: add %sp, CCFSZ, %o3 ! (&tf)
1.111 pk 2177: call _C_LABEL(trap) ! trap(type, psr, pc, &tf)
1.1 deraadt 2178: std %i6, [%sp + CCFSZ + 72]
2179:
2180: ldd [%sp + CCFSZ], %l0 ! load new values
2181: ldd [%sp + CCFSZ + 8], %l2
2182: wr %l3, 0, %y
2183: ld [%sp + CCFSZ + 20], %g1
2184: ldd [%sp + CCFSZ + 24], %g2
2185: ldd [%sp + CCFSZ + 32], %g4
2186: ldd [%sp + CCFSZ + 40], %g6
2187: ldd [%sp + CCFSZ + 48], %i0
2188: ldd [%sp + CCFSZ + 56], %i2
2189: ldd [%sp + CCFSZ + 64], %i4
2190: ldd [%sp + CCFSZ + 72], %i6
2191: b return_from_trap
2192: wr %l0, 0, %psr
2193:
 2194: /*
 2195:  * Do a `software' trap by re-entering the trap code, possibly first
 2196:  * switching from interrupt stack to kernel stack.  This is used for
 2197:  * scheduling and signal ASTs (which generally occur from softclock or
 2198:  * tty or net interrupts) and register window saves (which might occur
 2199:  * from anywhere).
 2200:  *
 2201:  * The current window is the trap window, and it is by definition clean.
 2202:  * We enter with the trap type in %o0.  All we have to do is jump to
 2203:  * Lslowtrap_reenter above, but maybe after switching stacks....
 2204:  */
 2205: softtrap:
1.142 mrg 2206: #if defined(MULTIPROCESSOR)
1.97 pk 2207: 	/*
 2208: 	 * The interrupt stack is not at a fixed location
 2209: 	 * and %sp must be checked against both ends.
 2210: 	 */
1.173 pk 2211: 	sethi	%hi(_EINTSTACKP), %l6
 2212: 	ld	[%l6 + %lo(_EINTSTACKP)], %l7
1.97 pk 2213: 	cmp	%sp, %l7		! %sp above stack end?
 2214: 	bge	Lslowtrap_reenter	! not on interrupt stack
 2215: 	 EMPTY
 2216: 	set	INT_STACK_SIZE, %l6
 2217: 	sub	%l7, %l6, %l7		! %l7 = bottom of interrupt stack
 2218: 	cmp	%sp, %l7
 2219: 	blu	Lslowtrap_reenter	! below stack bottom: not on it either
 2220: 	 EMPTY
 2221: #else
1.111 pk 2222: 	sethi	%hi(_C_LABEL(eintstack)), %l7
1.1 deraadt 2223: 	cmp	%sp, %l7		! on interrupt stack?
 2224: 	bge	Lslowtrap_reenter	! no, just continue
 2225: 	 EMPTY
1.97 pk 2226: #endif
	! We are on the interrupt stack: switch to the kernel stack of the
	! current process (top of its USPACE area, minus frame space).
1.111 pk 2227: 	sethi	%hi(cpcb), %l6
 2228: 	ld	[%l6 + %lo(cpcb)], %l6
1.13 deraadt 2229: 	set	USPACE-CCFSZ-80, %l5
1.1 deraadt 2230: 	add	%l6, %l5, %l7		! %l7 = new %sp in cpcb's stack
 2231: 	SET_SP_REDZONE(%l6, %l5)
 2232: 	b	Lslowtrap_reenter
 2233: 	 mov	%l7, %sp		! (delay) switch stacks
 2234: 
2234:
 2235: #ifdef KGDB
 2236: /*
 2237:  * bpt is entered on all breakpoint traps.
 2238:  * If this is a kernel breakpoint, we do not want to call trap().
 2239:  * Among other reasons, this way we can set breakpoints in trap().
 *
 * Entered in the trap window with %l0 = %psr, %l1 = pc, %l2 = npc,
 * %l3 = trap type.  User breakpoints fall through to slowtrap.
 2240:  */
 2241: bpt:
 2242: 	btst	PSR_PS, %l0	! breakpoint from kernel?
 2243: 	bz	slowtrap	! no, go do regular trap
 2244: 	 nop
 2245: 
1.137 mrg 2246: 	/* XXXSMP */
1.1 deraadt 2247: 	/*
 2248: 	 * Build a trap frame for kgdb_trap_glue to copy.
 2249: 	 * Enable traps but set ipl high so that we will not
 2250: 	 * see interrupts from within breakpoints.
 2251: 	 */
 2252: 	TRAP_SETUP(-CCFSZ-80)
 2253: 	or	%l0, PSR_PIL, %l4	! splhigh()
 2254: 	wr	%l4, 0, %psr	! the manual claims that this
 2255: 	wr	%l4, PSR_ET, %psr	! song and dance is necessary
 2256: 	std	%l0, [%sp + CCFSZ + 0]	! tf.tf_psr, tf.tf_pc
 2257: 	mov	%l3, %o0	! trap type arg for kgdb_trap_glue
 2258: 	rd	%y, %l3
 2259: 	std	%l2, [%sp + CCFSZ + 8]	! tf.tf_npc, tf.tf_y
 2260: 	rd	%wim, %l3
 2261: 	st	%l3, [%sp + CCFSZ + 16]	! tf.tf_wim (a kgdb-only r/o field)
 2262: 	st	%g1, [%sp + CCFSZ + 20]	! tf.tf_global[1]
 2263: 	std	%g2, [%sp + CCFSZ + 24]	! etc
 2264: 	std	%g4, [%sp + CCFSZ + 32]
 2265: 	std	%g6, [%sp + CCFSZ + 40]
 2266: 	std	%i0, [%sp + CCFSZ + 48]	! tf.tf_in[0..1]
 2267: 	std	%i2, [%sp + CCFSZ + 56]	! etc
 2268: 	std	%i4, [%sp + CCFSZ + 64]
 2269: 	std	%i6, [%sp + CCFSZ + 72]
 2270: 
 2271: 	/*
 2272: 	 * Now call kgdb_trap_glue(); if it returns, call trap().
 2273: 	 */
 2274: 	mov	%o0, %l3	! gotta save trap type
1.111 pk 2275: 	call	_C_LABEL(kgdb_trap_glue)! kgdb_trap_glue(type, &trapframe)
1.1 deraadt 2276: 	 add	%sp, CCFSZ, %o1	! (delay; &trapframe)
 2277: 
 2278: 	/*
 2279: 	 * Use slowtrap to call trap---but first erase our tracks
 2280: 	 * (put the registers back the way they were).
 2281: 	 */
 2282: 	mov	%l3, %o0	! slowtrap will need trap type
 2283: 	ld	[%sp + CCFSZ + 12], %l3
 2284: 	wr	%l3, 0, %y
 2285: 	ld	[%sp + CCFSZ + 20], %g1
 2286: 	ldd	[%sp + CCFSZ + 24], %g2
 2287: 	ldd	[%sp + CCFSZ + 32], %g4
 2288: 	b	Lslowtrap_reenter
 2289: 	 ldd	[%sp + CCFSZ + 40], %g6
 2290: 
2290:
 2291: /*
 2292:  * Enter kernel breakpoint.  Write all the windows (not including the
 2293:  * current window) into the stack, so that backtrace works.  Copy the
 2294:  * supplied trap frame to the kgdb stack and switch stacks.
 2295:  *
 2296:  * kgdb_trap_glue(type, tf0)
 2297:  *	int type;
 2298:  *	struct trapframe *tf0;
 *
 * If kgdb_trap() handles the trap, control leaves via kgdb_rett and
 * never returns here; otherwise we restore %sp and return normally so
 * the caller can fall back to trap().
 2299:  */
1.111 pk 2300: _ENTRY(_C_LABEL(kgdb_trap_glue))
1.1 deraadt 2301: 	save	%sp, -CCFSZ, %sp
 2302: 
1.111 pk 2303: 	call	_C_LABEL(write_all_windows)
1.1 deraadt 2304: 	 mov	%sp, %l4	! %l4 = current %sp (delay slot)
 2305: 
 2306: 	/* copy trapframe to top of kgdb stack */
1.127 pk 2307: 	set	_C_LABEL(kgdb_stack) + KGDB_STACK_SIZE - 80, %l0
1.1 deraadt 2308: 				! %l0 = tfcopy -> end_of_kgdb_stack
 2309: 	mov	80, %l1		! %l1 = bytes remaining (80-byte trapframe)
 2310: 1:	ldd	[%i1], %l2	! copy 8 bytes per iteration
 2311: 	inc	8, %i1
 2312: 	deccc	8, %l1
 2313: 	std	%l2, [%l0]
 2314: 	bg	1b
 2315: 	 inc	8, %l0
 2316: 
 2317: #ifdef DEBUG
 2318: 	/* save old red zone and then turn it off */
 2319: 	sethi	%hi(_redzone), %l7
 2320: 	ld	[%l7 + %lo(_redzone)], %l6
 2321: 	st	%g0, [%l7 + %lo(_redzone)]
 2322: #endif
 2323: 	/* switch to kgdb stack */
 2324: 	add	%l0, -CCFSZ-80, %sp
 2325: 
 2326: 	/* if (kgdb_trap(type, tfcopy)) kgdb_rett(tfcopy); */
 2327: 	mov	%i0, %o0
1.111 pk 2328: 	call	_C_LABEL(kgdb_trap)
1.1 deraadt 2329: 	 add	%l0, -80, %o1	! (delay; &tfcopy)
 2330: 	tst	%o0
 2331: 	bnz,a	kgdb_rett
 2332: 	 add	%l0, -80, %g1	! (annulled unless taken) %g1 = &tfcopy
 2333: 
 2334: 	/*
 2335: 	 * kgdb_trap() did not handle the trap at all so the stack is
 2336: 	 * still intact.  A simple `restore' will put everything back,
 2337: 	 * after we reset the stack pointer.
 2338: 	 */
 2339: 	mov	%l4, %sp
 2340: #ifdef DEBUG
 2341: 	st	%l6, [%l7 + %lo(_redzone)]	! restore red zone
 2342: #endif
 2343: 	ret
 2344: 	 restore
 2345: 
2345:
 2346: /*
 2347:  * Return from kgdb trap.  This is sort of special.
 2348:  *
 2349:  * We know that kgdb_trap_glue wrote the window above it, so that we will
 2350:  * be able to (and are sure to have to) load it up.  We also know that we
 2351:  * came from kernel land and can assume that the %fp (%i6) we load here
 2352:  * is proper.  We must also be sure not to lower ipl (it is at splhigh())
 2353:  * until we have traps disabled, due to the SPARC taking traps at the
 2354:  * new ipl before noticing that PSR_ET has been turned off.  We are on
 2355:  * the kgdb stack, so this could be disastrous.
 2356:  *
 2357:  * Note that the trapframe argument in %g1 points into the current stack
 2358:  * frame (current window).  We abandon this window when we move %g1->tf_psr
 2359:  * into %psr, but we will not have loaded the new %sp yet, so again traps
 2360:  * must be disabled.
 2361:  */
 2362: kgdb_rett:
 2363: 	rd	%psr, %g4	! turn off traps
 2364: 	wr	%g4, PSR_ET, %psr	! (xor toggles ET off)
 2365: 	/* use the three-instruction delay to do something useful */
 2366: 	ld	[%g1], %g2	! pick up new %psr
 2367: 	ld	[%g1 + 12], %g3	! set %y
 2368: 	wr	%g3, 0, %y
 2369: #ifdef DEBUG
 2370: 	st	%l6, [%l7 + %lo(_redzone)]	! and restore red zone
 2371: #endif
 2372: 	wr	%g0, 0, %wim	! enable window changes
 2373: 	nop; nop; nop		! settle %wim write (3-cycle delay)
 2374: 	/* now safe to set the new psr (changes CWP, leaves traps disabled) */
 2375: 	wr	%g2, 0, %psr	! set rett psr (including cond codes)
 2376: 	/* 3 instruction delay before we can use the new window */
 2377: /*1*/	ldd	[%g1 + 24], %g2	! set new %g2, %g3
 2378: /*2*/	ldd	[%g1 + 32], %g4	! set new %g4, %g5
 2379: /*3*/	ldd	[%g1 + 40], %g6	! set new %g6, %g7
 2380: 
 2381: 	/* now we can use the new window */
 2382: 	mov	%g1, %l4	! stash trapframe pointer in a local
 2383: 	ld	[%l4 + 4], %l1	! get new pc
 2384: 	ld	[%l4 + 8], %l2	! get new npc
 2385: 	ld	[%l4 + 20], %g1	! set new %g1
 2386: 
 2387: 	/* set up returnee's out registers, including its %sp */
 2388: 	ldd	[%l4 + 48], %i0
 2389: 	ldd	[%l4 + 56], %i2
 2390: 	ldd	[%l4 + 64], %i4
 2391: 	ldd	[%l4 + 72], %i6
 2392: 
 2393: 	/* load returnee's window, making the window above it be invalid */
 2394: 	restore
 2395: 	restore	%g0, 1, %l1	! move to inval window and set %l1 = 1
 2396: 	rd	%psr, %l0
 2397: 	sll	%l1, %l0, %l1
 2398: 	wr	%l1, 0, %wim	! %wim = 1 << (%psr & 31)
1.111 pk 2399: 	sethi	%hi(cpcb), %l1
 2400: 	ld	[%l1 + %lo(cpcb)], %l1
1.1 deraadt 2401: 	and	%l0, 31, %l0	! CWP = %psr & 31;
 2402: 	st	%l0, [%l1 + PCB_WIM]	! cpcb->pcb_wim = CWP;
 2403: 	save	%g0, %g0, %g0	! back to window to reload
 2404: 	LOADWIN(%sp)
 2405: 	save	%g0, %g0, %g0	! back to trap window
 2406: 	/* note, we have not altered condition codes; safe to just rett */
 2407: 	RETT
 2408: #endif
2409:
 2410: /*
 2411:  * syscall() builds a trap frame and calls syscall().
 2412:  * sun_syscall is same but delivers sun system call number
 2413:  * XXX	should not have to save&reload ALL the registers just for
 2414:  *	ptrace...
 *
 * Entered in the trap window with %l0 = %psr, %l1 = pc, %l2 = npc,
 * and %g1 = system call code (per the SPARC syscall convention used
 * throughout this file).
 2415:  */
1.122 christos 2416: _C_LABEL(_syscall):
1.1 deraadt 2417: 	TRAP_SETUP(-CCFSZ-80)
1.173 pk 2418: #ifdef DEBUG
 2419: 	or	%g1, 0x1000, %l6	! mark syscall
 2420: 	TRAP_TRACE(%l6,%l5)
 2421: #endif
1.1 deraadt 2422: 	wr	%l0, PSR_ET, %psr
 2423: 	std	%l0, [%sp + CCFSZ + 0]	! tf_psr, tf_pc
 2424: 	rd	%y, %l3
 2425: 	std	%l2, [%sp + CCFSZ + 8]	! tf_npc, tf_y
 2426: 	st	%g1, [%sp + CCFSZ + 20]	! tf_g[1]
 2427: 	std	%g2, [%sp + CCFSZ + 24]	! tf_g[2], tf_g[3]
 2428: 	std	%g4, [%sp + CCFSZ + 32]	! etc
 2429: 	std	%g6, [%sp + CCFSZ + 40]
 2430: 	mov	%g1, %o0	! (code)
 2431: 	std	%i0, [%sp + CCFSZ + 48]
 2432: 	add	%sp, CCFSZ, %o1	! (&tf)
 2433: 	std	%i2, [%sp + CCFSZ + 56]
 2434: 	mov	%l1, %o2	! (pc)
 2435: 	std	%i4, [%sp + CCFSZ + 64]
1.111 pk 2436: 	call	_C_LABEL(syscall)	! syscall(code, &tf, pc, suncompat)
1.1 deraadt 2437: 	 std	%i6, [%sp + CCFSZ + 72]	! (delay slot completes the frame)
 2438: 	! now load em all up again, sigh
 2439: 	ldd	[%sp + CCFSZ + 0], %l0	! new %psr, new pc
 2440: 	ldd	[%sp + CCFSZ + 8], %l2	! new npc, new %y
 2441: 	wr	%l3, 0, %y
1.51 pk 2442: 	/* see `proc_trampoline' for the reason for this label */
 2443: return_from_syscall:
1.1 deraadt 2444: 	ld	[%sp + CCFSZ + 20], %g1
 2445: 	ldd	[%sp + CCFSZ + 24], %g2
 2446: 	ldd	[%sp + CCFSZ + 32], %g4
 2447: 	ldd	[%sp + CCFSZ + 40], %g6
 2448: 	ldd	[%sp + CCFSZ + 48], %i0
 2449: 	ldd	[%sp + CCFSZ + 56], %i2
 2450: 	ldd	[%sp + CCFSZ + 64], %i4
 2451: 	ldd	[%sp + CCFSZ + 72], %i6
 2452: 	b	return_from_trap
 2453: 	 wr	%l0, 0, %psr
 2454: 
2454:
2455: /*
2456: * Interrupts. Software interrupts must be cleared from the software
2457: * interrupt enable register. Rather than calling ienab_bic for each,
2458: * we do them in-line before enabling traps.
2459: *
2460: * After preliminary setup work, the interrupt is passed to each
2461: * registered handler in turn. These are expected to return nonzero if
2462: * they took care of the interrupt. If a handler claims the interrupt,
2463: * we exit (hardware interrupts are latched in the requestor so we'll
2464: * just take another interrupt in the unlikely event of simultaneous
2465: * interrupts from two different devices at the same level). If we go
2466: * through all the registered handlers and no one claims it, we report a
2467: * stray interrupt. This is more or less done as:
2468: *
2469: * for (ih = intrhand[intlev]; ih; ih = ih->ih_next)
2470: * if ((*ih->ih_fun)(ih->ih_arg ? ih->ih_arg : &frame))
2471: * return;
2472: * strayintr(&frame);
2473: *
2474: * Software interrupts are almost the same with three exceptions:
2475: * (1) we clear the interrupt from the software interrupt enable
2476: * register before calling any handler (we have to clear it first
2477: * to avoid an interrupt-losing race),
2478: * (2) we always call all the registered handlers (there is no way
2479: * to tell if the single bit in the software interrupt register
2480: * represents one or many requests)
2481: * (3) we never announce a stray interrupt (because of (1), another
2482: * interrupt request can come in while we're in the handler. If
1.52 pk 2483: * the handler deals with everything for both the original & the
1.1 deraadt 2484: * new request, we'll erroneously report a stray interrupt when
2485: * we take the software interrupt for the new request.
2486: *
2487: * Inputs:
2488: * %l0 = %psr
2489: * %l1 = return pc
2490: * %l2 = return npc
2491: * %l3 = interrupt level
2492: * (software interrupt only) %l4 = bits to clear in interrupt register
2493: *
2494: * Internal:
2495: * %l4, %l5: local variables
2496: * %l6 = %y
2497: * %l7 = %g1
2498: * %g2..%g7 go to stack
2499: *
2500: * An interrupt frame is built in the space for a full trapframe;
2501: * this contains the psr, pc, npc, and interrupt level.
2502: */
1.52 pk 2503: softintr_sun44c:
1.215 ! pk 2504: 	/*
! 2505: 	 * Entry point for level 1, 4 or 6 interrupts on sun4/sun4c
! 2506: 	 * which may be software interrupts. Check the interrupt
! 2507: 	 * register to see whether we're dealing software or hardware
! 2508: 	 * interrupt.
	 *
	 * On entry: %l0 = %psr, %l1 = pc, %l2 = npc, %l3 = interrupt level,
	 * %l4 = soft interrupt enable bit(s) to test/clear (IE_L{1,4,6}).
! 2509: 	 */
1.62 pk 2510: 	sethi	%hi(INTRREG_VA), %l6
 2511: 	ldub	[%l6 + %lo(INTRREG_VA)], %l5
1.215 ! pk 2512: 	btst	%l5, %l4	! is IE_L{1,4,6} set?
! 2513: 	bz	sparc_interrupt44c	! if not, must be a hw intr
! 2514: 	 andn	%l5, %l4, %l5	! (delay) clear soft intr bit
1.62 pk 2515: 	stb	%l5, [%l6 + %lo(INTRREG_VA)]	! ack soft interrupt
1.52 pk 2516: 
/*
 * Common software-interrupt dispatch: build an interrupt frame, raise
 * PIL to the interrupt level, bump counters, then call every handler on
 * sintrhand[intlev] (soft interrupts always run the whole chain; see the
 * block comment above).  Also reached from the sun4m soft intr path.
 */
 2517: softintr_common:
1.1 deraadt 2518: 	INTR_SETUP(-CCFSZ-80)
 2519: 	std	%g2, [%sp + CCFSZ + 24]	! save registers
1.181 uwe 2520: 	INCR(_C_LABEL(uvmexp)+V_SOFT)	! cnt.v_intr++; (clobbers %o0,%o1)
1.1 deraadt 2521: 	mov	%g1, %l7	! save %g1, %y in locals across handlers
 2522: 	rd	%y, %l6
 2523: 	std	%g4, [%sp + CCFSZ + 32]
 2524: 	andn	%l0, PSR_PIL, %l4	! %l4 = psr & ~PSR_PIL |
 2525: 	sll	%l3, 8, %l5	!	intlev << IPLSHIFT
 2526: 	std	%g6, [%sp + CCFSZ + 40]
 2527: 	or	%l5, %l4, %l4	!			;
 2528: 	wr	%l4, 0, %psr	! the manual claims this
 2529: 	wr	%l4, PSR_ET, %psr	! song and dance is necessary
 2530: 	std	%l0, [%sp + CCFSZ + 0]	! set up intrframe/clockframe
 2531: 	sll	%l3, 2, %l5	! %l5 = intlev * sizeof(int) (array index)
1.111 pk 2532: 	set	_C_LABEL(intrcnt), %l4	! intrcnt[intlev]++;
1.1 deraadt 2533: 	ld	[%l4 + %l5], %o0
 2534: 	std	%l2, [%sp + CCFSZ + 8]
 2535: 	inc	%o0
 2536: 	st	%o0, [%l4 + %l5]
1.169 pk 2537: 	set	_C_LABEL(sintrhand), %l4! %l4 = sintrhand[intlev];
1.1 deraadt 2538: 	ld	[%l4 + %l5], %l4
1.175 pk 2539: 
 2540: #if defined(MULTIPROCESSOR)
1.177 pk 2541: 	/* Grab the kernel lock for interrupt levels <= IPL_CLOCK */
 2542: 	cmp	%l3, IPL_CLOCK
1.183 pk 2543: 	bgeu	3f
1.177 pk 2544: 	 st	%fp, [%sp + CCFSZ + 16]
1.175 pk 2545: 	call	_C_LABEL(intr_lock_kernel)
 2546: 	 nop
 2547: #endif
 2548: 
1.1 deraadt 2549: 	b	3f
 2550: 	 st	%fp, [%sp + CCFSZ + 16]
 2551: 
	! Handler loop: soft interrupts call EVERY registered handler.
1.166 pk 2552: 1:	ld	[%l4 + 12], %o2	! ih->ih_classipl
 2553: 	rd	%psr, %o3	! (bits already shifted to PIL field)
 2554: 	andn	%o3, PSR_PIL, %o3	! %o3 = psr & ~PSR_PIL
 2555: 	wr	%o3, %o2, %psr	! splraise(ih->ih_classipl)
 2556: 	ld	[%l4], %o1	! %o1 = ih->ih_fun
1.1 deraadt 2557: 	ld	[%l4 + 4], %o0	! %o0 = ih->ih_arg
1.166 pk 2558: 	nop	! one more isns before touching ICC
1.1 deraadt 2559: 	tst	%o0
 2560: 	bz,a	2f	! if ih_arg == NULL, pass &frame instead
 2561: 	 add	%sp, CCFSZ, %o0
 2562: 2:	jmpl	%o1, %o7	! (void)(*ih->ih_fun)(...)
 2563: 	 ld	[%l4 + 8], %l4	! and ih = ih->ih_next
 2564: 3:	tst	%l4	! while ih != NULL
 2565: 	bnz	1b
 2566: 	 nop
1.175 pk 2567: 
 2568: #if defined(MULTIPROCESSOR)
1.177 pk 2569: 	cmp	%l3, IPL_CLOCK	! release kernel lock if we took it above
1.183 pk 2570: 	bgeu	0f
1.175 pk 2571: 	 nop
 2572: 	call	_C_LABEL(intr_unlock_kernel)
 2573: 	 nop
 2574: 0:
 2575: #endif
 2576: 
1.1 deraadt 2577: 	mov	%l7, %g1	! restore %g1, %y and the rest; return
 2578: 	wr	%l6, 0, %y
 2579: 	ldd	[%sp + CCFSZ + 24], %g2
 2580: 	ldd	[%sp + CCFSZ + 32], %g4
 2581: 	ldd	[%sp + CCFSZ + 40], %g6
 2582: 	b	return_from_trap
 2583: 	 wr	%l0, 0, %psr
 2584: 
2584:
 2585: /*
1.52 pk 2586:  * _sparc_interrupt{44c,4m} is exported for paranoia checking
 2587:  * (see intr.c).
 *
 * sparc_interrupt4m: level-1..15 interrupt entry for sun4m.  Reads the
 * pending-interrupt register to decide hardware vs software interrupt,
 * acknowledges soft interrupts, and dispatches to sparc_interrupt_common,
 * softintr_common, or (MULTIPROCESSOR) lev14_softint.  On entry:
 * %l0 = %psr, %l1 = pc, %l2 = npc, %l3 = interrupt level.
1.1 deraadt 2588:  */
1.52 pk 2589: #if defined(SUN4M)
1.111 pk 2590: _ENTRY(_C_LABEL(sparc_interrupt4m))
1.149 uwe 2591: #if !defined(MSIIEP)	/* "normal" sun4m */
1.201 pk 2592: 	sethi	%hi(CPUINFO_VA), %l6
 2593: 	ld	[%l6 + CPUINFO_INTREG], %l7
1.160 uwe 2594: 	mov	1, %l4
1.201 pk 2595: 	ld	[%l7 + ICR_PI_PEND_OFFSET], %l5	! get pending interrupts
1.160 uwe 2596: 	sll	%l4, %l3, %l4	! hw intr bits are in the lower halfword
 2597: 
 2598: 	btst	%l4, %l5	! has pending hw intr at this level?
 2599: 	bnz	sparc_interrupt_common
1.52 pk 2600: 	 nop
 2601: 
1.160 uwe 2602: 	! both softint pending and clear bits are in upper halfwords of
 2603: 	! their respective registers so shift the test bit in %l4 up there
 2604: 	sll	%l4, 16, %l4
1.199 pk 2605: 
1.201 pk 2606: 	st	%l4, [%l7 + ICR_PI_CLR_OFFSET]	! ack soft intr
1.199 pk 2607: #if defined(MULTIPROCESSOR)
 2608: 	cmp	%l3, 14	! level 14 soft interrupt is the IPI
 2609: 	be	lev14_softint	! (delay slot is the drain load below)
 2610: #endif
1.201 pk 2611: 	/* Drain hw reg; might be necessary for Ross CPUs */
 2612: 	ld	[%l7 + ICR_PI_PEND_OFFSET], %g0
1.199 pk 2613: 
1.161 uwe 2614: #ifdef DIAGNOSTIC
1.160 uwe 2615: 	btst	%l4, %l5	! make sure softint pending bit is set
 2616: 	bnz	softintr_common
 2617: 	/* FALLTHROUGH to sparc_interrupt4m_bogus */
 2618: #else
 2619: 	b	softintr_common
 2620: #endif
1.199 pk 2621: 	 nop
1.160 uwe 2622: 
1.149 uwe 2623: #else /* MSIIEP */
 2624: 	sethi	%hi(MSIIEP_PCIC_VA), %l6
 2625: 	mov	1, %l4
 2626: 	ld	[%l6 + PCIC_PROC_IPR_REG], %l5	! get pending interrupts
1.160 uwe 2627: 	sll	%l4, %l3, %l4	! hw intr bits are in the lower halfword
 2628: 
 2629: 	btst	%l4, %l5	! has pending hw intr at this level?
1.149 uwe 2630: 	bnz	sparc_interrupt_common
 2631: 	 nop
 2632: 
1.160 uwe 2633: #ifdef DIAGNOSTIC
 2634: 	! softint pending bits are in the upper halfword, but softint
 2635: 	! clear bits are in the lower halfword so we want the bit in %l4
 2636: 	! kept in the lower half and instead shift pending bits right
 2637: 	srl	%l5, 16, %l7
 2638: 	btst	%l4, %l7	! make sure softint pending bit is set
 2639: 	bnz	softintr_common
 2640: 	 sth	%l4, [%l6 + PCIC_SOFT_INTR_CLEAR_REG]
 2641: 	/* FALLTHROUGH to sparc_interrupt4m_bogus */
 2642: #else
1.149 uwe 2643: 	b	softintr_common
 2644: 	 sth	%l4, [%l6 + PCIC_SOFT_INTR_CLEAR_REG]
1.160 uwe 2645: #endif
 2646: 
1.149 uwe 2647: #endif /* MSIIEP */
1.160 uwe 2648: 
 2649: #ifdef DIAGNOSTIC
 2650: 	/*
 2651: 	 * sparc_interrupt4m detected that neither hardware nor software
 2652: 	 * interrupt pending bit is set for this interrupt. Report this
 2653: 	 * situation, this is most probably a symptom of a driver bug.
 2654: 	 */
 2655: sparc_interrupt4m_bogus:
 2656: 	INTR_SETUP(-CCFSZ-80)
 2657: 	std	%g2, [%sp + CCFSZ + 24]	! save registers
 2658: 	INCR(_C_LABEL(uvmexp)+V_INTR)	! cnt.v_intr++; (clobbers %o0,%o1)
 2659: 	mov	%g1, %l7	! save %g1, %y across the C call
 2660: 	rd	%y, %l6
 2661: 	std	%g4, [%sp + CCFSZ + 32]
 2662: 	andn	%l0, PSR_PIL, %l4	! %l4 = psr & ~PSR_PIL |
 2663: 	sll	%l3, 8, %l5	!	intlev << IPLSHIFT
 2664: 	std	%g6, [%sp + CCFSZ + 40]
 2665: 	or	%l5, %l4, %l4	!			;
 2666: 	wr	%l4, 0, %psr	! the manual claims this
 2667: 	wr	%l4, PSR_ET, %psr	! song and dance is necessary
 2668: 	std	%l0, [%sp + CCFSZ + 0]	! set up intrframe/clockframe
 2669: 	sll	%l3, 2, %l5
 2670: 	set	_C_LABEL(intrcnt), %l4	! intrcnt[intlev]++;
 2671: 	ld	[%l4 + %l5], %o0
 2672: 	std	%l2, [%sp + CCFSZ + 8]	! set up intrframe/clockframe
 2673: 	inc	%o0
 2674: 	st	%o0, [%l4 + %l5]
 2675: 
 2676: 	st	%fp, [%sp + CCFSZ + 16]
 2677: 
 2678: 	/* Unhandled interrupts while cold cause IPL to be raised to `high' */
 2679: 	sethi	%hi(_C_LABEL(cold)), %o0
 2680: 	ld	[%o0 + %lo(_C_LABEL(cold))], %o0
 2681: 	tst	%o0	! if (cold) {
 2682: 	bnz,a	1f	!	splhigh();
 2683: 	 or	%l0, 0xf00, %l0	! } else
 2684: 
 2685: 	call	_C_LABEL(bogusintr)	! strayintr(&intrframe)
 2686: 	 add	%sp, CCFSZ, %o0
 2687: 	/* all done: restore registers and go return */
 2688: 1:
 2689: 	mov	%l7, %g1
 2690: 	wr	%l6, 0, %y
 2691: 	ldd	[%sp + CCFSZ + 24], %g2
 2692: 	ldd	[%sp + CCFSZ + 32], %g4
 2693: 	ldd	[%sp + CCFSZ + 40], %g6
 2694: 	b	return_from_trap
 2695: 	 wr	%l0, 0, %psr
 2696: #endif /* DIAGNOSTIC */
1.149 uwe 2697: #endif /* SUN4M */
1.52 pk 2698:
/*
 * Hardware interrupt dispatch, common to sun4/sun4c (entered directly as
 * sparc_interrupt44c) and sun4m (reached from sparc_interrupt4m above).
 * Builds an interrupt frame, raises PIL to the interrupt level, bumps
 * intrcnt[intlev], then walks intrhand[intlev] calling each handler until
 * one returns nonzero; if none claims it, calls strayintr().
 * On entry: %l0 = %psr, %l1 = pc, %l2 = npc, %l3 = interrupt level.
 */
1.111 pk 2699: _ENTRY(_C_LABEL(sparc_interrupt44c))
 2700: sparc_interrupt_common:
1.1 deraadt 2701: 	INTR_SETUP(-CCFSZ-80)
 2702: 	std	%g2, [%sp + CCFSZ + 24]	! save registers
1.111 pk 2703: 	INCR(_C_LABEL(uvmexp)+V_INTR)	! cnt.v_intr++; (clobbers %o0,%o1)
1.1 deraadt 2704: 	mov	%g1, %l7	! save %g1, %y in locals across handlers
 2705: 	rd	%y, %l6
 2706: 	std	%g4, [%sp + CCFSZ + 32]
 2707: 	andn	%l0, PSR_PIL, %l4	! %l4 = psr & ~PSR_PIL |
 2708: 	sll	%l3, 8, %l5	!	intlev << IPLSHIFT
 2709: 	std	%g6, [%sp + CCFSZ + 40]
 2710: 	or	%l5, %l4, %l4	!			;
 2711: 	wr	%l4, 0, %psr	! the manual claims this
 2712: 	wr	%l4, PSR_ET, %psr	! song and dance is necessary
 2713: 	std	%l0, [%sp + CCFSZ + 0]	! set up intrframe/clockframe
 2714: 	sll	%l3, 2, %l5	! %l5 = intlev * sizeof(int) (array index)
1.111 pk 2715: 	set	_C_LABEL(intrcnt), %l4	! intrcnt[intlev]++;
1.1 deraadt 2716: 	ld	[%l4 + %l5], %o0
 2717: 	std	%l2, [%sp + CCFSZ + 8]	! set up intrframe/clockframe
 2718: 	inc	%o0
 2719: 	st	%o0, [%l4 + %l5]
1.111 pk 2720: 	set	_C_LABEL(intrhand), %l4	! %l4 = intrhand[intlev];
1.1 deraadt 2721: 	ld	[%l4 + %l5], %l4
1.137 mrg 2722: 
1.175 pk 2723: #if defined(MULTIPROCESSOR)
1.177 pk 2724: 	/* Grab the kernel lock for interrupt levels <= IPL_CLOCK */
 2725: 	cmp	%l3, IPL_CLOCK
1.183 pk 2726: 	bgeu	3f
1.177 pk 2727: 	 st	%fp, [%sp + CCFSZ + 16]
1.137 mrg 2728: 	call	_C_LABEL(intr_lock_kernel)
 2729: 	 nop
 2730: #endif
1.1 deraadt 2731: 	b	3f
 2732: 	 st	%fp, [%sp + CCFSZ + 16]
 2733: 
	! Handler loop: stop at the first handler that claims the interrupt.
1.166 pk 2734: 1:	ld	[%l4 + 12], %o2	! ih->ih_classipl
 2735: 	rd	%psr, %o3	! (bits already shifted to PIL field)
 2736: 	andn	%o3, PSR_PIL, %o3	! %o3 = psr & ~PSR_PIL
 2737: 	wr	%o3, %o2, %psr	! splraise(ih->ih_classipl)
 2738: 	ld	[%l4], %o1	! %o1 = ih->ih_fun
1.1 deraadt 2739: 	ld	[%l4 + 4], %o0	! %o0 = ih->ih_arg
1.166 pk 2740: 	nop	! one more isns before touching ICC
1.1 deraadt 2741: 	tst	%o0
 2742: 	bz,a	2f	! if ih_arg == NULL, pass &frame instead
 2743: 	 add	%sp, CCFSZ, %o0
 2744: 2:	jmpl	%o1, %o7	! handled = (*ih->ih_fun)(...)
 2745: 	 ld	[%l4 + 8], %l4	! and ih = ih->ih_next
 2746: 	tst	%o0
 2747: 	bnz	4f	! if (handled) break
 2748: 	 nop
 2749: 3:	tst	%l4
 2750: 	bnz	1b	! while (ih)
 2751: 	 nop
1.76 pk 2752: 
 2753: 	/* Unhandled interrupts while cold cause IPL to be raised to `high' */
1.111 pk 2754: 	sethi	%hi(_C_LABEL(cold)), %o0
 2755: 	ld	[%o0 + %lo(_C_LABEL(cold))], %o0
1.76 pk 2756: 	tst	%o0	! if (cold) {
 2757: 	bnz,a	4f	!	splhigh();
 2758: 	 or	%l0, 0xf00, %l0	! } else
 2759: 
1.111 pk 2760: 	call	_C_LABEL(strayintr)	!	strayintr(&intrframe)
1.1 deraadt 2761: 	 add	%sp, CCFSZ, %o0
 2762: 	/* all done: restore registers and go return */
1.137 mrg 2763: 4:
1.175 pk 2764: #if defined(MULTIPROCESSOR)
1.177 pk 2765: 	cmp	%l3, IPL_CLOCK	! release kernel lock if we took it above
1.183 pk 2766: 	bgeu	0f
1.170 pk 2767: 	 nop
1.137 mrg 2768: 	call	_C_LABEL(intr_unlock_kernel)
 2769: 	 nop
1.170 pk 2770: 0:
1.137 mrg 2771: #endif
 2772: 	mov	%l7, %g1	! restore %g1, %y and the rest; return
1.1 deraadt 2773: 	wr	%l6, 0, %y
 2774: 	ldd	[%sp + CCFSZ + 24], %g2
 2775: 	ldd	[%sp + CCFSZ + 32], %g4
 2776: 	ldd	[%sp + CCFSZ + 40], %g6
 2777: 	b	return_from_trap
 2778: 	 wr	%l0, 0, %psr
 2779: 
2779:
1.199 pk 2780: #if defined(MULTIPROCESSOR)
 2781: /*
 2782:  * Level 14 software interrupt: fast IPI
1.201 pk 2783:  *	<%l0,%l1,%l2> = <psr, pc, npc>
 2784:  *	%l3 = int level
 2785:  *	%l6 = &cpuinfo
 *
 * Bumps the 64-bit lev14_evcnt counter, then dispatches to the fast
 * cross-call handler whose address was stored in CPUINFO_XMSG_TRAP
 * (one of the ft_* handlers below), with its first argument prefetched
 * into %l3.
1.199 pk 2786:  */
 2787: lev14_softint:
1.202 pk 2788: 	set	_C_LABEL(lev14_evcnt), %l7	! lev14_evcnt.ev_count++;
 2789: 	ldd	[%l7 + EV_COUNT], %l4	! %l4:%l5 = 64-bit event count
 2790: 	inccc	%l5	! increment low word, set carry
 2791: 	addx	%l4, %g0, %l4	! propagate carry into high word
 2792: 	std	%l4, [%l7 + EV_COUNT]
1.199 pk 2793: 
 2794: 	ld	[%l6 + CPUINFO_XMSG_TRAP], %l7	! %l7 = xcall fast-trap handler
 2795: #ifdef DIAGNOSTIC
 2796: 	tst	%l7	! no handler registered => bogus IPI
 2797: 	bz	sparc_interrupt4m_bogus
 2798: 	 nop
 2799: #endif
 2800: 	jmp	%l7
 2801: 	 ld	[%l6 + CPUINFO_XMSG_ARG0], %l3	! prefetch 1st arg
 2802: 
2802:
 2803: /*
 2804:  * Fast flush handlers. xcalled from other CPUs throught soft interrupt 14
 2805:  * On entry:	%l6 = CPUINFO_VA
 2806:  *		%l3 = first argument
 2807:  *
 2808:  * As always, these fast trap handlers should preserve all registers
 2809:  * except %l3 to %l7
 *
 * ft_tlb_flush: flush one TLB entry: args are <va, context, level>.
 * Temporarily switches the MMU context register to the target context,
 * issues the probe-and-flush, then returns via ft_rett (which restores
 * the context and signals completion).
 2810:  */
1.111 pk (annotation inherited) — see _ENTRY below
 2811: _ENTRY(_C_LABEL(ft_tlb_flush))
 2812: 	! <%l3 already fetched for us>	! va
 2813: 	ld	[%l6 + CPUINFO_XMSG_ARG2], %l5	! level
 2814: 	andn	%l3, 0xfff, %l3	! %l3 = (va&~0xfff | lvl);
 2815: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l4	! context
 2816: 	or	%l3, %l5, %l3
 2817: 
 2818: 	mov	SRMMU_CXR, %l7	!
 2819: 	lda	[%l7]ASI_SRMMU, %l5	! %l5 = old context
 2820: 	sta	%l4, [%l7]ASI_SRMMU	! set new context
 2821: 
 2822: 	sta	%g0, [%l3]ASI_SRMMUFP	! flush TLB
 2823: 
 2824: ft_rett:
 2825: 	! common return from Fast Flush handlers
 2826: 	! enter here with %l5 = ctx to restore, %l6 = CPUINFO_VA, %l7 = ctx reg
 2827: 	mov	1, %l4	!
 2828: 	sta	%l5, [%l7]ASI_SRMMU	! restore context
 2829: 	st	%l4, [%l6 + CPUINFO_XMSG_CMPLT]	! completed = 1
 2830: 
 2831: 	mov	%l0, %psr	! return from trap
 2832: 	 nop
 2833: 	RETT
 2834: 
2834:
/*
 * ft_srmmu_vcache_flush_page: xcall handler; flush one page (4096 bytes)
 * of the virtual cache plus its TLB entry.  Args: <va, context>.
 * Switches to the target context, flushes one cache line at a time,
 * flushes the TLB entry, then returns through ft_rett.
 */
 2835: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_page))
 2836: 	! <%l3 already fetched for us>	! va
 2837: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l4	! context
 2838: 
 2839: 	mov	SRMMU_CXR, %l7	!
 2840: 	lda	[%l7]ASI_SRMMU, %l5	! %l5 = old context
 2841: 	sta	%l4, [%l7]ASI_SRMMU	! set new context
 2842: 
 2843: 	set	4096, %l4	! N = page size
 2844: 	ld	[%l6 + CPUINFO_CACHE_LINESZ], %l7
 2845: 1:
 2846: 	sta	%g0, [%l3]ASI_IDCACHELFP	! flush cache line
 2847: 	subcc	%l4, %l7, %l4	! p += linesz;
1.204 pk 2848: 	bgu	1b	! while ((N -= linesz) > 0)
1.199 pk 2849: 	 add	%l3, %l7, %l3
 2850: 
 2851: 	ld	[%l6 + CPUINFO_XMSG_ARG0], %l3	! reload va
 2852: 	!or	%l3, ASI_SRMMUFP_L3(=0), %l3	! va |= ASI_SRMMUFP_L3
 2853: 	sta	%g0, [%l3]ASI_SRMMUFP	! flush TLB
 2854: 
 2855: 	b	ft_rett
 2856: 	 mov	SRMMU_CXR, %l7	! reload ctx register
 2857: 
2857:
/*
 * ft_srmmu_vcache_flush_segment: xcall handler; flush all cache lines of
 * one segment using the segment-flush ASI.  Args: <vr, vs, context>.
 * The segment VA is reconstructed as (vr << 24) | (vs << 18); the loop
 * walks CPUINFO_CACHE_NLINES lines of CPUINFO_CACHE_LINESZ bytes each.
 * Returns through ft_rett.
 */
 2858: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_segment))
 2859: 	! <%l3 already fetched for us>	! vr
 2860: 	ld	[%l6 + CPUINFO_XMSG_ARG1], %l5	! vs
 2861: 	ld	[%l6 + CPUINFO_XMSG_ARG2], %l4	! context
 2862: 
 2863: 	sll	%l3, 24, %l3	! va = VSTOVA(vr,vs)
 2864: 	sll	%l5, 18, %l5
 2865: 	or	%l3, %l5, %l3
 2866: 
 2867: 	mov	SRMMU_CXR, %l7	!
 2868: 	lda	[%l7]ASI_SRMMU, %l5	! %l5 = old context
 2869: 	sta	%l4, [%l7]ASI_SRMMU	! set new context
 2870: 
 2871: 	ld	[%l6 + CPUINFO_CACHE_NLINES], %l4
 2872: 	ld	[%l6 + CPUINFO_CACHE_LINESZ], %l7
 2873: 1:
 2874: 	sta	%g0, [%l3]ASI_IDCACHELFS	! flush cache line
 2875: 	deccc	%l4	! p += linesz;
1.204 pk 2876: 	bgu	1b	! while (--nlines > 0)
1.199 pk 2877: 	 add	%l3, %l7, %l3
 2878: 
 2879: 	b	ft_rett
 2880: 	 mov	SRMMU_CXR, %l7	! reload ctx register
 2881: 
2881:
2882: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_region))
2883: ! <%l3 already fetched for us> ! vr
2884: ld [%l6 + CPUINFO_XMSG_ARG1], %l4 ! context
2885:
2886: sll %l3, 24, %l3 ! va = VRTOVA(vr)
2887:
2888: mov SRMMU_CXR, %l7 !
2889: lda [%l7]ASI_SRMMU, %l5 ! %l5 = old context
2890: sta %l4, [%l7]ASI_SRMMU ! set new context
2891:
2892: ld [%l6 + CPUINFO_CACHE_NLINES], %l4
2893: ld [%l6 + CPUINFO_CACHE_LINESZ], %l7
2894: 1:
2895: sta %g0, [%l3]ASI_IDCACHELFR ! flush cache line
2896: deccc %l4 ! p += linesz;
1.204 pk 2897: bgu 1b ! while (--nlines > 0)
1.199 pk 2898: add %l3, %l7, %l3
2899:
2900: b ft_rett
2901: mov SRMMU_CXR, %l7 ! reload ctx register
2902:
2903: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_context))
2904: ! <%l3 already fetched for us> ! context
2905:
2906: mov SRMMU_CXR, %l7 !
2907: lda [%l7]ASI_SRMMU, %l5 ! %l5 = old context
2908: sta %l3, [%l7]ASI_SRMMU ! set new context
2909:
2910: ld [%l6 + CPUINFO_CACHE_NLINES], %l4
2911: ld [%l6 + CPUINFO_CACHE_LINESZ], %l7
2912: mov %g0, %l3 ! va = 0
2913: 1:
2914: sta %g0, [%l3]ASI_IDCACHELFC ! flush cache line
2915: deccc %l4 ! p += linesz;
1.204 pk 2916: bgu 1b ! while (--nlines > 0)
1.199 pk 2917: add %l3, %l7, %l3
2918:
2919: b ft_rett
2920: mov SRMMU_CXR, %l7 ! reload ctx register
2921:
2922: _ENTRY(_C_LABEL(ft_srmmu_vcache_flush_range))
2923: ! <%l3 already fetched for us> ! va
2924: ld [%l6 + CPUINFO_XMSG_ARG2], %l4 ! context
2925:
2926: mov SRMMU_CXR, %l7 !
2927: lda [%l7]ASI_SRMMU, %l5 ! %l5 = old context
2928: sta %l4, [%l7]ASI_SRMMU ! set new context
2929:
2930: ld [%l6 + CPUINFO_XMSG_ARG1], %l4 ! size
1.204 pk 2931: and %l3, 7, %l7 ! double-word alignment
2932: andn %l3, 7, %l3 ! off = va & 7; va &= ~7
2933: add %l4, %l7, %l4 ! sz += off
2934:
1.199 pk 2935: ld [%l6 + CPUINFO_CACHE_LINESZ], %l7
2936: 1:
2937: sta %g0, [%l3]ASI_IDCACHELFP ! flush cache line
2938: subcc %l4, %l7, %l4 ! p += linesz;
1.204 pk 2939: bgu 1b ! while ((sz -= linesz) > 0)
1.199 pk 2940: add %l3, %l7, %l3
2941:
2942: /* Flush TLB on all pages we visited */
2943: ld [%l6 + CPUINFO_XMSG_ARG0], %l3 ! reload va
2944: ld [%l6 + CPUINFO_XMSG_ARG1], %l4 ! reload sz
2945: add %l3, %l4, %l4 ! %l4 = round_page(va + sz)
2946: add %l4, 0xfff, %l4
2947: andn %l4, 0xfff, %l4
2948: andn %l3, 0xfff, %l3 ! va &= ~PGOFSET;
2949: sub %l4, %l3, %l4 ! and finally: size rounded
2950: ! to page boundary
1.202 pk 2951: set 4096, %l7 ! page size
1.199 pk 2952:
2953: 2:
2954: !or %l3, ASI_SRMMUFP_L3(=0), %l3 ! va |= ASI_SRMMUFP_L3
2955: sta %g0, [%l3]ASI_SRMMUFP ! flush TLB
2956: subcc %l4, %l7, %l4 ! while ((sz -= PGSIZE) > 0)
1.204 pk 2957: bgu 2b
1.199 pk 2958: add %l3, %l7, %l3
2959:
2960: b ft_rett
2961: mov SRMMU_CXR, %l7 ! reload ctx register
2962:
1.206 pk 2963: _ENTRY(_C_LABEL(ft_want_ast))
2964: mov 1, %l4 ! ack xcall in all cases
2965: st %l4, [%l6 + CPUINFO_XMSG_CMPLT] ! completed = 1
2966:
2967: btst PSR_PS, %l0 ! if from user mode
2968: be,a slowtrap ! call trap(T_AST)
2969: mov T_AST, %l3
2970:
2971: mov %l0, %psr ! else return from trap
2972: nop ! AST will be noticed on out way out
2973: RETT
1.199 pk 2974: #endif /* MULTIPROCESSOR */
2975:
1.1 deraadt 2976: #ifdef notyet
2977: /*
2978: * Level 12 (ZS serial) interrupt. Handle it quickly, schedule a
2979: * software interrupt, and get out. Do the software interrupt directly
2980: * if we would just take it on the way out.
2981: *
2982: * Input:
2983: * %l0 = %psr
2984: * %l1 = return pc
2985: * %l2 = return npc
2986: * Internal:
2987: * %l3 = zs device
2988: * %l4, %l5 = temporary
2989: * %l6 = rr3 (or temporary data) + 0x100 => need soft int
2990: * %l7 = zs soft status
2991: */
2992: zshard:
2993: #endif /* notyet */
2994:
2995: /*
2996: * Level 15 interrupt. An async memory error has occurred;
1.212 pooka 2997: * take care of it (typically by panicking, but hey...).
1.1 deraadt 2998: * %l0 = %psr
2999: * %l1 = return pc
3000: * %l2 = return npc
3001: * %l3 = 15 * 4 (why? just because!)
3002: *
3003: * Internal:
3004: * %l4 = %y
3005: * %l5 = %g1
3006: * %l6 = %g6
3007: * %l7 = %g7
3008: * g2, g3, g4, g5 go to stack
3009: *
3010: * This code is almost the same as that in mem_access_fault,
3011: * except that we already know the problem is not a `normal' fault,
3012: * and that we must be extra-careful with interrupt enables.
3013: */
1.52 pk 3014:
3015: #if defined(SUN4)
3016: nmi_sun4:
1.1 deraadt 3017: INTR_SETUP(-CCFSZ-80)
1.111 pk 3018: INCR(_C_LABEL(uvmexp)+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1)
1.1 deraadt 3019: /*
3020: * Level 15 interrupts are nonmaskable, so with traps off,
3021: * disable all interrupts to prevent recursion.
3022: */
1.62 pk 3023: sethi %hi(INTRREG_VA), %o0
3024: ldub [%o0 + %lo(INTRREG_VA)], %o1
1.157 uwe 3025: andn %o1, IE_ALLIE, %o1
1.62 pk 3026: stb %o1, [%o0 + %lo(INTRREG_VA)]
1.1 deraadt 3027: wr %l0, PSR_ET, %psr ! okay, turn traps on again
3028:
3029: std %g2, [%sp + CCFSZ + 0] ! save g2, g3
3030: rd %y, %l4 ! save y
3031:
1.19 deraadt 3032: std %g4, [%sp + CCFSZ + 8] ! save g4, g5
3033: mov %g1, %l5 ! save g1, g6, g7
3034: mov %g6, %l6
3035: mov %g7, %l7
3036: #if defined(SUN4C) || defined(SUN4M)
1.52 pk 3037: b,a nmi_common
1.19 deraadt 3038: #endif /* SUN4C || SUN4M */
1.52 pk 3039: #endif
3040:
3041: #if defined(SUN4C)
3042: nmi_sun4c:
3043: INTR_SETUP(-CCFSZ-80)
1.111 pk 3044: INCR(_C_LABEL(uvmexp)+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1)
1.52 pk 3045: /*
3046: * Level 15 interrupts are nonmaskable, so with traps off,
3047: * disable all interrupts to prevent recursion.
3048: */
1.62 pk 3049: sethi %hi(INTRREG_VA), %o0
3050: ldub [%o0 + %lo(INTRREG_VA)], %o1
1.157 uwe 3051: andn %o1, IE_ALLIE, %o1
1.62 pk 3052: stb %o1, [%o0 + %lo(INTRREG_VA)]
1.52 pk 3053: wr %l0, PSR_ET, %psr ! okay, turn traps on again
3054:
3055: std %g2, [%sp + CCFSZ + 0] ! save g2, g3
3056: rd %y, %l4 ! save y
3057:
3058: ! must read the sync error register too.
1.1 deraadt 3059: set AC_SYNC_ERR, %o0
3060: lda [%o0] ASI_CONTROL, %o1 ! sync err reg
3061: inc 4, %o0
3062: lda [%o0] ASI_CONTROL, %o2 ! sync virt addr
3063: std %g4, [%sp + CCFSZ + 8] ! save g4,g5
3064: mov %g1, %l5 ! save g1,g6,g7
3065: mov %g6, %l6
3066: mov %g7, %l7
3067: inc 4, %o0
3068: lda [%o0] ASI_CONTROL, %o3 ! async err reg
3069: inc 4, %o0
3070: lda [%o0] ASI_CONTROL, %o4 ! async virt addr
1.52 pk 3071: #if defined(SUN4M)
3072: !!b,a nmi_common
3073: #endif /* SUN4M */
3074: #endif /* SUN4C */
3075:
3076: nmi_common:
1.1 deraadt 3077: ! and call C code
1.111 pk 3078: call _C_LABEL(memerr4_4c) ! memerr(0, ser, sva, aer, ava)
1.95 pk 3079: clr %o0
1.1 deraadt 3080:
3081: mov %l5, %g1 ! restore g1 through g7
3082: ldd [%sp + CCFSZ + 0], %g2
3083: ldd [%sp + CCFSZ + 8], %g4
3084: wr %l0, 0, %psr ! re-disable traps
3085: mov %l6, %g6
3086: mov %l7, %g7
3087:
3088: ! set IE_ALLIE again (safe, we disabled traps again above)
1.62 pk 3089: sethi %hi(INTRREG_VA), %o0
3090: ldub [%o0 + %lo(INTRREG_VA)], %o1
1.1 deraadt 3091: or %o1, IE_ALLIE, %o1
1.62 pk 3092: stb %o1, [%o0 + %lo(INTRREG_VA)]
1.1 deraadt 3093: b return_from_trap
3094: wr %l4, 0, %y ! restore y
3095:
1.52 pk 3096: #if defined(SUN4M)
3097: nmi_sun4m:
3098: INTR_SETUP(-CCFSZ-80)
1.111 pk 3099: INCR(_C_LABEL(uvmexp)+V_INTR) ! cnt.v_intr++; (clobbers %o0,%o1)
1.94 pk 3100:
3101: /* Read the Pending Interrupts register */
1.96 pk 3102: sethi %hi(CPUINFO_VA+CPUINFO_INTREG), %l6
3103: ld [%l6 + %lo(CPUINFO_VA+CPUINFO_INTREG)], %l6
3104: ld [%l6 + ICR_PI_PEND_OFFSET], %l5 ! get pending interrupts
3105:
1.111 pk 3106: set _C_LABEL(nmi_soft), %o3 ! assume a softint
1.105 pk 3107: set PINTR_IC, %o1 ! hard lvl 15 bit
3108: sethi %hi(PINTR_SINTRLEV(15)), %o0 ! soft lvl 15 bit
1.94 pk 3109: btst %o0, %l5 ! soft level 15?
1.101 pk 3110: bnz,a 1f !
1.105 pk 3111: mov %o0, %o1 ! shift int clear bit to SOFTINT 15
3112:
1.154 thorpej 3113: set _C_LABEL(nmi_hard), %o3 /* it's a hardint; switch handler */
1.94 pk 3114:
1.52 pk 3115: /*
3116: * Level 15 interrupts are nonmaskable, so with traps off,
3117: * disable all interrupts to prevent recursion.
3118: */
3119: sethi %hi(ICR_SI_SET), %o0
1.101 pk 3120: set SINTR_MA, %o2
3121: st %o2, [%o0 + %lo(ICR_SI_SET)]
1.142 mrg 3122: #if defined(MULTIPROCESSOR) && defined(DDB)
3123: b 2f
3124: clr %o0
3125: #endif
1.52 pk 3126:
1.101 pk 3127: 1:
1.142 mrg 3128: #if defined(MULTIPROCESSOR) && defined(DDB)
3129: /*
3130: * Setup a trapframe for nmi_soft; this might be an IPI telling
3131: * us to pause, so lets save some state for DDB to get at.
3132: */
3133: std %l0, [%sp + CCFSZ] ! tf.tf_psr = psr; tf.tf_pc = ret_pc;
3134: rd %y, %l3
3135: std %l2, [%sp + CCFSZ + 8] ! tf.tf_npc = return_npc; tf.tf_y = %y;
3136: st %g1, [%sp + CCFSZ + 20]
3137: std %g2, [%sp + CCFSZ + 24]
3138: std %g4, [%sp + CCFSZ + 32]
3139: std %g6, [%sp + CCFSZ + 40]
3140: std %i0, [%sp + CCFSZ + 48]
3141: std %i2, [%sp + CCFSZ + 56]
3142: std %i4, [%sp + CCFSZ + 64]
3143: std %i6, [%sp + CCFSZ + 72]
3144: add %sp, CCFSZ, %o0
3145: 2:
3146: #else
3147: clr %o0
3148: #endif
1.105 pk 3149: /*
3150: * Now clear the NMI. Apparently, we must allow some time
3151: * to let the bits sink in..
3152: */
1.96 pk 3153: st %o1, [%l6 + ICR_PI_CLR_OFFSET]
1.105 pk 3154: nop; nop; nop;
3155: ld [%l6 + ICR_PI_PEND_OFFSET], %g0 ! drain register!?
1.172 pk 3156: nop;
1.52 pk 3157:
1.172 pk 3158: or %l0, PSR_PIL, %o4 ! splhigh()
3159: wr %o4, 0, %psr !
3160: wr %o4, PSR_ET, %psr ! turn traps on again
1.52 pk 3161:
1.142 mrg 3162: std %g2, [%sp + CCFSZ + 80] ! save g2, g3
1.52 pk 3163: rd %y, %l4 ! save y
1.142 mrg 3164: std %g4, [%sp + CCFSZ + 88] ! save g4,g5
1.52 pk 3165:
3166: /* Finish stackframe, call C trap handler */
3167: mov %g1, %l5 ! save g1,g6,g7
3168: mov %g6, %l6
3169:
1.142 mrg 3170: jmpl %o3, %o7 ! nmi_hard(0) or nmi_soft(&tf)
3171: mov %g7, %l7
1.105 pk 3172:
1.52 pk 3173: mov %l5, %g1 ! restore g1 through g7
1.142 mrg 3174: ldd [%sp + CCFSZ + 80], %g2
3175: ldd [%sp + CCFSZ + 88], %g4
1.52 pk 3176: wr %l0, 0, %psr ! re-disable traps
3177: mov %l6, %g6
3178: mov %l7, %g7
3179:
1.105 pk 3180: !cmp %o0, 0 ! was this a soft nmi
3181: !be 4f
1.154 thorpej 3182: /* XXX - we need to unblock `mask all ints' only on a hard nmi */
1.101 pk 3183:
1.52 pk 3184: ! enable interrupts again (safe, we disabled traps again above)
3185: sethi %hi(ICR_SI_CLR), %o0
3186: set SINTR_MA, %o1
3187: st %o1, [%o0 + %lo(ICR_SI_CLR)]
3188:
1.101 pk 3189: 4:
1.52 pk 3190: b return_from_trap
3191: wr %l4, 0, %y ! restore y
3192: #endif /* SUN4M */
3193:
3194: #ifdef GPROF
3195: .globl window_of, winof_user
3196: .globl window_uf, winuf_user, winuf_ok, winuf_invalid
3197: .globl return_from_trap, rft_kernel, rft_user, rft_invalid
3198: .globl softtrap, slowtrap
1.122 christos 3199: .globl clean_trap_window, _C_LABEL(_syscall)
1.52 pk 3200: #endif
1.1 deraadt 3201:
3202: /*
3203: * Window overflow trap handler.
3204: * %l0 = %psr
3205: * %l1 = return pc
3206: * %l2 = return npc
3207: */
3208: window_of:
3209: #ifdef TRIVIAL_WINDOW_OVERFLOW_HANDLER
3210: /* a trivial version that assumes %sp is ok */
3211: /* (for testing only!) */
3212: save %g0, %g0, %g0
3213: std %l0, [%sp + (0*8)]
3214: rd %psr, %l0
3215: mov 1, %l1
3216: sll %l1, %l0, %l0
3217: wr %l0, 0, %wim
3218: std %l2, [%sp + (1*8)]
3219: std %l4, [%sp + (2*8)]
3220: std %l6, [%sp + (3*8)]
3221: std %i0, [%sp + (4*8)]
3222: std %i2, [%sp + (5*8)]
3223: std %i4, [%sp + (6*8)]
3224: std %i6, [%sp + (7*8)]
3225: restore
3226: RETT
3227: #else
3228: /*
3229: * This is similar to TRAP_SETUP, but we do not want to spend
3230: * a lot of time, so we have separate paths for kernel and user.
3231: * We also know for sure that the window has overflowed.
3232: */
1.173 pk 3233: TRAP_TRACE2(5,%l6,%l5)
1.1 deraadt 3234: btst PSR_PS, %l0
3235: bz winof_user
3236: sethi %hi(clean_trap_window), %l7
3237:
3238: /*
3239: * Overflow from kernel mode. Call clean_trap_window to
3240: * do the dirty work, then just return, since we know prev
3241: * window is valid. clean_trap_windows might dump all *user*
3242: * windows into the pcb, but we do not care: there is at
3243: * least one kernel window (a trap or interrupt frame!)
3244: * above us.
3245: */
3246: jmpl %l7 + %lo(clean_trap_window), %l4
3247: mov %g7, %l7 ! for clean_trap_window
3248:
3249: wr %l0, 0, %psr ! put back the @%*! cond. codes
3250: nop ! (let them settle in)
3251: RETT
3252:
3253: winof_user:
3254: /*
3255: * Overflow from user mode.
3256: * If clean_trap_window dumps the registers into the pcb,
3257: * rft_user will need to call trap(), so we need space for
3258: * a trap frame. We also have to compute pcb_nw.
3259: *
3260: * SHOULD EXPAND IN LINE TO AVOID BUILDING TRAP FRAME ON
3261: * `EASY' SAVES
3262: */
1.111 pk 3263: sethi %hi(cpcb), %l6
3264: ld [%l6 + %lo(cpcb)], %l6
1.1 deraadt 3265: ld [%l6 + PCB_WIM], %l5
3266: and %l0, 31, %l3
3267: sub %l3, %l5, %l5 /* l5 = CWP - pcb_wim */
3268: set uwtab, %l4
3269: ldub [%l4 + %l5], %l5 /* l5 = uwtab[l5] */
3270: st %l5, [%l6 + PCB_UW]
3271: jmpl %l7 + %lo(clean_trap_window), %l4
3272: mov %g7, %l7 ! for clean_trap_window
1.111 pk 3273: sethi %hi(cpcb), %l6
3274: ld [%l6 + %lo(cpcb)], %l6
1.13 deraadt 3275: set USPACE-CCFSZ-80, %l5
1.1 deraadt 3276: add %l6, %l5, %sp /* over to kernel stack */
3277: CHECK_SP_REDZONE(%l6, %l5)
3278:
3279: /*
3280: * Copy return_from_trap far enough to allow us
3281: * to jump directly to rft_user_or_recover_pcb_windows
3282: * (since we know that is where we are headed).
3283: */
3284: ! and %l0, 31, %l3 ! still set (clean_trap_window
3285: ! leaves this register alone)
3286: set wmask, %l6
3287: ldub [%l6 + %l3], %l5 ! %l5 = 1 << ((CWP + 1) % nwindows)
3288: b rft_user_or_recover_pcb_windows
3289: rd %wim, %l4 ! (read %wim first)
3290: #endif /* end `real' version of window overflow trap handler */
3291:
3292: /*
3293: * Window underflow trap handler.
3294: * %l0 = %psr
3295: * %l1 = return pc
3296: * %l2 = return npc
3297: *
3298: * A picture:
3299: *
3300: * T R I X
3301: * 0 0 0 1 0 0 0 (%wim)
3302: * [bit numbers increase towards the right;
3303: * `restore' moves right & `save' moves left]
3304: *
3305: * T is the current (Trap) window, R is the window that attempted
3306: * a `Restore' instruction, I is the Invalid window, and X is the
3307: * window we want to make invalid before we return.
3308: *
3309: * Since window R is valid, we cannot use rft_user to restore stuff
3310: * for us. We have to duplicate its logic. YUCK.
3311: *
3312: * Incidentally, TRIX are for kids. Silly rabbit!
3313: */
3314: window_uf:
3315: #ifdef TRIVIAL_WINDOW_UNDERFLOW_HANDLER
3316: wr %g0, 0, %wim ! allow us to enter I
3317: restore ! to R
3318: nop
3319: nop
3320: restore ! to I
3321: restore %g0, 1, %l1 ! to X
3322: rd %psr, %l0
3323: sll %l1, %l0, %l0
3324: wr %l0, 0, %wim
3325: save %g0, %g0, %g0 ! back to I
3326: LOADWIN(%sp)
3327: save %g0, %g0, %g0 ! back to R
3328: save %g0, %g0, %g0 ! back to T
3329: RETT
3330: #else
1.173 pk 3331: TRAP_TRACE2(6,%l6,%l5)
1.1 deraadt 3332: wr %g0, 0, %wim ! allow us to enter I
3333: btst PSR_PS, %l0
3334: restore ! enter window R
3335: bz winuf_user
3336: restore ! enter window I
3337:
3338: /*
3339: * Underflow from kernel mode. Just recover the
3340: * registers and go (except that we have to update
3341: * the blasted user pcb fields).
3342: */
3343: restore %g0, 1, %l1 ! enter window X, then set %l1 to 1
3344: rd %psr, %l0 ! cwp = %psr & 31;
3345: and %l0, 31, %l0
3346: sll %l1, %l0, %l1 ! wim = 1 << cwp;
3347: wr %l1, 0, %wim ! setwim(wim);
1.111 pk 3348: sethi %hi(cpcb), %l1
3349: ld [%l1 + %lo(cpcb)], %l1
1.1 deraadt 3350: st %l0, [%l1 + PCB_WIM] ! cpcb->pcb_wim = cwp;
3351: save %g0, %g0, %g0 ! back to window I
3352: LOADWIN(%sp)
3353: save %g0, %g0, %g0 ! back to R
3354: save %g0, %g0, %g0 ! and then to T
3355: wr %l0, 0, %psr ! fix those cond codes....
3356: nop ! (let them settle in)
3357: RETT
3358:
3359: winuf_user:
3360: /*
3361: * Underflow from user mode.
3362: *
3363: * We cannot use rft_user (as noted above) because
3364: * we must re-execute the `restore' instruction.
3365: * Since it could be, e.g., `restore %l0,0,%l0',
3366: * it is not okay to touch R's registers either.
3367: *
3368: * We are now in window I.
3369: */
3370: btst 7, %sp ! if unaligned, it is invalid
3371: bne winuf_invalid
3372: EMPTY
3373:
1.111 pk 3374: sethi %hi(_C_LABEL(pgofset)), %l4
3375: ld [%l4 + %lo(_C_LABEL(pgofset))], %l4
1.62 pk 3376: PTE_OF_ADDR(%sp, %l7, winuf_invalid, %l4, NOP_ON_4M_5)
3377: CMP_PTE_USER_READ(%l7, %l5, NOP_ON_4M_6) ! if first page not readable,
1.1 deraadt 3378: bne winuf_invalid ! it is invalid
3379: EMPTY
1.13 deraadt 3380: SLT_IF_1PAGE_RW(%sp, %l7, %l4) ! first page is readable
1.1 deraadt 3381: bl,a winuf_ok ! if only one page, enter window X
3382: restore %g0, 1, %l1 ! and goto ok, & set %l1 to 1
3383: add %sp, 7*8, %l5
1.13 deraadt 3384: add %l4, 62, %l4
1.62 pk 3385: PTE_OF_ADDR(%l5, %l7, winuf_invalid, %l4, NOP_ON_4M_7)
3386: CMP_PTE_USER_READ(%l7, %l5, NOP_ON_4M_8) ! check second page too
1.1 deraadt 3387: be,a winuf_ok ! enter window X and goto ok
3388: restore %g0, 1, %l1 ! (and then set %l1 to 1)
3389:
3390: winuf_invalid:
3391: /*
3392: * We were unable to restore the window because %sp
3393: * is invalid or paged out. Return to the trap window
3394: * and call trap(T_WINUF). This will save R to the user
3395: * stack, then load both R and I into the pcb rw[] area,
3396: * and return with pcb_nsaved set to -1 for success, 0 for
3397: * failure. `Failure' indicates that someone goofed with the
3398: * trap registers (e.g., signals), so that we need to return
3399: * from the trap as from a syscall (probably to a signal handler)
3400: * and let it retry the restore instruction later. Note that
3401: * window R will have been pushed out to user space, and thus
3402: * be the invalid window, by the time we get back here. (We
3403: * continue to label it R anyway.) We must also set %wim again,
3404: * and set pcb_uw to 1, before enabling traps. (Window R is the
3405: * only window, and it is a user window).
3406: */
3407: save %g0, %g0, %g0 ! back to R
3408: save %g0, 1, %l4 ! back to T, then %l4 = 1
1.111 pk 3409: sethi %hi(cpcb), %l6
3410: ld [%l6 + %lo(cpcb)], %l6
1.1 deraadt 3411: st %l4, [%l6 + PCB_UW] ! pcb_uw = 1
3412: ld [%l6 + PCB_WIM], %l5 ! get log2(%wim)
3413: sll %l4, %l5, %l4 ! %l4 = old %wim
3414: wr %l4, 0, %wim ! window I is now invalid again
1.13 deraadt 3415: set USPACE-CCFSZ-80, %l5
1.1 deraadt 3416: add %l6, %l5, %sp ! get onto kernel stack
3417: CHECK_SP_REDZONE(%l6, %l5)
3418:
3419: /*
3420: * Okay, call trap(T_WINUF, psr, pc, &tf).
3421: * See `slowtrap' above for operation.
3422: */
3423: wr %l0, PSR_ET, %psr
3424: std %l0, [%sp + CCFSZ + 0] ! tf.tf_psr, tf.tf_pc
3425: rd %y, %l3
3426: std %l2, [%sp + CCFSZ + 8] ! tf.tf_npc, tf.tf_y
3427: mov T_WINUF, %o0
3428: st %g1, [%sp + CCFSZ + 20] ! tf.tf_global[1]
3429: mov %l0, %o1
3430: std %g2, [%sp + CCFSZ + 24] ! etc
3431: mov %l1, %o2
3432: std %g4, [%sp + CCFSZ + 32]
3433: add %sp, CCFSZ, %o3
3434: std %g6, [%sp + CCFSZ + 40]
3435: std %i0, [%sp + CCFSZ + 48] ! tf.tf_out[0], etc
3436: std %i2, [%sp + CCFSZ + 56]
3437: std %i4, [%sp + CCFSZ + 64]
1.111 pk 3438: call _C_LABEL(trap) ! trap(T_WINUF, pc, psr, &tf)
1.1 deraadt 3439: std %i6, [%sp + CCFSZ + 72] ! tf.tf_out[6]
3440:
3441: ldd [%sp + CCFSZ + 0], %l0 ! new psr, pc
3442: ldd [%sp + CCFSZ + 8], %l2 ! new npc, %y
3443: wr %l3, 0, %y
3444: ld [%sp + CCFSZ + 20], %g1
3445: ldd [%sp + CCFSZ + 24], %g2
3446: ldd [%sp + CCFSZ + 32], %g4
3447: ldd [%sp + CCFSZ + 40], %g6
3448: ldd [%sp + CCFSZ + 48], %i0 ! %o0 for window R, etc
3449: ldd [%sp + CCFSZ + 56], %i2
3450: ldd [%sp + CCFSZ + 64], %i4
3451: wr %l0, 0, %psr ! disable traps: test must be atomic
3452: ldd [%sp + CCFSZ + 72], %i6
1.111 pk 3453: sethi %hi(cpcb), %l6
3454: ld [%l6 + %lo(cpcb)], %l6
1.1 deraadt 3455: ld [%l6 + PCB_NSAVED], %l7 ! if nsaved is -1, we have our regs
3456: tst %l7
3457: bl,a 1f ! got them
3458: wr %g0, 0, %wim ! allow us to enter windows R, I
3459: b,a return_from_trap
3460:
3461: /*
3462: * Got 'em. Load 'em up.
3463: */
3464: 1:
3465: mov %g6, %l3 ! save %g6; set %g6 = cpcb
3466: mov %l6, %g6
3467: st %g0, [%g6 + PCB_NSAVED] ! and clear magic flag
3468: restore ! from T to R
3469: restore ! from R to I
3470: restore %g0, 1, %l1 ! from I to X, then %l1 = 1
3471: rd %psr, %l0 ! cwp = %psr;
3472: sll %l1, %l0, %l1
3473: wr %l1, 0, %wim ! make window X invalid
3474: and %l0, 31, %l0
3475: st %l0, [%g6 + PCB_WIM] ! cpcb->pcb_wim = cwp;
3476: nop ! unnecessary? old wim was 0...
3477: save %g0, %g0, %g0 ! back to I
3478: LOADWIN(%g6 + PCB_RW + 64) ! load from rw[1]
3479: save %g0, %g0, %g0 ! back to R
3480: LOADWIN(%g6 + PCB_RW) ! load from rw[0]
3481: save %g0, %g0, %g0 ! back to T
3482: wr %l0, 0, %psr ! restore condition codes
3483: mov %l3, %g6 ! fix %g6
3484: RETT
3485:
3486: /*
3487: * Restoring from user stack, but everything has checked out
3488: * as good. We are now in window X, and %l1 = 1. Window R
3489: * is still valid and holds user values.
3490: */
3491: winuf_ok:
3492: rd %psr, %l0
3493: sll %l1, %l0, %l1
3494: wr %l1, 0, %wim ! make this one invalid
1.111 pk 3495: sethi %hi(cpcb), %l2
3496: ld [%l2 + %lo(cpcb)], %l2
1.1 deraadt 3497: and %l0, 31, %l0
3498: st %l0, [%l2 + PCB_WIM] ! cpcb->pcb_wim = cwp;
3499: save %g0, %g0, %g0 ! back to I
3500: LOADWIN(%sp)
3501: save %g0, %g0, %g0 ! back to R
3502: save %g0, %g0, %g0 ! back to T
3503: wr %l0, 0, %psr ! restore condition codes
3504: nop ! it takes three to tangle
3505: RETT
3506: #endif /* end `real' version of window underflow trap handler */
3507:
3508: /*
3509: * Various return-from-trap routines (see return_from_trap).
3510: */
3511:
3512: /*
3513: * Return from trap, to kernel.
3514: * %l0 = %psr
3515: * %l1 = return pc
3516: * %l2 = return npc
3517: * %l4 = %wim
3518: * %l5 = bit for previous window
3519: */
3520: rft_kernel:
3521: btst %l5, %l4 ! if (wim & l5)
3522: bnz 1f ! goto reload;
3523: wr %l0, 0, %psr ! but first put !@#*% cond codes back
3524:
3525: /* previous window is valid; just rett */
3526: nop ! wait for cond codes to settle in
3527: RETT
3528:
3529: /*
3530: * Previous window is invalid.
3531: * Update %wim and then reload l0..i7 from frame.
3532: *
3533: * T I X
3534: * 0 0 1 0 0 (%wim)
3535: * [see picture in window_uf handler]
3536: *
3537: * T is the current (Trap) window, I is the Invalid window,
3538: * and X is the window we want to make invalid. Window X
3539: * currently has no useful values.
3540: */
3541: 1:
3542: wr %g0, 0, %wim ! allow us to enter window I
3543: nop; nop; nop ! (it takes a while)
3544: restore ! enter window I
3545: restore %g0, 1, %l1 ! enter window X, then %l1 = 1
3546: rd %psr, %l0 ! CWP = %psr & 31;
3547: and %l0, 31, %l0
3548: sll %l1, %l0, %l1 ! wim = 1 << CWP;
3549: wr %l1, 0, %wim ! setwim(wim);
1.111 pk 3550: sethi %hi(cpcb), %l1
3551: ld [%l1 + %lo(cpcb)], %l1
1.1 deraadt 3552: st %l0, [%l1 + PCB_WIM] ! cpcb->pcb_wim = l0 & 31;
3553: save %g0, %g0, %g0 ! back to window I
3554: LOADWIN(%sp)
3555: save %g0, %g0, %g0 ! back to window T
3556: /*
3557: * Note that the condition codes are still set from
3558: * the code at rft_kernel; we can simply return.
3559: */
3560: RETT
3561:
3562: /*
3563: * Return from trap, to user. Checks for scheduling trap (`ast') first;
3564: * will re-enter trap() if set. Note that we may have to switch from
3565: * the interrupt stack to the kernel stack in this case.
3566: * %l0 = %psr
3567: * %l1 = return pc
3568: * %l2 = return npc
3569: * %l4 = %wim
3570: * %l5 = bit for previous window
3571: * %l6 = cpcb
3572: * If returning to a valid window, just set psr and return.
3573: */
3574: rft_user:
1.179 pk 3575: ! sethi %hi(_WANT_AST)), %l7 ! (done below)
3576: ld [%l7 + %lo(_WANT_AST)], %l7
1.1 deraadt 3577: tst %l7 ! want AST trap?
3578: bne,a softtrap ! yes, re-enter trap with type T_AST
3579: mov T_AST, %o0
3580:
3581: btst %l5, %l4 ! if (wim & l5)
3582: bnz 1f ! goto reload;
3583: wr %l0, 0, %psr ! restore cond codes
3584: nop ! (three instruction delay)
3585: RETT
3586:
3587: /*
3588: * Previous window is invalid.
3589: * Before we try to load it, we must verify its stack pointer.
3590: * This is much like the underflow handler, but a bit easier
3591: * since we can use our own local registers.
3592: */
3593: 1:
3594: btst 7, %fp ! if unaligned, address is invalid
3595: bne rft_invalid
3596: EMPTY
3597:
1.111 pk 3598: sethi %hi(_C_LABEL(pgofset)), %l3
3599: ld [%l3 + %lo(_C_LABEL(pgofset))], %l3
1.62 pk 3600: PTE_OF_ADDR(%fp, %l7, rft_invalid, %l3, NOP_ON_4M_9)
3601: CMP_PTE_USER_READ(%l7, %l5, NOP_ON_4M_10) ! try first page
1.1 deraadt 3602: bne rft_invalid ! no good
3603: EMPTY
1.13 deraadt 3604: SLT_IF_1PAGE_RW(%fp, %l7, %l3)
1.1 deraadt 3605: bl,a rft_user_ok ! only 1 page: ok
3606: wr %g0, 0, %wim
3607: add %fp, 7*8, %l5
1.13 deraadt 3608: add %l3, 62, %l3
1.62 pk 3609: PTE_OF_ADDR(%l5, %l7, rft_invalid, %l3, NOP_ON_4M_11)
3610: CMP_PTE_USER_READ(%l7, %l5, NOP_ON_4M_12) ! check 2nd page too
1.1 deraadt 3611: be,a rft_user_ok
3612: wr %g0, 0, %wim
3613:
3614: /*
3615: * The window we wanted to pull could not be pulled. Instead,
3616: * re-enter trap with type T_RWRET. This will pull the window
3617: * into cpcb->pcb_rw[0] and set cpcb->pcb_nsaved to -1, which we
3618: * will detect when we try to return again.
3619: */
3620: rft_invalid:
3621: b softtrap
3622: mov T_RWRET, %o0
3623:
3624: /*
3625: * The window we want to pull can be pulled directly.
3626: */
3627: rft_user_ok:
3628: ! wr %g0, 0, %wim ! allow us to get into it
3629: wr %l0, 0, %psr ! fix up the cond codes now
3630: nop; nop; nop
3631: restore ! enter window I
3632: restore %g0, 1, %l1 ! enter window X, then %l1 = 1
3633: rd %psr, %l0 ! l0 = (junk << 5) + CWP;
3634: sll %l1, %l0, %l1 ! %wim = 1 << CWP;
3635: wr %l1, 0, %wim
1.111 pk 3636: sethi %hi(cpcb), %l1
3637: ld [%l1 + %lo(cpcb)], %l1
1.1 deraadt 3638: and %l0, 31, %l0
3639: st %l0, [%l1 + PCB_WIM] ! cpcb->pcb_wim = l0 & 31;
3640: save %g0, %g0, %g0 ! back to window I
3641: LOADWIN(%sp) ! suck hard
3642: save %g0, %g0, %g0 ! back to window T
3643: RETT
3644:
3645: /*
3646: * Return from trap. Entered after a
3647: * wr %l0, 0, %psr
3648: * which disables traps so that we can rett; registers are:
3649: *
3650: * %l0 = %psr
3651: * %l1 = return pc
3652: * %l2 = return npc
3653: *
3654: * (%l3..%l7 anything).
3655: *
3656: * If we are returning to user code, we must:
3657: * 1. Check for register windows in the pcb that belong on the stack.
3658: * If there are any, reenter trap with type T_WINOF.
3659: * 2. Make sure the register windows will not underflow. This is
3660: * much easier in kernel mode....
3661: */
3662: return_from_trap:
3663: ! wr %l0, 0, %psr ! disable traps so we can rett
3664: ! (someone else did this already)
3665: and %l0, 31, %l5
3666: set wmask, %l6
3667: ldub [%l6 + %l5], %l5 ! %l5 = 1 << ((CWP + 1) % nwindows)
3668: btst PSR_PS, %l0 ! returning to userland?
3669: bnz rft_kernel ! no, go return to kernel
3670: rd %wim, %l4 ! (read %wim in any case)
3671:
3672: rft_user_or_recover_pcb_windows:
3673: /*
3674: * (entered with %l4=%wim, %l5=wmask[cwp]; %l0..%l2 as usual)
3675: *
3676: * check cpcb->pcb_nsaved:
3677: * if 0, do a `normal' return to user (see rft_user);
3678: * if > 0, cpcb->pcb_rw[] holds registers to be copied to stack;
3679: * if -1, cpcb->pcb_rw[0] holds user registers for rett window
3680: * from an earlier T_RWRET pseudo-trap.
3681: */
1.111 pk 3682: sethi %hi(cpcb), %l6
3683: ld [%l6 + %lo(cpcb)], %l6
1.1 deraadt 3684: ld [%l6 + PCB_NSAVED], %l7
3685: tst %l7
3686: bz,a rft_user
1.179 pk 3687: sethi %hi(_WANT_AST), %l7 ! first instr of rft_user
1.1 deraadt 3688:
3689: bg,a softtrap ! if (pcb_nsaved > 0)
3690: mov T_WINOF, %o0 ! trap(T_WINOF);
3691:
3692: /*
3693: * To get here, we must have tried to return from a previous
3694: * trap and discovered that it would cause a window underflow.
3695: * We then must have tried to pull the registers out of the
3696: * user stack (from the address in %fp==%i6) and discovered
3697: * that it was either unaligned or not loaded in memory, and
3698: * therefore we ran a trap(T_RWRET), which loaded one set of
3699: * registers into cpcb->pcb_pcb_rw[0] (if it had killed the
3700: * process due to a bad stack, we would not be here).
3701: *
3702: * We want to load pcb_rw[0] into the previous window, which
3703: * we know is currently invalid. In other words, we want
3704: * %wim to be 1 << ((cwp + 2) % nwindows).
3705: */
3706: wr %g0, 0, %wim ! enable restores
3707: mov %g6, %l3 ! save g6 in l3
3708: mov %l6, %g6 ! set g6 = &u
3709: st %g0, [%g6 + PCB_NSAVED] ! clear cpcb->pcb_nsaved
3710: restore ! enter window I
3711: restore %g0, 1, %l1 ! enter window X, then %l1 = 1
3712: rd %psr, %l0
3713: sll %l1, %l0, %l1 ! %wim = 1 << CWP;
3714: wr %l1, 0, %wim
3715: and %l0, 31, %l0
3716: st %l0, [%g6 + PCB_WIM] ! cpcb->pcb_wim = CWP;
3717: nop ! unnecessary? old wim was 0...
3718: save %g0, %g0, %g0 ! back to window I
3719: LOADWIN(%g6 + PCB_RW)
3720: save %g0, %g0, %g0 ! back to window T (trap window)
3721: wr %l0, 0, %psr ! cond codes, cond codes everywhere
3722: mov %l3, %g6 ! restore g6
3723: RETT
3724:
3725: ! exported end marker for kernel gdb
1.111 pk 3726: .globl _C_LABEL(endtrapcode)
3727: _C_LABEL(endtrapcode):
1.1 deraadt 3728:
3729: /*
3730: * init_tables(nwin) int nwin;
3731: *
3732: * Set up the uwtab and wmask tables.
3733: * We know nwin > 1.
3734: */
3735: init_tables:
3736: /*
3737: * for (i = -nwin, j = nwin - 2; ++i < 0; j--)
3738: * uwtab[i] = j;
3739: * (loop runs at least once)
3740: */
3741: set uwtab, %o3
3742: sub %g0, %o0, %o1 ! i = -nwin + 1
3743: inc %o1
3744: add %o0, -2, %o2 ! j = nwin - 2;
3745: 0:
3746: stb %o2, [%o3 + %o1] ! uwtab[i] = j;
3747: 1:
3748: inccc %o1 ! ++i < 0?
3749: bl 0b ! yes, continue loop
3750: dec %o2 ! in any case, j--
3751:
3752: /*
3753: * (i now equals 0)
3754: * for (j = nwin - 1; i < nwin; i++, j--)
3755: * uwtab[i] = j;
3756: * (loop runs at least twice)
3757: */
3758: sub %o0, 1, %o2 ! j = nwin - 1
3759: 0:
3760: stb %o2, [%o3 + %o1] ! uwtab[i] = j
3761: inc %o1 ! i++
3762: 1:
3763: cmp %o1, %o0 ! i < nwin?
3764: bl 0b ! yes, continue
3765: dec %o2 ! in any case, j--
3766:
3767: /*
3768: * We observe that, for i in 0..nwin-2, (i+1)%nwin == i+1;
3769: * for i==nwin-1, (i+1)%nwin == 0.
3770: * To avoid adding 1, we run i from 1 to nwin and set
3771: * wmask[i-1].
3772: *
3773: * for (i = j = 1; i < nwin; i++) {
3774: * j <<= 1; (j now == 1 << i)
3775: * wmask[i - 1] = j;
3776: * }
3777: * (loop runs at least once)
3778: */
3779: set wmask - 1, %o3
3780: mov 1, %o1 ! i = 1;
3781: mov 2, %o2 ! j = 2;
3782: 0:
3783: stb %o2, [%o3 + %o1] ! (wmask - 1)[i] = j;
3784: inc %o1 ! i++
3785: cmp %o1, %o0 ! i < nwin?
3786: bl,a 0b ! yes, continue
3787: sll %o2, 1, %o2 ! (and j <<= 1)
3788:
3789: /*
3790: * Now i==nwin, so we want wmask[i-1] = 1.
3791: */
3792: mov 1, %o2 ! j = 1;
3793: retl
3794: stb %o2, [%o3 + %o1] ! (wmask - 1)[i] = j;
3795:
1.13 deraadt 3796:
1.1 deraadt 3797: dostart:
1.32 pk 3798: 	/*
 3799: 	 * Startup.
 3800: 	 *
1.186 pk 3801: 	 * We may have been loaded in low RAM, at some address which
1.119 christos 3802: 	 * is page aligned (PROM_LOADADDR actually) rather than where we
 3803: 	 * want to run (KERNBASE+PROM_LOADADDR). Until we get everything set,
1.32 pk 3804: 	 * we have to be sure to use only pc-relative addressing.
 3805: 	 */
 3806: 
1.27 pk 3807: 	/*
1.186 pk 3808: 	 * Find out if the above is the case.
 3809: 	 */
	! The `call' deposits its own (actual) address in %o7, while the
	! sethi/or pair builds the link-time (virtual) address of `0:'.
	! Their difference is the relocation offset kept in %l7 below.
 3810: 0:	call	1f
 3811: 	sethi	%hi(0b), %l0	! %l0 = virtual address of 0:
 3812: 1:	or	%l0, %lo(0b), %l0
 3813: 	sub	%l0, %o7, %l7	! subtract actual physical address of 0:
 3814: 
 3815: 	/*
 3816: 	 * If we're already running at our desired virtual load address,
 3817: 	 * %l7 will be set to 0, otherwise it will be KERNBASE.
 3818: 	 * From now on until the end of locore bootstrap code, %l7 will
 3819: 	 * be used to relocate memory references.
 3820: 	 */
 3821: #define RELOCATE(l,r) \
 3822: 	set	l, r; \
 3823: 	sub	r, %l7, r
 3824: 
 3825: 	/*
 3826: 	 * We use the bootinfo method to pass arguments, and the new
1.153 pk 3827: 	 * magic number indicates that. A pointer to the kernel top, i.e.
 3828: 	 * the first address after the load kernel image (including DDB
 3829: 	 * symbols, if any) is passed in %o4[0] and the bootinfo structure
 3830: 	 * is passed in %o4[1].
 3831: 	 *
 3832: 	 * A magic number is passed in %o5 to allow for bootloaders
 3833: 	 * that know nothing about the bootinfo structure or previous
 3834: 	 * DDB symbol loading conventions.
1.117 christos 3835: 	 *
 3836: 	 * For compatibility with older versions, we check for DDB arguments
1.153 pk 3837: 	 * if the older magic number is there. The loader passes `kernel_top'
 3838: 	 * (previously known as `esym') in %o4.
 3839: 	 *
1.40 pk 3840: 	 * Note: we don't touch %o1-%o3; SunOS bootloaders seem to use them
 3841: 	 * for their own murky business.
1.73 pk 3842: 	 *
1.153 pk 3843: 	 * Pre-NetBSD 1.3 bootblocks had KERNBASE compiled in, and used it
 3844: 	 * to compute the value of `kernel_top' (previously known as `esym').
 3845: 	 * In order to successfully boot a kernel built with a different value
 3846: 	 * for KERNBASE using old bootblocks, we fixup `kernel_top' here by
 3847: 	 * the difference between KERNBASE and the old value (known to be
 3848: 	 * 0xf8000000) compiled into pre-1.3 bootblocks.
1.27 pk 3849: 	 */
1.117 christos 3850: 
 3851: 	set	0x44444232, %l3		! bootinfo magic
 3852: 	cmp	%o5, %l3
 3853: 	bne	1f
1.118 pk 3854: 	nop
 3855: 
 3856: 	/* The loader has passed to us a `bootinfo' structure */
1.153 pk 3857: 	ld	[%o4], %l3		! 1st word is kernel_top
1.186 pk 3858: 	add	%l3, %l7, %o5		! relocate: + KERNBASE
 3859: 	RELOCATE(_C_LABEL(kernel_top),%l3)
 3860: 	st	%o5, [%l3]		! and store it
1.120 pk 3861: 
 3862: 	ld	[%o4 + 4], %l3		! 2nd word is bootinfo
1.186 pk 3863: 	add	%l3, %l7, %o5		! relocate
 3864: 	RELOCATE(_C_LABEL(bootinfo),%l3)
 3865: 	st	%o5, [%l3]		! store bootinfo
1.153 pk 3866: 	b,a	4f
1.117 christos 3867: 
1.118 pk 3868: 1:
1.153 pk 3869: #ifdef DDB
1.120 pk 3870: 	/* Check for old-style DDB loader magic */
1.186 pk 3871: 	set	KERNBASE, %l4
1.153 pk 3872: 	set	0x44444231, %l3		! Is it DDB_MAGIC1?
1.117 christos 3873: 	cmp	%o5, %l3
1.118 pk 3874: 	be,a	2f
 3875: 	clr	%l4			! if DDB_MAGIC1, clear %l4
1.115 christos 3876: 
1.153 pk 3877: 	set	0x44444230, %l3		! Is it DDB_MAGIC0?
 3878: 	cmp	%o5, %l3		! if so, need to relocate %o4
1.154 thorpej 3879: 	bne	3f	/* if not, there's no bootloader info */
1.73 pk 3880: 
1.118 pk 3881: 	! note: %l4 set to KERNBASE above.
1.73 pk 3882: 	set	0xf8000000, %l5		! compute correction term:
 3883: 	sub	%l5, %l4, %l4		! old KERNBASE (0xf8000000 ) - KERNBASE
 3884: 
1.117 christos 3885: 2:
1.40 pk 3886: 	tst	%o4			! do we have the symbols?
1.117 christos 3887: 	bz	3f
1.73 pk 3888: 	sub	%o4, %l4, %o4		! apply compat correction
1.153 pk 3889: 	sethi	%hi(_C_LABEL(kernel_top) - KERNBASE), %l3 ! and store it
 3890: 	st	%o4, [%l3 + %lo(_C_LABEL(kernel_top) - KERNBASE)]
 3891: 	b,a	4f
1.117 christos 3892: 3:
1.27 pk 3893: #endif
1.13 deraadt 3894: 	/*
1.153 pk 3895: 	 * The boot loader did not pass in a value for `kernel_top';
 3896: 	 * let it default to `end'.
 3897: 	 */
 3898: 	set	end, %o4
1.186 pk 3899: 	RELOCATE(_C_LABEL(kernel_top),%l3)
 3900: 	st	%o4, [%l3]		! store kernel_top
1.153 pk 3901: 
 3902: 4:
3903:
 3904: 	/*
1.13 deraadt 3905: 	 * Sun4 passes in the `load address'. Although possible, it's highly
 3906: 	 * unlikely that OpenBoot would place the prom vector there.
 3907: 	 */
1.119 christos 3908: 	set	PROM_LOADADDR, %g7
1.17 pk 3909: 	cmp	%o0, %g7
1.50 pk 3910: 	be	is_sun4
1.14 deraadt 3911: 	nop
 3912: 
1.158 thorpej 3913: #if defined(SUN4C) || defined(SUN4M) || defined(SUN4D)
1.144 uwe 3914: 	/*
 3915: 	 * Be prepared to get OF client entry in either %o0 or %o3.
1.158 thorpej 3916: 	 * XXX Will this ever trip on sun4d?  Let's hope not!
1.144 uwe 3917: 	 */
 3918: 	cmp	%o0, 0
 3919: 	be	is_openfirm
 3920: 	nop
 3921: 
 3922: 	mov	%o0, %g7		! save romp passed by boot code
1.9 deraadt 3923: 
1.109 pk 3924: 	/* First, check `romp->pv_magic' */
 3925: 	ld	[%g7 + PV_MAGIC], %o0	! v = pv->pv_magic
 3926: 	set	OBP_MAGIC, %o1
 3927: 	cmp	%o0, %o1		! if ( v != OBP_MAGIC) {
1.144 uwe 3928: 	bne	is_sun4m		!    assume this is an OPENFIRM machine
1.109 pk 3929: 	nop				! }
 3930: 
1.13 deraadt 3931: 	/*
1.158 thorpej 3932: 	 * are we on a sun4c or a sun4m or a sun4d?
1.13 deraadt 3933: 	 */
	! Ask the PROM for the root node's "compatible" property and look at
	! the 5th character of the value ("sun4c"/"sun4m"/"sun4d").
1.28 deraadt 3934: 	ld	[%g7 + PV_NODEOPS], %o4	! node = pv->pv_nodeops->no_nextnode(0)
 3935: 	ld	[%o4 + NO_NEXTNODE], %o4
1.18 deraadt 3936: 	call	%o4
 3937: 	mov	0, %o0			! node
1.37 pk 3938: 
1.186 pk 3939: 	!mov	%o0, %l0
 3940: 	RELOCATE(cputypvar,%o1)		! name = "compatible"
 3941: 	RELOCATE(cputypval,%l2)		! buffer ptr (assume buffer long enough)
1.28 deraadt 3942: 	ld	[%g7 + PV_NODEOPS], %o4	! (void)pv->pv_nodeops->no_getprop(...)
 3943: 	ld	[%o4 + NO_GETPROP], %o4
1.18 deraadt 3944: 	call	%o4
1.186 pk 3945: 	mov	%l2, %o2
 3946: 	!set	cputypval-KERNBASE, %o2	! buffer ptr
 3947: 	ldub	[%l2 + 4], %o0		! which is it... "sun4c", "sun4m", "sun4d"?
1.18 deraadt 3948: 	cmp	%o0, 'c'
1.50 pk 3949: 	be	is_sun4c
1.13 deraadt 3950: 	nop
1.18 deraadt 3951: 	cmp	%o0, 'm'
1.50 pk 3952: 	be	is_sun4m
1.18 deraadt 3953: 	nop
1.158 thorpej 3954: 	cmp	%o0, 'd'
 3955: 	be	is_sun4d
 3956: 	nop
 3957: #endif /* SUN4C || SUN4M || SUN4D */
1.18 deraadt 3958: 
1.158 thorpej 3959: 	/*
 3960: 	 * Don't know what type of machine this is; just halt back
 3961: 	 * out to the PROM.
 3962: 	 */
1.28 deraadt 3963: 	ld	[%g7 + PV_HALT], %o1	! by this kernel, then halt
1.18 deraadt 3964: 	call	%o1
 3965: 	nop
 3966: 
1.109 pk 3967: is_openfirm:
1.144 uwe 3968: 	! OF client entry in %o3 (kernel booted directly by PROM?)
 3969: 	mov	%o3, %g7
1.109 pk 3970: 	/* FALLTHROUGH to sun4m case */
 3971: 
	!
	! Each is_sun4* branch below establishes the per-machine convention
	! used by start_havetype: %g4 = CPU_* type, %g5 = page shift,
	! %g6 = trap base, %g7 = PROM vector.
	!
1.18 deraadt 3972: is_sun4m:
1.13 deraadt 3973: #if defined(SUN4M)
1.52 pk 3974: 	set	trapbase_sun4m, %g6
1.13 deraadt 3975: 	mov	SUN4CM_PGSHIFT, %g5
 3976: 	b	start_havetype
 3977: 	mov	CPU_SUN4M, %g4
 3978: #else
1.186 pk 3979: 	RELOCATE(sun4m_notsup,%o0)
1.28 deraadt 3980: 	ld	[%g7 + PV_EVAL], %o1
1.9 deraadt 3981: 	call	%o1			! print a message saying that the
 3982: 	nop				! sun4m architecture is not supported
1.158 thorpej 3983: 	ld	[%g7 + PV_HALT], %o1	! by this kernel, then halt
 3984: 	call	%o1
 3985: 	nop
 3986: 	/*NOTREACHED*/
 3987: #endif
 3988: is_sun4d:
 3989: #if defined(SUN4D)
1.159 thorpej 3990: 	set	trapbase_sun4m, %g6	/* XXXJRT trapbase_sun4d */
1.158 thorpej 3991: 	mov	SUN4CM_PGSHIFT, %g5
 3992: 	b	start_havetype
 3993: 	mov	CPU_SUN4D, %g4
 3994: #else
1.186 pk 3995: 	RELOCATE(sun4d_notsup,%o0)
1.158 thorpej 3996: 	ld	[%g7 + PV_EVAL], %o1
 3997: 	call	%o1			! print a message saying that the
 3998: 	nop				! sun4d architecture is not supported
1.28 deraadt 3999: 	ld	[%g7 + PV_HALT], %o1	! by this kernel, then halt
1.9 deraadt 4000: 	call	%o1
 4001: 	nop
1.13 deraadt 4002: 	/*NOTREACHED*/
 4003: #endif
 4004: is_sun4c:
 4005: #if defined(SUN4C)
1.52 pk 4006: 	set	trapbase_sun4c, %g6
1.13 deraadt 4007: 	mov	SUN4CM_PGSHIFT, %g5
 4008: 
 4009: 	set	AC_CONTEXT, %g1		! paranoia: set context to kernel
 4010: 	stba	%g0, [%g1] ASI_CONTROL
 4011: 
 4012: 	b	start_havetype
 4013: 	mov	CPU_SUN4C, %g4		! XXX CPU_SUN4
1.9 deraadt 4014: #else
1.186 pk 4015: 	RELOCATE(sun4c_notsup,%o0)
1.28 deraadt 4016: 
 4017: 	ld	[%g7 + PV_ROMVEC_VERS], %o1
 4018: 	cmp	%o1, 0
 4019: 	bne	1f
 4020: 	nop
 4021: 
 4022: 	! stupid version 0 rom interface is pv_eval(int length, char *string)
	! compute strlen(%o0) into %o0, original pointer saved in %o1
 4023: 	mov	%o0, %o1
 4024: 2:	ldub	[%o0], %o4
1.186 pk 4025: 	tst	%o4
1.28 deraadt 4026: 	bne	2b
 4027: 	inc	%o0
 4028: 	dec	%o0
 4029: 	sub	%o0, %o1, %o0
 4030: 
 4031: 1:	ld	[%g7 + PV_EVAL], %o2
 4032: 	call	%o2			! print a message saying that the
1.9 deraadt 4033: 	nop				! sun4c architecture is not supported
1.28 deraadt 4034: 	ld	[%g7 + PV_HALT], %o1	! by this kernel, then halt
1.9 deraadt 4035: 	call	%o1
 4036: 	nop
1.13 deraadt 4037: 	/*NOTREACHED*/
1.9 deraadt 4038: #endif
1.13 deraadt 4039: is_sun4:
 4040: #if defined(SUN4)
1.52 pk 4041: 	set	trapbase_sun4, %g6
1.13 deraadt 4042: 	mov	SUN4_PGSHIFT, %g5
1.1 deraadt 4043: 
1.13 deraadt 4044: 	set	AC_CONTEXT, %g1		! paranoia: set context to kernel
 4045: 	stba	%g0, [%g1] ASI_CONTROL
 4046: 
 4047: 	b	start_havetype
1.14 deraadt 4048: 	mov	CPU_SUN4, %g4
1.13 deraadt 4049: #else
1.14 deraadt 4050: 	set	PROM_BASE, %g7
 4051: 
1.186 pk 4052: 	RELOCATE(sun4_notsup,%o0)
1.28 deraadt 4053: 	ld	[%g7 + OLDMON_PRINTF], %o1
1.13 deraadt 4054: 	call	%o1			! print a message saying that the
 4055: 	nop				! sun4 architecture is not supported
1.28 deraadt 4056: 	ld	[%g7 + OLDMON_HALT], %o1 ! by this kernel, then halt
1.13 deraadt 4057: 	call	%o1
 4058: 	nop
 4059: 	/*NOTREACHED*/
 4060: #endif
4061:
 4062: start_havetype:
	! On entry: %g4 = CPU type, %l7 = relocation offset (0 when already
	! running at the link address, in which case no mapping is needed).
1.186 pk 4063: 	cmp	%l7, 0
 4064: 	be	startmap_done
 4065: 
1.1 deraadt 4066: 	/*
 4067: 	 * Step 1: double map low RAM (addresses [0.._end-start-1])
 4068: 	 * to KERNBASE (addresses [KERNBASE.._end-1]). None of these
 4069: 	 * are `bad' aliases (since they are all on segment boundaries)
 4070: 	 * so we do not have to worry about cache aliasing.
 4071: 	 *
 4072: 	 * We map in another couple of segments just to have some
 4073: 	 * more memory (512K, actually) guaranteed available for
 4074: 	 * bootstrap code (pmap_bootstrap needs memory to hold MMU
1.39 pk 4075: 	 * and context data structures). Note: this is only relevant
 4076: 	 * for 2-level MMU sun4/sun4c machines.
1.1 deraadt 4077: 	 */
 4078: 	clr	%l0			! lowva
 4079: 	set	KERNBASE, %l1		! highva
1.153 pk 4080: 
 4081: 	sethi	%hi(_C_LABEL(kernel_top) - KERNBASE), %o0
 4082: 	ld	[%o0 + %lo(_C_LABEL(kernel_top) - KERNBASE)], %o1
 4083: 	set	(2 << 18), %o2		! add slack for sun4c MMU
 4084: 	add	%o1, %o2, %l2		! last va that must be remapped
 4085: 
1.13 deraadt 4086: 	/*
 4087: 	 * Need different initial mapping functions for different
 4088: 	 * types of machines.
 4089: 	 */
 4090: #if defined(SUN4C)
 4091: 	cmp	%g4, CPU_SUN4C
1.9 deraadt 4092: 	bne	1f
1.14 deraadt 4093: 	set	1 << 18, %l3		! segment size in bytes
1.1 deraadt 4094: 0:
 4095: 	lduba	[%l0] ASI_SEGMAP, %l4	! segmap[highva] = segmap[lowva];
 4096: 	stba	%l4, [%l1] ASI_SEGMAP
 4097: 	add	%l3, %l1, %l1		! highva += segsiz;
 4098: 	cmp	%l1, %l2		! done?
1.34 pk 4099: 	blu	0b			! no, loop
1.1 deraadt 4100: 	add	%l3, %l0, %l0		! (and lowva += segsz)
1.135 pk 4101: 	b,a	startmap_done
1.52 pk 4102: 1:
1.13 deraadt 4103: #endif /* SUN4C */
1.135 pk 4104: 
1.13 deraadt 4105: #if defined(SUN4)
 4106: 	cmp	%g4, CPU_SUN4
 4107: 	bne	2f
1.114 pk 4108: #if defined(SUN4_MMU3L)
1.34 pk 4109: 	set	AC_IDPROM+1, %l3
 4110: 	lduba	[%l3] ASI_CONTROL, %l3
 4111: 	cmp	%l3, 0x24		! XXX - SUN4_400
 4112: 	bne	no_3mmu
1.133 pk 4113: 	nop
1.135 pk 4114: 
 4115: 	/*
 4116: 	 * Three-level sun4 MMU.
 4117: 	 * Double-map by duplicating a single region entry (which covers
 4118: 	 * 16MB) corresponding to the kernel's virtual load address.
 4119: 	 */
1.34 pk 4120: 	add	%l0, 2, %l0		! get to proper half-word in RG space
 4121: 	add	%l1, 2, %l1
 4122: 	lduha	[%l0] ASI_REGMAP, %l4	! regmap[highva] = regmap[lowva];
 4123: 	stha	%l4, [%l1] ASI_REGMAP
1.135 pk 4124: 	b,a	startmap_done
1.34 pk 4125: no_3mmu:
 4126: #endif
1.135 pk 4127: 
 4128: 	/*
 4129: 	 * Two-level sun4 MMU.
 4130: 	 * Double-map by duplicating the required number of segment
 4131: 	 * entries corresponding to the kernel's virtual load address.
 4132: 	 */
 4133: 	set	1 << 18, %l3		! segment size in bytes
1.13 deraadt 4134: 0:
 4135: 	lduha	[%l0] ASI_SEGMAP, %l4	! segmap[highva] = segmap[lowva];
 4136: 	stha	%l4, [%l1] ASI_SEGMAP
 4137: 	add	%l3, %l1, %l1		! highva += segsiz;
 4138: 	cmp	%l1, %l2		! done?
1.34 pk 4139: 	blu	0b			! no, loop
1.13 deraadt 4140: 	add	%l3, %l0, %l0		! (and lowva += segsz)
1.37 pk 4141: 	b,a	startmap_done
1.52 pk 4142: 2:
1.13 deraadt 4143: #endif /* SUN4 */
1.135 pk 4144: 
1.159 thorpej 4145: #if defined(SUN4M) || defined(SUN4D)
 4146: 	cmp	%g4, CPU_SUN4M
 4147: 	beq	3f
 4148: 	nop
 4149: 	cmp	%g4, CPU_SUN4D
1.164 pk 4150: 	bne	4f
1.13 deraadt 4151: 
1.159 thorpej 4152: 3:
1.37 pk 4153: 	/*
1.38 pk 4154: 	 * The OBP guarantees us a 16MB mapping using a level 1 PTE at
1.135 pk 4155: 	 * the start of the memory bank in which we were loaded. All we
 4156: 	 * have to do is copy the entry.
 4157: 	 * Also, we must check to see if we have a TI Viking in non-mbus mode,
 4158: 	 * and if so do appropriate flipping and turning off traps before
1.38 pk 4159: 	 * we dork with MMU passthrough.  -grrr
1.37 pk 4160: 	 */
 4161: 
1.38 pk 4162: 	sethi	%hi(0x40000000), %o1	! TI version bit
 4163: 	rd	%psr, %o0
 4164: 	andcc	%o0, %o1, %g0
 4165: 	be	remap_notvik		! is non-TI normal MBUS module
 4166: 	lda	[%g0] ASI_SRMMU, %o0	! load MMU
 4167: 	andcc	%o0, 0x800, %g0
 4168: 	bne	remap_notvik		! It is a viking MBUS module
 4169: 	nop
 4170: 
 4171: 	/*
 4172: 	 * Ok, we have a non-Mbus TI Viking, a MicroSparc.
 4173: 	 * In this scenario, in order to play with the MMU
 4174: 	 * passthrough safely, we need turn off traps, flip
 4175: 	 * the AC bit on in the mmu status register, do our
 4176: 	 * passthroughs, then restore the mmu reg and %psr
 4177: 	 */
 4178: 	rd	%psr, %o4		! saved here till done
 4179: 	andn	%o4, 0x20, %o5
 4180: 	wr	%o5, 0x0, %psr
 4181: 	nop; nop; nop;
 4182: 	set	SRMMU_CXTPTR, %o0
 4183: 	lda	[%o0] ASI_SRMMU, %o0	! get context table ptr
 4184: 	sll	%o0, 4, %o0		! make physical
 4185: 	lda	[%g0] ASI_SRMMU, %o3	! hold mmu-sreg here
 4186: 	/* 0x8000 is AC bit in Viking mmu-ctl reg */
 4187: 	set	0x8000, %o2
 4188: 	or	%o3, %o2, %o2
 4189: 	sta	%o2, [%g0] ASI_SRMMU	! AC bit on
1.135 pk 4190: 
1.38 pk 4191: 	lda	[%o0] ASI_BYPASS, %o1
 4192: 	srl	%o1, 4, %o1
 4193: 	sll	%o1, 8, %o1		! get phys addr of l1 entry
 4194: 	lda	[%o1] ASI_BYPASS, %l4
 4195: 	srl	%l1, 22, %o2		! note: 22 == RGSHIFT - 2
 4196: 	add	%o1, %o2, %o1
 4197: 	sta	%l4, [%o1] ASI_BYPASS
1.135 pk 4198: 
1.38 pk 4199: 	sta	%o3, [%g0] ASI_SRMMU	! restore mmu-sreg
 4200: 	wr	%o4, 0x0, %psr		! restore psr
1.164 pk 4201: 	b,a	startmap_done
1.38 pk 4202: 
 4203: 	/*
 4204: 	 * The following is generic and should work on all
 4205: 	 * Mbus based SRMMU's.
 4206: 	 */
 4207: remap_notvik:
 4208: 	set	SRMMU_CXTPTR, %o0
 4209: 	lda	[%o0] ASI_SRMMU, %o0	! get context table ptr
 4210: 	sll	%o0, 4, %o0		! make physical
 4211: 	lda	[%o0] ASI_BYPASS, %o1
 4212: 	srl	%o1, 4, %o1
 4213: 	sll	%o1, 8, %o1		! get phys addr of l1 entry
 4214: 	lda	[%o1] ASI_BYPASS, %l4
 4215: 	srl	%l1, 22, %o2		! note: 22 == RGSHIFT - 2
 4216: 	add	%o1, %o2, %o1
 4217: 	sta	%l4, [%o1] ASI_BYPASS
1.52 pk 4218: 	!b,a	startmap_done
1.163 pk 4219: 4:
1.159 thorpej 4220: #endif /* SUN4M || SUN4D */
1.13 deraadt 4221: 	! botch! We should blow up.
4222:
 4223: startmap_done:
1.1 deraadt 4224: 	/*
 4225: 	 * All set, fix pc and npc. Once we are where we should be,
 4226: 	 * we can give ourselves a stack and enable traps.
 4227: 	 */
	! The absolute jump switches execution to the link-time (virtual)
	! addresses; pc-relative addressing is no longer required after this.
1.9 deraadt 4228: 	set	1f, %g1
 4229: 	jmp	%g1
1.1 deraadt 4230: 	nop
 4231: 1:
1.197 wiz 4232: 	sethi	%hi(_C_LABEL(cputyp)), %o0	! what type of CPU we are on
1.111 pk 4233: 	st	%g4, [%o0 + %lo(_C_LABEL(cputyp))]
1.9 deraadt 4234: 
1.111 pk 4235: 	sethi	%hi(_C_LABEL(pgshift)), %o0	! pgshift = log2(nbpg)
 4236: 	st	%g5, [%o0 + %lo(_C_LABEL(pgshift))]
1.13 deraadt 4237: 
 4238: 	mov	1, %o0			! nbpg = 1 << pgshift
 4239: 	sll	%o0, %g5, %g5
1.111 pk 4240: 	sethi	%hi(_C_LABEL(nbpg)), %o0	! nbpg = bytes in a page
 4241: 	st	%g5, [%o0 + %lo(_C_LABEL(nbpg))]
1.13 deraadt 4242: 
 4243: 	sub	%g5, 1, %g5
1.111 pk 4244: 	sethi	%hi(_C_LABEL(pgofset)), %o0	! page offset = bytes in a page - 1
 4245: 	st	%g5, [%o0 + %lo(_C_LABEL(pgofset))]
1.13 deraadt 4246: 
1.9 deraadt 4247: 	rd	%psr, %g3		! paranoia: make sure ...
 4248: 	andn	%g3, PSR_ET, %g3	! we have traps off
 4249: 	wr	%g3, 0, %psr		! so that we can fiddle safely
 4250: 	nop; nop; nop
 4251: 
 4252: 	wr	%g0, 0, %wim		! make sure we can set psr
 4253: 	nop; nop; nop
 4254: 	wr	%g0, PSR_S|PSR_PS|PSR_PIL, %psr	! set initial psr
 4255: 	nop; nop; nop
 4256: 
 4257: 	wr	%g0, 2, %wim		! set initial %wim (w1 invalid)
 4258: 	mov	1, %g1			! set pcb_wim (log2(%wim) = 1)
1.111 pk 4259: 	sethi	%hi(_C_LABEL(u0) + PCB_WIM), %g2
 4260: 	st	%g1, [%g2 + %lo(_C_LABEL(u0) + PCB_WIM)]
1.9 deraadt 4261: 
1.1 deraadt 4262: 	set	USRSTACK - CCFSZ, %fp	! as if called from user code
 4263: 	set	estack0 - CCFSZ - 80, %sp ! via syscall(boot_me_up) or somesuch
 4264: 	rd	%psr, %l0
 4265: 	wr	%l0, PSR_ET, %psr
1.9 deraadt 4266: 	nop; nop; nop
1.1 deraadt 4267: 
1.52 pk 4268: 	/* Export actual trapbase */
1.111 pk 4269: 	sethi	%hi(_C_LABEL(trapbase)), %o0
 4270: 	st	%g6, [%o0+%lo(_C_LABEL(trapbase))]
1.52 pk 4271: 
1.117 christos 4272: #ifdef notdef
1.1 deraadt 4273: 	/*
 4274: 	 * Step 2: clear BSS. This may just be paranoia; the boot
 4275: 	 * loader might already do it for us; but what the hell.
 4276: 	 */
 4277: 	set	_edata, %o0		! bzero(edata, end - edata)
 4278: 	set	_end, %o1
1.111 pk 4279: 	call	_C_LABEL(bzero)
1.1 deraadt 4280: 	sub	%o1, %o0, %o1
1.117 christos 4281: #endif
1.1 deraadt 4282: 
 4283: 	/*
 4284: 	 * Stash prom vectors now, after bzero, as it lives in bss
 4285: 	 * (which we just zeroed).
 4286: 	 * This depends on the fact that bzero does not use %g7.
 4287: 	 */
1.111 pk 4288: 	sethi	%hi(_C_LABEL(romp)), %l0
 4289: 	st	%g7, [%l0 + %lo(_C_LABEL(romp))]
1.1 deraadt 4290: 
 4291: 	/*
 4292: 	 * Step 3: compute number of windows and set up tables.
 4293: 	 * We could do some of this later.
 4294: 	 */
	! save/restore moves CWP down and back; reading %psr in between
	! yields the wrapped-around CWP, from which nwindows follows.
 4295: 	save	%sp, -64, %sp
 4296: 	rd	%psr, %g1
 4297: 	restore
 4298: 	and	%g1, 31, %g1		! want just the CWP bits
 4299: 	add	%g1, 1, %o0		! compute nwindows
1.111 pk 4300: 	sethi	%hi(_C_LABEL(nwindows)), %o1	! may as well tell everyone
1.1 deraadt 4301: 	call	init_tables
1.111 pk 4302: 	st	%o0, [%o1 + %lo(_C_LABEL(nwindows))]
1.1 deraadt 4303: 
1.148 pk 4304: #if defined(SUN4) || defined(SUN4C)
1.29 deraadt 4305: 	/*
1.148 pk 4306: 	 * Some sun4/sun4c models have fewer than 8 windows. For extra
1.29 deraadt 4307: 	 * speed, we do not need to save/restore those windows
1.196 pk 4308: 	 * The save/restore code has 6 "save"'s followed by 6
1.29 deraadt 4309: 	 * "restore"'s -- we "nop" out the last "save" and first
 4310: 	 * "restore"
 4311: 	 */
	! noplab holds a nop instruction image; it is copied over the
	! two instructions at wb1+5*4 and wb1+6*4 (runtime code patching).
 4312: 	cmp	%o0, 8
1.50 pk 4313: 	be	1f
1.29 deraadt 4314: noplab:	nop
1.148 pk 4315: 	sethi	%hi(noplab), %l0
 4316: 	ld	[%l0 + %lo(noplab)], %l1
1.29 deraadt 4317: 	set	wb1, %l0
1.173 pk 4318: 	st	%l1, [%l0 + 5*4]
 4319: 	st	%l1, [%l0 + 6*4]
1.29 deraadt 4320: 1:
 4321: #endif
 4322: 
1.159 thorpej 4323: #if (defined(SUN4) || defined(SUN4C)) && (defined(SUN4M) || defined(SUN4D))
1.62 pk 4324: 
 4325: 	/*
 4326: 	 * Patch instructions at specified labels that start
 4327: 	 * per-architecture code-paths.
 4328: 	 */
 4329: Lgandul:	nop
 4330: 
 4331: #define MUNGE(label) \
 4332: 	sethi	%hi(label), %o0; \
 4333: 	st	%l0, [%o0 + %lo(label)]
 4334: 
 4335: 	sethi	%hi(Lgandul), %o0
 4336: 	ld	[%o0 + %lo(Lgandul)], %l0	! %l0 = NOP
 4337: 
 4338: 	cmp	%g4, CPU_SUN4M
1.159 thorpej 4339: 	beq,a	2f
 4340: 	nop
 4341: 
 4342: 	cmp	%g4, CPU_SUN4D
1.62 pk 4343: 	bne,a	1f
 4344: 	nop
 4345: 
1.159 thorpej 4346: 2:	! this should be automated!
1.62 pk 4347: 	MUNGE(NOP_ON_4M_1)
 4348: 	MUNGE(NOP_ON_4M_2)
 4349: 	MUNGE(NOP_ON_4M_3)
 4350: 	MUNGE(NOP_ON_4M_4)
 4351: 	MUNGE(NOP_ON_4M_5)
 4352: 	MUNGE(NOP_ON_4M_6)
 4353: 	MUNGE(NOP_ON_4M_7)
 4354: 	MUNGE(NOP_ON_4M_8)
 4355: 	MUNGE(NOP_ON_4M_9)
 4356: 	MUNGE(NOP_ON_4M_10)
 4357: 	MUNGE(NOP_ON_4M_11)
 4358: 	MUNGE(NOP_ON_4M_12)
1.152 pk 4359: 	MUNGE(NOP_ON_4M_15)
1.62 pk 4360: 	b,a	2f
 4361: 
 4362: 1:
1.68 mycroft 4363: 	MUNGE(NOP_ON_4_4C_1)
1.62 pk 4364: 
 4365: 2:
 4366: 
 4367: #undef MUNGE
 4368: #endif
 4369: 
1.1 deraadt 4370: 	/*
 4371: 	 * Step 4: change the trap base register, now that our trap handlers
 4372: 	 * will function (they need the tables we just set up).
1.195 pk 4373: 	 * This depends on the fact that memset does not use %g6.
1.1 deraadt 4374: 	 */
1.52 pk 4375: 	wr	%g6, 0, %tbr
1.9 deraadt 4376: 	nop; nop; nop		! paranoia
1.37 pk 4377: 
1.195 pk 4378: 	/* Clear `cpuinfo': memset(&cpuinfo, 0, sizeof cpuinfo) */
 4379: 	sethi	%hi(CPUINFO_VA), %o0
 4380: 	set	CPUINFO_STRUCTSIZE, %o2
1.192 jdolecek 4381: 	call	_C_LABEL(memset)
1.194 martin 4382: 	clr	%o1
1.98 pk 4383: 
1.131 thorpej 4384: 	/*
 4385: 	 * Initialize `cpuinfo' fields which are needed early. Note
 4386: 	 * we make the cpuinfo self-reference at the local VA for now.
 4387: 	 * It may be changed to reference a global VA later.
 4388: 	 */
1.111 pk 4389: 	set	_C_LABEL(u0), %o0		! cpuinfo.curpcb = u0;
 4390: 	sethi	%hi(cpcb), %l0
 4391: 	st	%o0, [%l0 + %lo(cpcb)]
1.98 pk 4392: 
1.132 pk 4393: 	sethi	%hi(CPUINFO_VA), %o0		! cpuinfo.ci_self = &cpuinfo;
1.131 thorpej 4394: 	sethi	%hi(_CISELFP), %l0
 4395: 	st	%o0, [%l0 + %lo(_CISELFP)]
 4396: 
1.111 pk 4397: 	set	_C_LABEL(eintstack), %o0	! cpuinfo.eintstack= _eintstack;
1.101 pk 4398: 	sethi	%hi(_EINTSTACKP), %l0
 4399: 	st	%o0, [%l0 + %lo(_EINTSTACKP)]
1.1 deraadt 4400: 
 4401: 	/*
1.11 deraadt 4402: 	 * Ready to run C code; finish bootstrap.
1.1 deraadt 4403: 	 */
1.111 pk 4404: 	call	_C_LABEL(bootstrap)
1.1 deraadt 4405: 	nop
1.11 deraadt 4406: 
 4407: 	/*
 4408: 	 * Call main. This returns to us after loading /sbin/init into
 4409: 	 * user space. (If the exec fails, main() does not return.)
 4410: 	 */
1.111 pk 4411: 	call	_C_LABEL(main)
1.11 deraadt 4412: 	clr	%o0		! our frame arg is ignored
1.89 pk 4413: 	/*NOTREACHED*/
1.164 pk 4414:
1.198 pk 4415: /*
 4416:  * Openfirmware entry point: openfirmware(void *args)
 4417:  */
ENTRY(openfirmware)
	! Tail-jump (not call) through the stashed PROM vector `romp':
	! %o7 is left untouched, so the OF client interface returns
	! directly to our caller; the args pointer stays in %o0.
 4419: 	sethi	%hi(_C_LABEL(romp)), %o1
 4420: 	ld	[%o1 + %lo(_C_LABEL(romp))], %o2
 4421: 	jmp	%o2
 4422: 	nop
1.165 pk 4423:
 4424: #if defined(SUN4M) || defined(SUN4D)
 4425: /*
 4426:  * V8 multiply and divide routines, to be copied over the code
 4427:  * for the V6/V7 routines.  Seems a shame to spend the call, but....
 4428:  * Note: while .umul and .smul return a 64-bit result in %o1%o0,
 4429:  * gcc only really cares about the low 32 bits in %o0.  This is
 4430:  * really just gcc output, cleaned up a bit.
 4431:  */
1.164 pk 4432: 	.globl	_C_LABEL(sparc_v8_muldiv)
 4433: _C_LABEL(sparc_v8_muldiv):
 4434: 	save	%sp, -CCFSZ, %sp
 4435: 
	/*
	 * OVERWRITE copies the `len'-byte V8 routine over the generic
	 * millicode routine `rtn', then flushes the instruction cache
	 * over the patched range (8 bytes per flush).
	 */
 4436: #define OVERWRITE(rtn, v8_rtn, len)	\
 4437: 	set	v8_rtn, %o0;		\
 4438: 	set	rtn, %o1;		\
 4439: 	call	_C_LABEL(bcopy);	\
 4440: 	mov	len, %o2;		\
 4441: 	/* now flush the insn cache */	\
 4442: 	set	rtn, %o0;		\
 4443: 	mov	len, %o1;		\
 4444: 0:					\
 4445: 	flush	%o0;			\
 4446: 	subcc	%o1, 8, %o1;		\
 4447: 	bgu	0b;			\
 4448: 	add	%o0, 8, %o0;		\
 4449: 
1.188 uwe 4450: 	OVERWRITE(.mul, v8_smul, .Lv8_smul_len)
 4451: 	OVERWRITE(.umul, v8_umul, .Lv8_umul_len)
 4452: 	OVERWRITE(.div, v8_sdiv, .Lv8_sdiv_len)
 4453: 	OVERWRITE(.udiv, v8_udiv, .Lv8_udiv_len)
 4454: 	OVERWRITE(.rem, v8_srem, .Lv8_srem_len)
 4455: 	OVERWRITE(.urem, v8_urem, .Lv8_urem_len)
1.164 pk 4456: #undef OVERWRITE
 4457: 	ret
 4458: 	restore
 4459: 
	/* Each .Lv8_*_len symbol below measures its routine for the copy. */
 4460: v8_smul:
 4461: 	retl
 4462: 	smul	%o0, %o1, %o0
1.188 uwe 4463: .Lv8_smul_len = .-v8_smul
1.164 pk 4464: v8_umul:
 4465: 	retl
 4466: 	umul	%o0, %o1, %o0
 4467: !v8_umul_len = 2 * 4
1.188 uwe 4468: .Lv8_umul_len = .-v8_umul
 4469: v8_sdiv:
	! %y must hold the sign-extension of the dividend before sdiv;
	! the three nops cover the wr-to-use delay.
 4470: 	sra	%o0, 31, %g2
 4471: 	wr	%g2, 0, %y
 4472: 	nop; nop; nop
 4473: 	retl
 4474: 	sdiv	%o0, %o1, %o0
1.188 uwe 4475: .Lv8_sdiv_len = .-v8_sdiv
1.164 pk 4476: v8_udiv:
 4477: 	wr	%g0, 0, %y
 4478: 	nop; nop; nop
 4479: 	retl
 4480: 	udiv	%o0, %o1, %o0
1.188 uwe 4481: .Lv8_udiv_len = .-v8_udiv
 4482: v8_srem:
	! remainder computed as %o0 - (%o0 / %o1) * %o1
 4483: 	sra	%o0, 31, %g3
 4484: 	wr	%g3, 0, %y
 4485: 	nop; nop; nop
 4486: 	sdiv	%o0, %o1, %g2
 4487: 	smul	%g2, %o1, %g2
 4488: 	retl
 4489: 	sub	%o0, %g2, %o0
1.188 uwe 4490: .Lv8_srem_len = .-v8_srem
1.164 pk 4491: v8_urem:
 4492: 	wr	%g0, 0, %y
 4493: 	nop; nop; nop
 4494: 	udiv	%o0, %o1, %g2
 4495: 	smul	%g2, %o1, %g2
 4496: 	retl
 4497: 	sub	%o0, %g2, %o0
1.188 uwe 4498: .Lv8_urem_len = .-v8_urem
1.164 pk 4499: 
1.165 pk 4500: #endif /* SUN4M || SUN4D */
1.89 pk 4501:
1.145 mrg 4502: #if defined(MULTIPROCESSOR)
1.89 pk 4503: /*
 4504:  * Entry point for non-boot CPUs in MP systems.
 4505:  */
1.111 pk 4506: 	.globl	_C_LABEL(cpu_hatch)
 4507: _C_LABEL(cpu_hatch):
1.89 pk 4508: 	rd	%psr, %g3		! paranoia: make sure ...
 4509: 	andn	%g3, PSR_ET, %g3	! we have traps off
 4510: 	wr	%g3, 0, %psr		! so that we can fiddle safely
 4511: 	nop; nop; nop
 4512: 
 4513: 	wr	%g0, 0, %wim		! make sure we can set psr
 4514: 	nop; nop; nop
 4515: 	wr	%g0, PSR_S|PSR_PS|PSR_PIL, %psr	! set initial psr
 4516: 	nop; nop; nop
 4517: 
 4518: 	wr	%g0, 2, %wim		! set initial %wim (w1 invalid)
 4519: 
 4520: 	/* Initialize Trap Base register */
	! trapbase was exported by the boot CPU in startmap_done
1.111 pk 4521: 	sethi	%hi(_C_LABEL(trapbase)), %o0
 4522: 	ld	[%o0+%lo(_C_LABEL(trapbase))], %g6
1.89 pk 4523: 	wr	%g6, 0, %tbr
 4524: 	nop; nop; nop		! paranoia
 4525: 
 4526: 	/* Set up a stack */
 4527: 	set	USRSTACK - CCFSZ, %fp	! as if called from user code
1.182 mrg 4528: 	sethi	%hi(IDLE_UP), %o0
 4529: 	ld	[%o0 + %lo(IDLE_UP)], %o0
1.102 pk 4530: 	set	USPACE - CCFSZ - 80, %sp
 4531: 	add	%sp, %o0, %sp
1.89 pk 4532: 
 4533: 	/* Enable traps */
 4534: 	rd	%psr, %l0
 4535: 	wr	%l0, PSR_ET, %psr
1.182 mrg 4536: 	nop; nop
1.89 pk 4537: 
 4538: 	/* Call C code */
1.111 pk 4539: 	call	_C_LABEL(cpu_setup)
1.182 mrg 4540: 	nop			! 3rd from above
1.89 pk 4541: 
1.170 pk 4542: 	/* Enable interrupts */
 4543: 	rd	%psr, %l0
 4544: 	andn	%l0, PSR_PIL, %l0	! psr &= ~PSR_PIL;
 4545: 	wr	%l0, 0, %psr		! (void) spl0();
 4546: 	nop; nop; nop
 4547: 
1.145 mrg 4548: 	/* Wait for go_smp_cpus to go */
	! spin until nonzero; the delay-slot ld refreshes %l0 each pass
 4549: 	set	_C_LABEL(go_smp_cpus), %l1
1.142 mrg 4550: 	ld	[%l1], %l0
 4551: 1:
1.145 mrg 4552: 	cmp	%l0, %g0
1.142 mrg 4553: 	be	1b
 4554: 	ld	[%l1], %l0
 4555: 
	! Preload the registers idle_enter expects, then join the idle loop.
1.173 pk 4556: 	mov	PSR_S|PSR_ET, %l1	! oldpsr = PSR_S | PSR_ET;
 4557: 	sethi	%hi(_C_LABEL(sched_whichqs)), %l2
 4558: 	clr	%l4
 4559: 	sethi	%hi(cpcb), %l6
 4560: 	b	idle_enter
1.185 thorpej 4561: 	sethi	%hi(curlwp), %l7
1.145 mrg 4562: 
 4563: #endif /* MULTIPROCESSOR */
1.1 deraadt 4564:
1.141 mrg 4565: #include "sigcode_state.s"
1.122 christos 4566: 
	/*
	 * Signal trampoline, delimited by the sigcode/esigcode labels.
	 * NOTE(review): presumably this span is copied out to user space
	 * and entered on signal delivery with the handler in %g1 and the
	 * (sig, code, scp, arg3) frame at %fp + 64 -- confirm against the
	 * machine-dependent sendsig code.
	 */
1.111 pk 4567: 	.globl	_C_LABEL(sigcode)
 4568: 	.globl	_C_LABEL(esigcode)
 4569: _C_LABEL(sigcode):
1.1 deraadt 4570: 
1.122 christos 4571: 	SAVE_STATE
 4572: 
1.1 deraadt 4573: 	ldd	[%fp + 64], %o0		! sig, code
 4574: 	ld	[%fp + 76], %o3		! arg3
 4575: 	call	%g1			! (*sa->sa_handler)(sig,code,scp,arg3)
 4576: 	add	%fp, 64 + 16, %o2	! scp
 4577: 
1.122 christos 4578: 	RESTORE_STATE
1.1 deraadt 4579: 
1.92 pk 4580: 	! get registers back & set syscall #
1.189 pk 4581: 	restore	%g0, SYS_compat_16___sigreturn14, %g1
1.1 deraadt 4582: 	add	%sp, 64 + 16, %o0	! compute scp
 4583: 	t	ST_SYSCALL		! sigreturn(scp)
 4584: 	! sigreturn does not return unless it fails
 4585: 	mov	SYS_exit, %g1		! exit(errno)
 4586: 	t	ST_SYSCALL
1.185 thorpej 4587: 	/* NOTREACHED */
1.111 pk 4588: _C_LABEL(esigcode):
1.1 deraadt 4589:
4590: /*
4591: * Primitives
1.52 pk 4592: */
1.1 deraadt 4593:
1.63 pk 4594: /*
 4595:  * General-purpose NULL routine.
 4596:  */
ENTRY(sparc_noop)
	retl				! return immediately; no effect
	nop				! (delay slot)
1.1 deraadt 4600:
 4601: /*
1.24 deraadt 4602:  * getfp() - get stack frame pointer
 4603:  */
ENTRY(getfp)
	retl
	mov	%fp, %o0		! return value set in the delay slot
4607:
 4608: /*
 4609:  * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
 4610:  *
 4611:  * Copy a null terminated string from the user address space into
 4612:  * the kernel address space.
 4613:  */
 4614: ENTRY(copyinstr)
 4615: 	! %o0 = fromaddr, %o1 = toaddr, %o2 = maxlen, %o3 = &lencopied
1.126 chs 4616: 	mov	%o1, %o5		! save = toaddr;
 4617: 	tst	%o2			! maxlen == 0?
 4618: 	beq,a	Lcstoolong		! yes, return ENAMETOOLONG
 4619: 	sethi	%hi(cpcb), %o4
 4620: 
1.1 deraadt 4621: 	set	KERNBASE, %o4
 4622: 	cmp	%o0, %o4		! fromaddr < KERNBASE?
1.126 chs 4623: 	blu	Lcsdocopy		! yes, go do it
 4624: 	sethi	%hi(cpcb), %o4		! (first instr of copy)
1.1 deraadt 4625: 
 4626: 	b	Lcsdone			! no, return EFAULT
 4627: 	mov	EFAULT, %o0
 4628: 
 4629: /*
 4630:  * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
 4631:  *
 4632:  * Copy a null terminated string from the kernel
 4633:  * address space to the user address space.
 4634:  */
 4635: ENTRY(copyoutstr)
 4636: 	! %o0 = fromaddr, %o1 = toaddr, %o2 = maxlen, %o3 = &lencopied
1.126 chs 4637: 	mov	%o1, %o5		! save = toaddr;
 4638: 	tst	%o2			! maxlen == 0?
 4639: 	beq,a	Lcstoolong		! yes, return ENAMETOOLONG
 4640: 	sethi	%hi(cpcb), %o4
 4641: 
1.1 deraadt 4642: 	set	KERNBASE, %o4
 4643: 	cmp	%o1, %o4		! toaddr < KERNBASE?
1.126 chs 4644: 	blu	Lcsdocopy		! yes, go do it
1.111 pk 4645: 	sethi	%hi(cpcb), %o4		! (first instr of copy)
1.1 deraadt 4646: 
 4647: 	b	Lcsdone			! no, return EFAULT
 4648: 	mov	EFAULT, %o0
 4649: 
	/*
	 * Shared tail of copyinstr/copyoutstr.  Faults during the copy are
	 * caught via cpcb->pcb_onfault, which is pointed at Lcsdone so a
	 * fault returns EFAULT-less cleanup through the common exit path.
	 */
 4650: Lcsdocopy:
1.111 pk 4651: !	sethi	%hi(cpcb), %o4		! (done earlier)
 4652: 	ld	[%o4 + %lo(cpcb)], %o4	! catch faults
1.138 chs 4653: 	set	Lcsdone, %g1
1.126 chs 4654: 	st	%g1, [%o4 + PCB_ONFAULT]
1.1 deraadt 4655: 
 4656: ! XXX should do this in bigger chunks when possible
 4657: 0:					! loop:
 4658: 	ldsb	[%o0], %g1		!	c = *fromaddr;
 4659: 	tst	%g1
 4660: 	stb	%g1, [%o1]		!	*toaddr++ = c;
 4661: 	be	1f			!	if (c == 0)
 4662: 	inc	%o1			!		goto ok;
 4663: 	deccc	%o2			!	if (--len > 0) {
1.126 chs 4664: 	bgu	0b			!		fromaddr++;
1.1 deraadt 4665: 	inc	%o0			!		goto loop;
 4666: 					!	}
1.126 chs 4667: Lcstoolong:				!
1.1 deraadt 4668: 	b	Lcsdone			!	error = ENAMETOOLONG;
 4669: 	mov	ENAMETOOLONG, %o0	!	goto done;
 4670: 1:					! ok:
 4671: 	clr	%o0			!	error = 0;
 4672: Lcsdone:				! done:
 4673: 	sub	%o1, %o5, %o1		!	len = to - save;
 4674: 	tst	%o3			!	if (lencopied)
 4675: 	bnz,a	3f
 4676: 	st	%o1, [%o3]		!		*lencopied = len;
 4677: 3:
 4678: 	retl				! cpcb->pcb_onfault = 0;
 4679: 	st	%g0, [%o4 + PCB_ONFAULT]! return (error);
4680:
 4681: /*
 4682:  * copystr(fromaddr, toaddr, maxlength, &lencopied)
 4683:  *
 4684:  * Copy a null terminated string from one point to another in
 4685:  * the kernel address space.  (This is a leaf procedure, but
 4686:  * it does not seem that way to the C compiler.)
 4687:  */
 4688: ENTRY(copystr)
	! Kernel-to-kernel variant: no pcb_onfault setup is needed.
 4689: 	mov	%o1, %o5		! to0 = to;
1.126 chs 4690: 	tst	%o2			! if (maxlength == 0)
 4691: 	beq,a	2f			!
 4692: 	mov	ENAMETOOLONG, %o0	!	ret = ENAMETOOLONG; goto done;
 4693: 
1.1 deraadt 4694: 0:					! loop:
 4695: 	ldsb	[%o0], %o4		!	c = *from;
 4696: 	tst	%o4
 4697: 	stb	%o4, [%o1]		!	*to++ = c;
 4698: 	be	1f			!	if (c == 0)
 4699: 	inc	%o1			!		goto ok;
 4700: 	deccc	%o2			!	if (--len > 0) {
1.126 chs 4701: 	bgu,a	0b			!		from++;
1.1 deraadt 4702: 	inc	%o0			!		goto loop;
 4703: 	b	2f			!	}
 4704: 	mov	ENAMETOOLONG, %o0	!	ret = ENAMETOOLONG; goto done;
 4705: 1:					! ok:
 4706: 	clr	%o0			!	ret = 0;
 4707: 2:
 4708: 	sub	%o1, %o5, %o1		!	len = to - to0;
 4709: 	tst	%o3			!	if (lencopied)
 4710: 	bnz,a	3f
 4711: 	st	%o1, [%o3]		!		*lencopied = len;
 4712: 3:
 4713: 	retl
 4714: 	nop
4715:
1.52 pk 4716: /*
 4717:  * Copyin(src, dst, len)
 4718:  *
 4719:  * Copy specified amount of data from user space into the kernel.
 4720:  */
 4721: ENTRY(copyin)
 4722: 	set	KERNBASE, %o3
 4723: 	cmp	%o0, %o3		! src < KERNBASE?
 4724: 	blu,a	Ldocopy			! yes, can try it
1.111 pk 4725: 	sethi	%hi(cpcb), %o3
1.1 deraadt 4726: 
 4727: 	/* source address points into kernel space: return EFAULT */
 4728: 	retl
 4729: 	mov	EFAULT, %o0
 4730: 
 4731: /*
 4732:  * Copyout(src, dst, len)
 4733:  *
 4734:  * Copy specified amount of data from kernel to user space.
 4735:  * Just like copyin, except that the `dst' addresses are user space
 4736:  * rather than the `src' addresses.
 4737:  */
 4738: ENTRY(copyout)
 4739: 	set	KERNBASE, %o3
 4740: 	cmp	%o1, %o3		! dst < KERBASE?
 4741: 	blu,a	Ldocopy
1.111 pk 4742: 	sethi	%hi(cpcb), %o3
1.1 deraadt 4743: 
 4744: 	/* destination address points into kernel space: return EFAULT */
 4745: 	retl
 4746: 	mov	EFAULT, %o0
 4747: 
 4748: /*
 4749:  * ******NOTE****** this depends on bcopy() not using %g7
 4750:  */
 4751: Ldocopy:
	! Arm cpcb->pcb_onfault with Lcopyfault so MMU faults during the
	! bcopy unwind here; %g7 carries our return address across bcopy.
1.111 pk 4752: !	sethi	%hi(cpcb), %o3
 4753: 	ld	[%o3 + %lo(cpcb)], %o3
1.1 deraadt 4754: 	set	Lcopyfault, %o4
 4755: 	mov	%o7, %g7		! save return address
1.111 pk 4756: 	call	_C_LABEL(bcopy)		! bcopy(src, dst, len)
1.1 deraadt 4757: 	st	%o4, [%o3 + PCB_ONFAULT]
 4758: 
1.111 pk 4759: 	sethi	%hi(cpcb), %o3
 4760: 	ld	[%o3 + %lo(cpcb)], %o3
1.1 deraadt 4761: 	st	%g0, [%o3 + PCB_ONFAULT]
 4762: 	jmp	%g7 + 8
 4763: 	clr	%o0			! return 0
 4764: 
 4765: ! Copyin or copyout fault.  Clear cpcb->pcb_onfault and return EFAULT.
 4766: ! Note that although we were in bcopy, there is no state to clean up;
 4767: ! the only special thing is that we have to return to [g7 + 8] rather than
 4768: ! [o7 + 8].
 4769: Lcopyfault:
1.111 pk 4770: 	sethi	%hi(cpcb), %o3
 4771: 	ld	[%o3 + %lo(cpcb)], %o3
1.1 deraadt 4772: 	jmp	%g7 + 8
1.138 chs 4773: 	st	%g0, [%o3 + PCB_ONFAULT]
1.1 deraadt 4774:
4775:
 4776: /*
 4777:  * Write all user windows presently in the CPU back to the user's stack.
 4778:  * We just do `save' instructions until pcb_uw == 0.
 4779:  *
 4780:  *	p = cpcb;
 4781:  *	nsaves = 0;
 4782:  *	while (p->pcb_uw > 0)
 4783:  *		save(), nsaves++;
 4784:  *	while (--nsaves >= 0)
 4785:  *		restore();
 4786:  */
 4787: ENTRY(write_user_windows)
	! %g6 = cpcb, %g5 = nsaves, %g7 = scratch for pcb_uw
1.111 pk 4788: 	sethi	%hi(cpcb), %g6
 4789: 	ld	[%g6 + %lo(cpcb)], %g6
1.1 deraadt 4790: 	b	2f
 4791: 	clr	%g5
 4792: 1:
 4793: 	save	%sp, -64, %sp
 4794: 2:
 4795: 	ld	[%g6 + PCB_UW], %g7
 4796: 	tst	%g7
 4797: 	bg,a	1b
 4798: 	inc	%g5
 4799: 3:
 4800: 	deccc	%g5
 4801: 	bge,a	3b
 4802: 	restore
 4803: 	retl
 4804: 	nop
4805:
4806:
4807: /*
4808: * Switch statistics (for later tweaking):
4809: * nswitchdiff = p1 => p2 (i.e., chose different process)
1.111 pk 4810: * cnt.v_swtch = total calls to swtch+swtchexit
1.1 deraadt 4811: */
1.111 pk 4812: .comm _C_LABEL(nswitchdiff), 4
1.1 deraadt 4813:
1.173 pk 4814: /*
1.209 pk 4815: * cpu_exit is called as the last action during exit.
1.173 pk 4816: *
4817: * We lay the process to rest by changing to the `idle' kernel stack,
4818: * and note that the `last loaded process' is nonexistent.
1.209 pk 4819: *
4820: * lwp_exit2() will free the thread's stack.
1.173 pk 4821: */
1.209 pk 4822: ENTRY(cpu_exit)
4823: mov %o0, %g2 ! save lwp for lwp_exit2() call
1.173 pk 4824:
4825: /*
4826: * Change pcb to idle u. area, i.e., set %sp to top of stack
4827: * and %psr to PSR_S|PSR_ET, and set cpcb to point to idle_u.
4828: * Once we have left the old stack, we can call lwp_exit2() to
4829: * destroy it. Call it any sooner and the register windows
4830: * go bye-bye.
4831: */
4832: #if defined(MULTIPROCESSOR)
4833: sethi %hi(IDLE_UP), %g5
4834: ld [%g5 + %lo(IDLE_UP)], %g5 ! %g5 = this CPU's idle u. area
4835: #else
4836: set _C_LABEL(idle_u), %g5
4837: #endif
4838: sethi %hi(cpcb), %g6
4839: mov 1, %g7
4840: wr %g0, PSR_S, %psr ! change to window 0, traps off
4841: wr %g0, 2, %wim ! and make window 1 the trap window
4842: st %g5, [%g6 + %lo(cpcb)] ! cpcb = &idle_u
4843: st %g7, [%g5 + PCB_WIM] ! idle_u.pcb_wim = log2(2) = 1
4844: #if defined(MULTIPROCESSOR)
4845: set USPACE-CCFSZ, %o1 !
4846: add %g5, %o1, %sp ! set new %sp
4847: #else
4848: set _C_LABEL(idle_u) + USPACE-CCFSZ, %sp ! set new %sp
4849: #endif
4850:
4851: #ifdef DEBUG
4852: mov %g5, %l6 ! %l6 = _idle_u
4853: SET_SP_REDZONE(%l6, %l5)
4854: #endif
4855: wr %g0, PSR_S|PSR_ET, %psr ! and then enable traps
1.176 pk 4856: nop
1.209 pk 4857: call lwp_exit2 ! lwp_exit2(l)
1.173 pk 4858: mov %g2, %o0
4859:
4860: /*
4861: * Now fall through to `the last switch'. %l6 was set to
4862: * %hi(cpcb), but may have been clobbered in lwp_exit2(),
4863: * so all the registers described below will be set here.
4864: *
4865: * REGISTER USAGE AT THIS POINT:
4866: * %l1 = oldpsr (excluding ipl bits)
4867: * %l2 = %hi(whichqs)
4868: * %l4 = lastproc
4869: * %l6 = %hi(cpcb)
1.185 thorpej 4870: * %l7 = %hi(curlwp)
1.173 pk 4871: * %o0 = tmp 1
4872: * %o1 = tmp 2
4873: */
4874:
4875: mov PSR_S|PSR_ET, %l1 ! oldpsr = PSR_S | PSR_ET;
4876: sethi %hi(_C_LABEL(sched_whichqs)), %l2
1.180 mrg 4877: #if !defined(MULTIPROCESSOR)
1.173 pk 4878: clr %l4 ! lastproc = NULL;
1.180 mrg 4879: #endif
1.173 pk 4880: sethi %hi(cpcb), %l6
1.185 thorpej 4881: sethi %hi(curlwp), %l7
1.173 pk 4882: b idle_enter
1.185 thorpej 4883: st %g0, [%l7 + %lo(curlwp)] ! curlwp = NULL;
1.173 pk 4884:
4885: /*
4886: * When no processes are on the runq, switch
4887: * idles here waiting for something to come ready.
4888: * The registers are set up as noted above.
1.184 pk 4889: *
4890: * There are three entry points into the idle loop.
4891: * idle_switch: when a switch to the CPU's idle stack is required
4892: * idle: when already on the idle stack, scheduler lock held
4893: * idle_enter: when already on the idle stack, scheduler lock not held
1.173 pk 4894: */
1.184 pk 4895: idle_switch:
4896: #if defined(MULTIPROCESSOR)
4897: sethi %hi(IDLE_UP), %g5
4898: ld [%g5 + %lo(IDLE_UP)], %g5 ! %g5 = this CPU's idle u. area
4899: #else
4900: set _C_LABEL(idle_u), %g5
4901: #endif
4902: mov %l6, %g6 ! save %hi(cpcb) before changing windows
4903: wr %g0, PSR_S|PSR_PIL, %psr! change to window 0, traps off
4904: wr %g0, 2, %wim ! and make window 1 the trap window
4905: mov 1, %o0
4906: st %g5, [%g6 + %lo(cpcb)] ! cpcb = &idle_u
4907: st %o0, [%g5 + PCB_WIM] ! idle_u.pcb_wim = log2(2) = 1
4908: #if defined(MULTIPROCESSOR)
4909: set USPACE-CCFSZ, %o1 !
4910: add %g5, %o1, %sp ! set new %sp
4911: #else
4912: set _C_LABEL(idle_u) + USPACE-CCFSZ, %sp ! set new %sp
4913: #endif
4914: mov %g0, %i6 ! paranoid
4915: mov %g0, %i7 !
4916:
4917: #ifdef DEBUG
4918: mov %g5, %o0 ! %o0 = _idle_u
4919: SET_SP_REDZONE(%o0, %o1)
4920: #endif
4921: ! enable traps and continue at splsched()
4922: wr %g0, PSR_S|PSR_ET|(IPL_SCHED<<8), %psr
4923:
4924: /* now set up the locals in our new window */
4925: mov PSR_S|PSR_ET, %l1 ! oldpsr = PSR_S | PSR_ET;
4926: sethi %hi(_C_LABEL(sched_whichqs)), %l2
4927: clr %l4 ! lastproc = NULL;
4928: sethi %hi(cpcb), %l6
1.185 thorpej 4929: sethi %hi(curlwp), %l7
1.184 pk 4930: /* FALLTHROUGH*/
4931:
1.173 pk 4932: idle:
4933: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
4934: ! unlock scheduler lock
4935: call _C_LABEL(sched_unlock_idle)
4936: nop
4937: #endif
4938:
4939: idle_enter:
1.180 mrg 4940: #if defined(MULTIPROCESSOR)
4941: clr %l4 ! lastproc = NULL;
4942: #endif
1.173 pk 4943: wr %l1, 0, %psr ! (void) spl0();
4944: 1: ! spin reading whichqs until nonzero
4945: ld [%l2 + %lo(_C_LABEL(sched_whichqs))], %o3
4946: tst %o3
4947: bnz,a idle_leave
4948: wr %l1, (IPL_SCHED << 8), %psr ! (void) splsched();
4949:
4950: ! Check uvm.page_idle_zero
4951: sethi %hi(_C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO), %o3
4952: ld [%o3 + %lo(_C_LABEL(uvm) + UVM_PAGE_IDLE_ZERO)], %o3
4953: tst %o3
4954: bz 1b
4955: nop
4956:
	! zero free pages while otherwise idle, then re-check the run queue
4957: call _C_LABEL(uvm_pageidlezero)
4958: nop
4959: b,a 1b
4960:
1.184 pk 4961: idle_leave:
4962: ! just wrote to %psr; observe psr delay before doing a `save'
4963: ! or loading sched_whichqs.
4964: nop; nop
1.173 pk 4965: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
4966: /* Before we leave the idle loop, detain the scheduler lock */
4967: call _C_LABEL(sched_lock_idle)
4968: nop
4969: #endif
1.184 pk 4970: b Lsw_scan
4971: ld [%l2 + %lo(_C_LABEL(sched_whichqs))], %o3
1.173 pk 4972:
! Firewall panics for cpu_switch(): run queue / wchan / p_stat invariant
! violations detected below. Each loads a message and calls panic().
4973: Lsw_panic_rq:
4974: sethi %hi(1f), %o0
4975: call _C_LABEL(panic)
4976: or %lo(1f), %o0, %o0
4977: Lsw_panic_wchan:
4978: sethi %hi(2f), %o0
4979: call _C_LABEL(panic)
4980: or %lo(2f), %o0, %o0
4981: Lsw_panic_srun:
4982: sethi %hi(3f), %o0
4983: call _C_LABEL(panic)
4984: or %lo(3f), %o0, %o0
4985: 1: .asciz "switch rq"
4986: 2: .asciz "switch wchan"
4987: 3: .asciz "switch SRUN"
4988: _ALIGN
4989:
4990: /*
4991: * cpu_switch() picks a process to run and runs it, saving the current
4992: * one away. On the assumption that (since most workstations are
4993: * single user machines) the chances are quite good that the new
4994: * process will turn out to be the current process, we defer saving
4995: * it here until we have found someone to load. If that someone
4996: * is the current process we avoid both store and load.
4997: *
4998: * cpu_switch() is always entered at splsched.
4999: *
5000: * IT MIGHT BE WORTH SAVING BEFORE ENTERING idle TO AVOID HAVING TO
5001: * SAVE LATER WHEN SOMEONE ELSE IS READY ... MUST MEASURE!
5002: */
5003: .globl _C_LABEL(__ffstab)
1.185 thorpej 5004: ENTRY(cpu_switch)
1.184 pk 5005: ENTRY(cpu_switchto)
1.173 pk 5006: /*
5007: * REGISTER USAGE AT THIS POINT:
5008: * %l1 = oldpsr (excluding ipl bits)
5009: * %l2 = %hi(whichqs)
5010: * %l3(%g3) = p
5011: * %l4(%g4) = lastproc
5012: * %l5 = tmp 0
5013: * %l6 = %hi(cpcb)
1.185 thorpej 5014: * %l7 = %hi(curlwp)
1.173 pk 5015: * %o0 = tmp 1
5016: * %o1 = tmp 2
5017: * %o2 = tmp 3
5018: * %o3 = tmp 4, then at Lsw_scan, whichqs
5019: * %o4 = tmp 5, then at Lsw_scan, which
5020: * %o5 = tmp 6, then at Lsw_scan, q
5021: */
5022: save %sp, -CCFSZ, %sp
5023: mov %i0, %l4 ! save p
5024: sethi %hi(cpcb), %l6
5025: ld [%l6 + %lo(cpcb)], %o0
5026: std %i6, [%o0 + PCB_SP] ! cpcb->pcb_<sp,pc> = <fp,pc>;
5027: rd %psr, %l1 ! oldpsr = %psr;
1.185 thorpej 5028: sethi %hi(curlwp), %l7
1.173 pk 5029: st %l1, [%o0 + PCB_PSR] ! cpcb->pcb_psr = oldpsr;
5030: andn %l1, PSR_PIL, %l1 ! oldpsr &= ~PSR_PIL;
1.185 thorpej 5031: st %g0, [%l7 + %lo(curlwp)] ! curlwp = NULL;
1.173 pk 5032: /*
5033: * Save the old process: write back all windows (excluding
5034: * the current one). XXX crude; knows nwindows <= 8
5035: */
5036: #define SAVE save %sp, -64, %sp
5037: wb1: SAVE; SAVE; SAVE; SAVE; SAVE; SAVE; /* 6 of each: */
5038: restore; restore; restore; restore; restore; restore
5039:
1.184 pk 5040: #if defined(MULTIPROCESSOR)
5041: /* flush this process's context from TLB (on SUN4M/4D) */
5042: call _C_LABEL(pmap_deactivate) ! pmap_deactivate(lastproc);
5043: mov %i0, %o0
5044: #endif
5045:
1.173 pk 5046: /* If we've been given a process to switch to, skip the rq stuff */
5047: tst %i1
5048: bnz,a Lsw_load
5049: mov %i1, %l3 ! but move into the expected register first
5050:
1.184 pk 5051: /* If nothing on the rq, wait after switching to idle stack */
1.173 pk 5052: sethi %hi(_C_LABEL(sched_whichqs)), %l2
1.184 pk 5053: ld [%l2 + %lo(_C_LABEL(sched_whichqs))], %o3
5054: tst %o3
5055: bz idle_switch
5056: EMPTY
1.173 pk 5057:
5058: Lsw_scan:
5059: /*
1.184 pk 5060: * Enter here with %o3 set to sched_whichqs.
5061: *
1.173 pk 5062: * Optimized inline expansion of `which = ffs(whichqs) - 1';
5063: * branches to idle if ffs(whichqs) was 0.
5064: */
5065: set _C_LABEL(__ffstab), %o2
5066: andcc %o3, 0xff, %o1 ! byte 0 zero?
5067: bz,a 1f ! yes, try byte 1
5068: srl %o3, 8, %o0
5069: b 2f ! ffs = ffstab[byte0]; which = ffs - 1;
5070: ldsb [%o2 + %o1], %o0
5071: 1: andcc %o0, 0xff, %o1 ! byte 1 zero?
5072: bz,a 1f ! yes, try byte 2
5073: srl %o0, 8, %o0
5074: ldsb [%o2 + %o1], %o0 ! which = ffstab[byte1] + 7;
5075: b 3f
5076: add %o0, 7, %o4
5077: 1: andcc %o0, 0xff, %o1 ! byte 2 zero?
5078: bz,a 1f ! yes, try byte 3
5079: srl %o0, 8, %o0
5080: ldsb [%o2 + %o1], %o0 ! which = ffstab[byte2] + 15;
5081: b 3f
5082: add %o0, 15, %o4
5083: 1: ldsb [%o2 + %o0], %o0 ! ffs = ffstab[byte3] + 24
5084: addcc %o0, 24, %o0 ! (note that ffstab[0] == -24)
5085: bz idle ! if answer was 0, go idle
5086: EMPTY
5087: 2: sub %o0, 1, %o4 ! which = ffs(whichqs) - 1
5088: 3: /* end optimized inline expansion */
5089:
5090: /*
5091: * We found a nonempty run queue. Take its first process.
5092: */
5093: set _C_LABEL(sched_qs), %o5 ! q = &qs[which];
5094: sll %o4, 3, %o0 ! each queue head is 8 bytes
5095: add %o0, %o5, %o5
5096: ld [%o5], %l3 ! p = q->ph_link;
5097: cmp %l3, %o5 ! if (p == q)
5098: be Lsw_panic_rq ! panic("switch rq");
5099: EMPTY
5100: ld [%l3], %o0 ! tmp0 = p->p_forw;
5101: st %o0, [%o5] ! q->ph_link = tmp0;
5102: st %o5, [%o0 + 4] ! tmp0->p_back = q;
5103: cmp %o0, %o5 ! if (tmp0 == q)
5104: bne Lsw_load
5105: EMPTY
5106: mov 1, %o1 ! whichqs &= ~(1 << which);
5107: sll %o1, %o4, %o1
5108: andn %o3, %o1, %o3
5109: st %o3, [%l2 + %lo(_C_LABEL(sched_whichqs))]
5110:
5111: Lsw_load:
5112: /*
5113: * PHASE TWO: NEW REGISTER USAGE:
5114: * %l1 = oldpsr (excluding ipl bits)
5115: * %l2 =
5116: * %l3 = p
5117: * %l4 = lastproc
5118: * %l5 =
5119: * %l6 = %hi(cpcb)
1.185 thorpej 5120: * %l7 = %hi(curlwp)
1.173 pk 5121: * %o0 = tmp 1
5122: * %o1 = tmp 2
5123: * %o2 = tmp 3
5124: * %o3 = vm
5125: */
5126:
5127: /* firewalls */
1.185 thorpej 5128: ld [%l3 + L_WCHAN], %o0 ! if (p->p_wchan)
1.173 pk 5129: tst %o0
5130: bne Lsw_panic_wchan ! panic("switch wchan");
5131: EMPTY
1.185 thorpej 5132: ld [%l3 + L_STAT], %o0 ! if (p->p_stat != LSRUN)
5133: cmp %o0, LSRUN
1.173 pk 5134: bne Lsw_panic_srun ! panic("switch SRUN");
5135: EMPTY
5136:
5137: /*
5138: * Committed to running process p.
5139: * It may be the same as the one we were running before.
5140: */
1.185 thorpej 5141: mov LSONPROC, %o0 ! p->p_stat = LSONPROC;
5142: st %o0, [%l3 + L_STAT]
1.173 pk 5143:
5144: /* p->p_cpu initialized in fork1() for single-processor */
5145: #if defined(MULTIPROCESSOR)
5146: sethi %hi(_CISELFP), %o0 ! p->p_cpu = cpuinfo.ci_self;
5147: ld [%o0 + %lo(_CISELFP)], %o0
1.185 thorpej 5148: st %o0, [%l3 + L_CPU]
1.173 pk 5149: #endif
5150:
1.185 thorpej 5151: ld [%l3 + L_ADDR], %g5 ! newpcb = p->p_addr;
1.173 pk 5152: st %g0, [%l3 + 4] ! p->p_back = NULL;
1.185 thorpej 5153: st %l3, [%l7 + %lo(curlwp)] ! curlwp = p;
1.173 pk 5154:
5155: /*
5156: * Load the new process. To load, we must change stacks and
5157: * and alter cpcb. We must also load the CWP and WIM from the
5158: * new process' PCB, since, when we finally return from
5159: * the trap, the CWP of the trap window must match the
5160: * CWP stored in the trap frame.
5161: *
5162: * Once the new CWP is set below our local registers become
5163: * invalid, so:
5164: *
5165: * PHASE THREE: NEW REGISTER USAGE:
5166: * %g2 = newpsr
5167: * %g3 = p
5168: * %g4 = lastproc
5169: * %g5 = newpcb
1.176 pk 5170: * %l0 = return value
1.173 pk 5171: * %l1 = oldpsr (excluding ipl bits)
5172: * %l6 = %hi(cpcb)
5173: * %o0 = tmp 1
5174: * %o1 = tmp 2
5175: * %o2 = tmp 3
5176: * %o3 = vm
5177: */
5178:
5179: mov %l3, %g3 ! save p and lastproc to globals
5180: mov %l4, %g4 !
5181: ld [%g5 + PCB_PSR], %g2 ! newpsr = newpcb->pcb_psr;
5182:
5183: /* traps off while we switch to the new stack */
5184: wr %l1, (IPL_SCHED << 8) | PSR_ET, %psr
5185:
5186: /* set new cpcb */
5187: st %g5, [%l6 + %lo(cpcb)] ! cpcb = newpcb;
5188:
5189: /* compute new wim */
5190: ld [%g5 + PCB_WIM], %o0
5191: mov 1, %o1
5192: sll %o1, %o0, %o0
5193: wr %o0, 0, %wim ! %wim = 1 << newpcb->pcb_wim;
5194: /* now must not change %psr for 3 more instrs */
5195: /* Clear FP & CP enable bits, as well as the PIL field */
5196: /*1,2*/ set PSR_EF|PSR_EC|PSR_PIL, %o0
5197: /*3*/ andn %g2, %o0, %g2 ! newpsr &= ~(PSR_EF|PSR_EC|PSR_PIL);
5198: /* set new psr, but with traps disabled */
5199: wr %g2, (IPL_SCHED << 8)|PSR_ET, %psr ! %psr = newpsr ^ PSR_ET;
5200:
5201: /* load new stack and return address */
5202: ldd [%g5 + PCB_SP], %i6 ! <fp,pc> = newpcb->pcb_<sp,pc>
5203: add %fp, -CCFSZ, %sp ! set stack frame for this window
5204: #ifdef DEBUG
5205: mov %g5, %o0
5206: SET_SP_REDZONE(%o0, %o1)
5207: CHECK_SP_REDZONE(%o0, %o1)
5208: #endif
5209:
5210: /* finally, enable traps and continue at splsched() */
5211: wr %g2, IPL_SCHED << 8 , %psr ! psr = newpsr;
5212:
1.191 pk 5213: mov %g3, %l3 ! restore p and lastproc from globals
5214: mov %g4, %l4 ! (globals will get clobbered by the
5215: ! sched_unlock_idle() below)
5216:
1.184 pk 5217: sethi %hi(_WANT_RESCHED), %o0 ! want_resched = 0;
5218: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
5219: /* Done with the run queues; release the scheduler lock */
5220: call _C_LABEL(sched_unlock_idle)
1.180 mrg 5221: #endif
1.184 pk 5222: st %g0, [%o0 + %lo(_WANT_RESCHED)]! delay slot
1.180 mrg 5223:
1.173 pk 5224: /*
5225: * Now running p. Make sure it has a context so that it
5226: * can talk about user space stuff. (Its pcb_uw is currently
5227: * zero so it is safe to have interrupts going here.)
1.176 pk 5228: *
5229: * On multi-processor machines, the context might have changed
5230: * (e.g. by exec(2)) even if we pick up the same process here.
1.173 pk 5231: */
1.191 pk 5232: subcc %l3, %l4, %l0 ! p == lastproc?
1.176 pk 5233: #if !defined(MULTIPROCESSOR)
1.173 pk 5234: be Lsw_sameproc ! yes, context is still set for p
5235: EMPTY
1.176 pk 5236: #endif
1.173 pk 5237:
1.191 pk 5238: ld [%l3 + L_PROC], %o2 ! p = l->l_proc;
1.173 pk 5239: INCR(_C_LABEL(nswitchdiff)) ! clobbers %o0,%o1
1.185 thorpej 5240: ld [%o2 + P_VMSPACE], %o3 ! vm = p->p_vmspace;
1.173 pk 5241: ld [%o3 + VM_PMAP], %o3 ! pm = vm->vm_map.vm_pmap;
1.180 mrg 5242: #if defined(MULTIPROCESSOR)
1.197 wiz 5243: /* Add this CPU to the pmap's CPU set */
1.180 mrg 5244: sethi %hi(CPUINFO_VA + CPUINFO_CPUNO), %o0
5245: ld [%o0 + %lo(CPUINFO_VA + CPUINFO_CPUNO)], %o1
5246: mov 1, %o2
5247: ld [%o3 + PMAP_CPUSET], %o0
5248: sll %o2, %o1, %o2
5249: or %o0, %o2, %o0 ! pm->pm_cpuset |= cpu_number();
5250: st %o0, [%o3 + PMAP_CPUSET]
5251: #endif
1.173 pk 5252: ld [%o3 + PMAP_CTX], %o0 ! if (pm->pm_ctx != NULL)
5253: tst %o0
5254: bnz,a Lsw_havectx ! goto havecontext;
5255: ld [%o3 + PMAP_CTXNUM], %i1 ! load context number
5256:
5257: /* p does not have a context: call ctx_alloc to get one */
5258: call _C_LABEL(ctx_alloc) ! ctx_alloc(pm);
5259: mov %o3, %o0
5260:
5261: ret
1.176 pk 5262: restore %g0, %l0, %o0 ! return (p != lastproc)
1.173 pk 5263:
5264: /* p does have a context: just switch to it */
5265: Lsw_havectx:
5266: ! context is in %i1
5267: #if defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
	! NOP_ON_4M_15 is patched at boot: on sun4m the first branch is
	! turned into a nop so we fall into the sun4m code at 2:.
5268: NOP_ON_4M_15:
5269: b,a 1f
5270: b,a 2f
5271: #endif
5272: 1:
5273: #if defined(SUN4) || defined(SUN4C)
5274: set AC_CONTEXT, %o1
5275: stba %i1, [%o1] ASI_CONTROL ! setcontext(vm->vm_pmap.pm_ctxnum);
5276: ret
1.176 pk 5277: restore %g0, %l0, %o0 ! return (p != lastproc)
1.173 pk 5278: #endif
5279: 2:
1.176 pk 5280: #if defined(SUN4M) || defined(SUN4D)
1.173 pk 5281: /*
5282: * Flush caches that need to be flushed on context switch.
5283: * We know this is currently only necessary on the sun4m hypersparc.
5284: */
5285: sethi %hi(CPUINFO_VA + CPUINFO_PURE_VCACHE_FLS), %o0
5286: ld [%o0 + %lo(CPUINFO_VA + CPUINFO_PURE_VCACHE_FLS)], %o2
5287: jmpl %o2, %o7
5288: set SRMMU_CXR, %i2
5289: sta %i1, [%i2] ASI_SRMMU ! setcontext(vm->vm_pmap.pm_ctxnum);
5290: ret
1.176 pk 5291: restore %g0, %l0, %o0 ! return (p != lastproc)
1.173 pk 5292: #endif
5293:
1.176 pk 5294: #if !defined(MULTIPROCESSOR)
1.173 pk 5295: Lsw_sameproc:
5296: /*
5297: * We are resuming the process that was running at the
1.176 pk 5298: * call to switch().
1.173 pk 5299: */
5300: ret
1.176 pk 5301: restore %g0, %g0, %o0 ! return (0)
5302: #endif /* !MULTIPROCESSOR */
1.173 pk 5303:
1.185 thorpej 5304:
1.173 pk 5305: /*
5306: * Snapshot the current process so that stack frames are up to date.
5307: * Only used just before a crash dump.
5308: */
5309: ENTRY(snapshot)
5310: std %o6, [%o0 + PCB_SP] ! save sp
5311: rd %psr, %o1 ! save psr
5312: st %o1, [%o0 + PCB_PSR]
5313:
5314: /*
5315: * Just like switch(); same XXX comments apply.
5316: * 7 of each. Minor tweak: the 7th restore is
5317: * done after a ret.
5318: */
5319: SAVE; SAVE; SAVE; SAVE; SAVE; SAVE; SAVE
5320: restore; restore; restore; restore; restore; restore; ret; restore
5321:
5322:
5323: /*
5324: * cpu_fork() arrange for proc_trampoline() to run after a process gets
5325: * chosen in switch(). The stack frame will contain a function pointer
5326: * in %l0, and an argument to pass to it in %l2.
5327: *
5328: * If the function *(%l0) returns, we arrange for an immediate return
5329: * to user mode. This happens in two known cases: after execve(2) of init,
5330: * and when returning a child to user mode after a fork(2).
5331: *
5332: * If were setting up a kernel thread, the function *(%l0) will not return.
5333: */
5334: ENTRY(proc_trampoline)
5335: /*
5336: * Note: cpu_fork() has set up a stack frame for us to run in,
5337: * so we can call other functions from here without using
5338: * `save ... restore'.
5339: */
5340: #ifdef MULTIPROCESSOR
5341: /* Finish setup in SMP environment: acquire locks etc. */
5342: call _C_LABEL(proc_trampoline_mp)
5343: nop
5344: #endif
5345:
5346: /* Reset interrupt level */
1.174 pk 5347: rd %psr, %l2
5348: andn %l2, PSR_PIL, %o0 ! psr &= ~PSR_PIL;
1.173 pk 5349: wr %o0, 0, %psr ! (void) spl0();
5350: nop ! psr delay; the next 2 instructions
5351: ! can safely be made part of the
5352: ! required 3 instructions psr delay
5353: call %l0 ! (*func)(arg)
5354: mov %l1, %o0
5355:
5356: /*
5357: * Here we finish up as in syscall, but simplified.
5358: * cpu_fork() (or sendsig(), if we took a pending signal
5359: * in child_return()) will have set the user-space return
5360: * address in tf_pc. In both cases, %npc should be %pc + 4.
5361: */
5362: ld [%sp + CCFSZ + 4], %l1 ! pc = tf->tf_pc from cpu_fork()
1.174 pk 5363: and %l2, PSR_CWP, %o1 ! keep current CWP
1.173 pk 5364: or %o1, PSR_S, %l0 ! user psr
5365: b return_from_syscall
5366: add %l1, 4, %l2 ! npc = pc+4
5367:
1.1 deraadt 5368: /*
5369: * {fu,su}{,i}{byte,word}
5370: */
1.111 pk 5371: _ENTRY(fuiword)
1.1 deraadt 5372: ENTRY(fuword)
5373: set KERNBASE, %o2
5374: cmp %o0, %o2 ! if addr >= KERNBASE...
5375: bgeu Lfsbadaddr
5376: EMPTY
5377: btst 3, %o0 ! or has low bits set...
5378: bnz Lfsbadaddr ! go return -1
5379: EMPTY
1.111 pk 5380: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5381: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5382: set Lfserr, %o3
5383: st %o3, [%o2 + PCB_ONFAULT]
5384: ld [%o0], %o0 ! fetch the word
5385: retl ! phew, made it, return the word
1.138 chs 5386: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
1.1 deraadt 5387:
5388: Lfserr:
5389: st %g0, [%o2 + PCB_ONFAULT]! error in r/w, clear pcb_onfault
5390: Lfsbadaddr:
5391: retl ! and return error indicator
1.21 deraadt 5392: mov -1, %o0
1.1 deraadt 5393:
5394: /*
5395: * This is just like Lfserr, but it's a global label that allows
5396: * mem_access_fault() to check to see that we don't want to try to
5397: * page in the fault. It's used by fuswintr() etc.
5398: */
1.111 pk 5399: .globl _C_LABEL(Lfsbail)
5400: _C_LABEL(Lfsbail):
1.1 deraadt 5401: st %g0, [%o2 + PCB_ONFAULT]! error in r/w, clear pcb_onfault
5402: retl ! and return error indicator
1.21 deraadt 5403: mov -1, %o0
1.1 deraadt 5404:
5405: /*
5406: * Like fusword but callable from interrupt context.
5407: * Fails if data isn't resident.
5408: */
5409: ENTRY(fuswintr)
5410: set KERNBASE, %o2
5411: cmp %o0, %o2 ! if addr >= KERNBASE
5412: bgeu Lfsbadaddr ! return error
5413: EMPTY
1.111 pk 5414: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfsbail;
5415: ld [%o2 + %lo(cpcb)], %o2
5416: set _C_LABEL(Lfsbail), %o3
1.1 deraadt 5417: st %o3, [%o2 + PCB_ONFAULT]
5418: lduh [%o0], %o0 ! fetch the halfword
5419: retl ! made it
5420: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
5421:
! Fetch a halfword from user space; returns -1 on fault.
5422: ENTRY(fusword)
5423: set KERNBASE, %o2
5424: cmp %o0, %o2 ! if addr >= KERNBASE
5425: bgeu Lfsbadaddr ! return error
5426: EMPTY
1.111 pk 5427: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5428: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5429: set Lfserr, %o3
5430: st %o3, [%o2 + PCB_ONFAULT]
5431: lduh [%o0], %o0 ! fetch the halfword
5432: retl ! made it
5433: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
5434:
1.111 pk 5435: _ENTRY(fuibyte)
1.1 deraadt 5436: ENTRY(fubyte)
5437: set KERNBASE, %o2
5438: cmp %o0, %o2 ! if addr >= KERNBASE
5439: bgeu Lfsbadaddr ! return error
5440: EMPTY
1.111 pk 5441: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5442: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5443: set Lfserr, %o3
5444: st %o3, [%o2 + PCB_ONFAULT]
5445: ldub [%o0], %o0 ! fetch the byte
5446: retl ! made it
5447: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
5448:
1.111 pk 5449: _ENTRY(suiword)
1.1 deraadt 5450: ENTRY(suword)
5451: set KERNBASE, %o2
5452: cmp %o0, %o2 ! if addr >= KERNBASE ...
5453: bgeu Lfsbadaddr
5454: EMPTY
5455: btst 3, %o0 ! or has low bits set ...
5456: bnz Lfsbadaddr ! go return error
5457: EMPTY
1.111 pk 5458: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5459: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5460: set Lfserr, %o3
5461: st %o3, [%o2 + PCB_ONFAULT]
5462: st %o1, [%o0] ! store the word
5463: st %g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
5464: retl ! and return 0
5465: clr %o0
5466:
! Store a halfword to user space from interrupt context; uses Lfsbail
! so a non-resident page fails rather than paging in.
5467: ENTRY(suswintr)
5468: set KERNBASE, %o2
5469: cmp %o0, %o2 ! if addr >= KERNBASE
5470: bgeu Lfsbadaddr ! go return error
5471: EMPTY
1.111 pk 5472: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfsbail;
5473: ld [%o2 + %lo(cpcb)], %o2
5474: set _C_LABEL(Lfsbail), %o3
1.1 deraadt 5475: st %o3, [%o2 + PCB_ONFAULT]
5476: sth %o1, [%o0] ! store the halfword
5477: st %g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
5478: retl ! and return 0
5479: clr %o0
5480:
! Store a halfword to user space; returns 0 on success, -1 on fault.
5481: ENTRY(susword)
5482: set KERNBASE, %o2
5483: cmp %o0, %o2 ! if addr >= KERNBASE
5484: bgeu Lfsbadaddr ! go return error
5485: EMPTY
1.111 pk 5486: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5487: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5488: set Lfserr, %o3
5489: st %o3, [%o2 + PCB_ONFAULT]
5490: sth %o1, [%o0] ! store the halfword
5491: st %g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
5492: retl ! and return 0
5493: clr %o0
5494:
1.111 pk 5495: _ENTRY(suibyte)
1.1 deraadt 5496: ENTRY(subyte)
5497: set KERNBASE, %o2
5498: cmp %o0, %o2 ! if addr >= KERNBASE
5499: bgeu Lfsbadaddr ! go return error
5500: EMPTY
1.111 pk 5501: sethi %hi(cpcb), %o2 ! cpcb->pcb_onfault = Lfserr;
5502: ld [%o2 + %lo(cpcb)], %o2
1.1 deraadt 5503: set Lfserr, %o3
5504: st %o3, [%o2 + PCB_ONFAULT]
5505: stb %o1, [%o0] ! store the byte
5506: st %g0, [%o2 + PCB_ONFAULT]! made it, clear onfault
5507: retl ! and return 0
5508: clr %o0
5509:
5510: /* probeget and probeset are meant to be used during autoconfiguration */
5511:
5512: /*
5513: * probeget(addr, size) caddr_t addr; int size;
5514: *
5515: * Read or write a (byte,word,longword) from the given address.
5516: * Like {fu,su}{byte,halfword,word} but our caller is supposed
5517: * to know what he is doing... the address can be anywhere.
5518: *
5519: * We optimize for space, rather than time, here.
5520: */
5521: ENTRY(probeget)
5522: ! %o0 = addr, %o1 = (1,2,4)
1.111 pk 5523: sethi %hi(cpcb), %o2
5524: ld [%o2 + %lo(cpcb)], %o2 ! cpcb->pcb_onfault = Lfserr;
1.1 deraadt 5525: set Lfserr, %o5
5526: st %o5, [%o2 + PCB_ONFAULT]
5527: btst 1, %o1
5528: bnz,a 0f ! if (len & 1)
5529: ldub [%o0], %o0 ! value = *(char *)addr;
5530: 0: btst 2, %o1
5531: bnz,a 0f ! if (len & 2)
5532: lduh [%o0], %o0 ! value = *(short *)addr;
5533: 0: btst 4, %o1
5534: bnz,a 0f ! if (len & 4)
5535: ld [%o0], %o0 ! value = *(int *)addr;
5536: 0: retl ! made it, clear onfault and return
5537: st %g0, [%o2 + PCB_ONFAULT]
5538:
5539: /*
5540: * probeset(addr, size, val) caddr_t addr; int size, val;
5541: *
5542: * As above, but we return 0 on success.
5543: */
5544: ENTRY(probeset)
5545: ! %o0 = addr, %o1 = (1,2,4), %o2 = val
1.111 pk 5546: sethi %hi(cpcb), %o3
5547: ld [%o3 + %lo(cpcb)], %o3 ! cpcb->pcb_onfault = Lfserr;
1.1 deraadt 5548: set Lfserr, %o5
1.35 pk 5549: st %o5, [%o3 + PCB_ONFAULT]
1.1 deraadt 5550: btst 1, %o1
5551: bnz,a 0f ! if (len & 1)
5552: stb %o2, [%o0] ! *(char *)addr = value;
5553: 0: btst 2, %o1
5554: bnz,a 0f ! if (len & 2)
5555: sth %o2, [%o0] ! *(short *)addr = value;
5556: 0: btst 4, %o1
5557: bnz,a 0f ! if (len & 4)
5558: st %o2, [%o0] ! *(int *)addr = value;
5559: 0: clr %o0 ! made it, clear onfault and return 0
5560: retl
1.35 pk 5561: st %g0, [%o3 + PCB_ONFAULT]
1.21 deraadt 5562:
5563: /*
1.22 deraadt 5564: * int xldcontrolb(caddr_t, pcb)
5565: * %o0 %o1
1.21 deraadt 5566: *
5567: * read a byte from the specified address in ASI_CONTROL space.
5568: */
1.22 deraadt 5569: ENTRY(xldcontrolb)
1.111 pk 5570: !sethi %hi(cpcb), %o2
5571: !ld [%o2 + %lo(cpcb)], %o2 ! cpcb->pcb_onfault = Lfsbail;
1.22 deraadt 5572: or %o1, %g0, %o2 ! %o2 = %o1
1.111 pk 5573: set _C_LABEL(Lfsbail), %o5
1.21 deraadt 5574: st %o5, [%o2 + PCB_ONFAULT]
5575: lduba [%o0] ASI_CONTROL, %o0 ! read
5576: 0: retl
1.1 deraadt 5577: st %g0, [%o2 + PCB_ONFAULT]
1.78 pk 5578:
5579: /*
5580: * int fkbyte(caddr_t, pcb)
5581: * %o0 %o1
5582: *
5583: * Just like fubyte(), but for kernel space.
5584: * (currently used to work around unexplained transient bus errors
5585: * when reading the VME interrupt vector)
5586: */
5587: ENTRY(fkbyte)
5588: or %o1, %g0, %o2 ! %o2 = %o1
1.111 pk 5589: set _C_LABEL(Lfsbail), %o5
1.78 pk 5590: st %o5, [%o2 + PCB_ONFAULT]
5591: ldub [%o0], %o0 ! fetch the byte
5592: retl ! made it
5593: st %g0, [%o2 + PCB_ONFAULT]! but first clear onfault
1.1 deraadt 5594:
5595:
5596: /*
5597: * copywords(src, dst, nbytes)
5598: *
5599: * Copy `nbytes' bytes from src to dst, both of which are word-aligned;
5600: * nbytes is a multiple of four. It may, however, be zero, in which case
5601: * nothing is to be copied.
5602: */
5603: ENTRY(copywords)
5604: ! %o0 = src, %o1 = dst, %o2 = nbytes
5605: b 1f
5606: deccc 4, %o2
5607: 0:
5608: st %o3, [%o1 + %o2]
5609: deccc 4, %o2 ! while ((n -= 4) >= 0)
5610: 1:
5611: bge,a 0b ! *(int *)(dst+n) = *(int *)(src+n);
5612: ld [%o0 + %o2], %o3
5613: retl
5614: nop
5615:
5616: /*
5617: * qcopy(src, dst, nbytes)
5618: *
5619: * (q for `quad' or `quick', as opposed to b for byte/block copy)
5620: *
5621: * Just like copywords, but everything is multiples of 8.
5622: */
5623: ENTRY(qcopy)
5624: b 1f
5625: deccc 8, %o2
5626: 0:
5627: std %o4, [%o1 + %o2]
5628: deccc 8, %o2
5629: 1:
5630: bge,a 0b
5631: ldd [%o0 + %o2], %o4
5632: retl
5633: nop
5634:
5635: /*
5636: * qzero(addr, nbytes)
5637: *
5638: * Zeroes `nbytes' bytes of a quad-aligned virtual address,
5639: * where nbytes is itself a multiple of 8.
5640: */
5641: ENTRY(qzero)
5642: ! %o0 = addr, %o1 = len (in bytes)
5643: clr %g1
5644: 0:
5645: deccc 8, %o1 ! while ((n =- 8) >= 0)
5646: bge,a 0b
5647: std %g0, [%o0 + %o1] ! *(quad *)(addr + n) = 0;
5648: retl
5649: nop
5650:
5651: /*
1.83 mycroft 5652: * kernel bcopy
1.1 deraadt 5653: * Assumes regions do not overlap; has no useful return value.
5654: *
5655: * Must not use %g7 (see copyin/copyout above).
5656: */
5657:
5658: #define BCOPY_SMALL 32 /* if < 32, copy by bytes */
5659:
5660: ENTRY(bcopy)
5661: cmp %o2, BCOPY_SMALL
5662: Lbcopy_start:
5663: bge,a Lbcopy_fancy ! if >= this many, go be fancy.
5664: btst 7, %o0 ! (part of being fancy)
5665:
5666: /*
5667: * Not much to copy, just do it a byte at a time.
5668: */
5669: deccc %o2 ! while (--len >= 0)
5670: bl 1f
5671: EMPTY
5672: 0:
5673: inc %o0
5674: ldsb [%o0 - 1], %o4 ! (++dst)[-1] = *src++;
5675: stb %o4, [%o1]
5676: deccc %o2
5677: bge 0b
5678: inc %o1
5679: 1:
5680: retl
1.80 mrg 5681: nop
1.1 deraadt 5682: /* NOTREACHED */
5683:
5684: /*
5685: * Plenty of data to copy, so try to do it optimally.
5686: */
5687: Lbcopy_fancy:
5688: ! check for common case first: everything lines up.
5689: ! btst 7, %o0 ! done already
5690: bne 1f
5691: EMPTY
5692: btst 7, %o1
5693: be,a Lbcopy_doubles
5694: dec 8, %o2 ! if all lined up, len -= 8, goto bcopy_doubes
5695:
5696: ! If the low bits match, we can make these line up.
5697: 1:
5698: xor %o0, %o1, %o3 ! t = src ^ dst;
5699: btst 1, %o3 ! if (t & 1) {
5700: be,a 1f
5701: btst 1, %o0 ! [delay slot: if (src & 1)]
5702:
5703: ! low bits do not match, must copy by bytes.
5704: 0:
5705: ldsb [%o0], %o4 ! do {
5706: inc %o0 ! (++dst)[-1] = *src++;
5707: inc %o1
5708: deccc %o2
5709: bnz 0b ! } while (--len != 0);
5710: stb %o4, [%o1 - 1]
5711: retl
1.80 mrg 5712: nop
1.1 deraadt 5713: /* NOTREACHED */
5714:
5715: ! lowest bit matches, so we can copy by words, if nothing else
5716: 1:
5717: be,a 1f ! if (src & 1) {
5718: btst 2, %o3 ! [delay slot: if (t & 2)]
5719:
5720: ! although low bits match, both are 1: must copy 1 byte to align
5721: ldsb [%o0], %o4 ! *dst++ = *src++;
5722: stb %o4, [%o1]
5723: inc %o0
5724: inc %o1
5725: dec %o2 ! len--;
5726: btst 2, %o3 ! } [if (t & 2)]
5727: 1:
5728: be,a 1f ! if (t & 2) {
5729: btst 2, %o0 ! [delay slot: if (src & 2)]
5730: dec 2, %o2 ! len -= 2;
5731: 0:
5732: ldsh [%o0], %o4 ! do {
5733: sth %o4, [%o1] ! *(short *)dst = *(short *)src;
5734: inc 2, %o0 ! dst += 2, src += 2;
5735: deccc 2, %o2 ! } while ((len -= 2) >= 0);
5736: bge 0b
5737: inc 2, %o1
5738: b Lbcopy_mopb ! goto mop_up_byte;
5739: btst 1, %o2 ! } [delay slot: if (len & 1)]
5740: /* NOTREACHED */
5741:
5742: ! low two bits match, so we can copy by longwords
5743: 1:
5744: be,a 1f ! if (src & 2) {
5745: btst 4, %o3 ! [delay slot: if (t & 4)]
5746:
5747: ! although low 2 bits match, they are 10: must copy one short to align
5748: ldsh [%o0], %o4 ! (*short *)dst = *(short *)src;
5749: sth %o4, [%o1]
5750: inc 2, %o0 ! dst += 2;
5751: inc 2, %o1 ! src += 2;
5752: dec 2, %o2 ! len -= 2;
5753: btst 4, %o3 ! } [if (t & 4)]
5754: 1:
5755: be,a 1f ! if (t & 4) {
5756: btst 4, %o0 ! [delay slot: if (src & 4)]
5757: dec 4, %o2 ! len -= 4;
5758: 0:
5759: ld [%o0], %o4 ! do {
5760: st %o4, [%o1] ! *(int *)dst = *(int *)src;
5761: inc 4, %o0 ! dst += 4, src += 4;
5762: deccc 4, %o2 ! } while ((len -= 4) >= 0);
5763: bge 0b
5764: inc 4, %o1
5765: b Lbcopy_mopw ! goto mop_up_word_and_byte;
5766: btst 2, %o2 ! } [delay slot: if (len & 2)]
5767: /* NOTREACHED */
5768:
5769: ! low three bits match, so we can copy by doublewords
5770: 1:
5771: be 1f ! if (src & 4) {
5772: dec 8, %o2 ! [delay slot: len -= 8]
5773: ld [%o0], %o4 ! *(int *)dst = *(int *)src;
5774: st %o4, [%o1]
5775: inc 4, %o0 ! dst += 4, src += 4, len -= 4;
5776: inc 4, %o1
5777: dec 4, %o2 ! }
5778: 1:
5779: Lbcopy_doubles:
5780: ldd [%o0], %o4 ! do {
5781: std %o4, [%o1] ! *(double *)dst = *(double *)src;
5782: inc 8, %o0 ! dst += 8, src += 8;
5783: deccc 8, %o2 ! } while ((len -= 8) >= 0);
5784: bge Lbcopy_doubles
5785: inc 8, %o1
5786:
5787: ! check for a usual case again (save work)
5788: btst 7, %o2 ! if ((len & 7) == 0)
5789: be Lbcopy_done ! goto bcopy_done;
5790:
5791: btst 4, %o2 ! if ((len & 4)) == 0)
5792: be,a Lbcopy_mopw ! goto mop_up_word_and_byte;
5793: btst 2, %o2 ! [delay slot: if (len & 2)]
5794: ld [%o0], %o4 ! *(int *)dst = *(int *)src;
5795: st %o4, [%o1]
5796: inc 4, %o0 ! dst += 4;
5797: inc 4, %o1 ! src += 4;
5798: btst 2, %o2 ! } [if (len & 2)]
5799:
5800: 1:
5801: ! mop up trailing word (if present) and byte (if present).
5802: Lbcopy_mopw:
5803: be Lbcopy_mopb ! no word, go mop up byte
5804: btst 1, %o2 ! [delay slot: if (len & 1)]
5805: ldsh [%o0], %o4 ! *(short *)dst = *(short *)src;
5806: be Lbcopy_done ! if ((len & 1) == 0) goto done;
5807: sth %o4, [%o1]
5808: ldsb [%o0 + 2], %o4 ! dst[2] = src[2];
5809: retl
1.80 mrg 5810: stb %o4, [%o1 + 2]
1.1 deraadt 5811: /* NOTREACHED */
5812:
5813: ! mop up trailing byte (if present).
5814: Lbcopy_mopb:
5815: bne,a 1f
5816: ldsb [%o0], %o4
5817:
5818: Lbcopy_done:
5819: retl
1.80 mrg 5820: nop
1.1 deraadt 5821:
5822: 1:
5823: retl
1.80 mrg 5824: stb %o4,[%o1]
1.1 deraadt 5825: /*
5826: * ovbcopy(src, dst, len): like bcopy, but regions may overlap.
5827: */
5828: ENTRY(ovbcopy)
5829: cmp %o0, %o1 ! src < dst?
5830: bgeu Lbcopy_start ! no, go copy forwards as via bcopy
5831: cmp %o2, BCOPY_SMALL! (check length for doublecopy first)
5832:
5833: /*
5834: * Since src comes before dst, and the regions might overlap,
5835: * we have to do the copy starting at the end and working backwards.
5836: */
5837: add %o2, %o0, %o0 ! src += len
5838: add %o2, %o1, %o1 ! dst += len
5839: bge,a Lback_fancy ! if len >= BCOPY_SMALL, go be fancy
5840: btst 3, %o0
5841:
5842: /*
5843: * Not much to copy, just do it a byte at a time.
5844: */
5845: deccc %o2 ! while (--len >= 0)
5846: bl 1f
5847: EMPTY
5848: 0:
5849: dec %o0 ! *--dst = *--src;
5850: ldsb [%o0], %o4
5851: dec %o1
5852: deccc %o2
5853: bge 0b
5854: stb %o4, [%o1]
5855: 1:
5856: retl
5857: nop
5858:
5859: /*
5860: * Plenty to copy, try to be optimal.
5861: * We only bother with word/halfword/byte copies here.
5862: */
5863: Lback_fancy:
5864: ! btst 3, %o0 ! done already
5865: bnz 1f ! if ((src & 3) == 0 &&
5866: btst 3, %o1 ! (dst & 3) == 0)
5867: bz,a Lback_words ! goto words;
5868: dec 4, %o2 ! (done early for word copy)
5869:
5870: 1:
5871: /*
5872: * See if the low bits match.
5873: */
5874: xor %o0, %o1, %o3 ! t = src ^ dst;
5875: btst 1, %o3
5876: bz,a 3f ! if (t & 1) == 0, can do better
5877: btst 1, %o0
5878:
5879: /*
5880: * Nope; gotta do byte copy.
5881: */
5882: 2:
5883: dec %o0 ! do {
5884: ldsb [%o0], %o4 ! *--dst = *--src;
5885: dec %o1
5886: deccc %o2 ! } while (--len != 0);
5887: bnz 2b
5888: stb %o4, [%o1]
5889: retl
5890: nop
5891:
5892: 3:
5893: /*
5894: * Can do halfword or word copy, but might have to copy 1 byte first.
5895: */
5896: ! btst 1, %o0 ! done earlier
5897: bz,a 4f ! if (src & 1) { /* copy 1 byte */
5898: btst 2, %o3 ! (done early)
5899: dec %o0 ! *--dst = *--src;
5900: ldsb [%o0], %o4
5901: dec %o1
5902: stb %o4, [%o1]
5903: dec %o2 ! len--;
5904: btst 2, %o3 ! }
5905:
5906: 4:
5907: /*
5908: * See if we can do a word copy ((t&2) == 0).
5909: */
5910: ! btst 2, %o3 ! done earlier
5911: bz,a 6f ! if (t & 2) == 0, can do word copy
5912: btst 2, %o0 ! (src&2, done early)
5913:
5914: /*
5915: * Gotta do halfword copy.
5916: */
5917: dec 2, %o2 ! len -= 2;
5918: 5:
5919: dec 2, %o0 ! do {
5920: ldsh [%o0], %o4 ! src -= 2;
5921: dec 2, %o1 ! dst -= 2;
5922: deccc 2, %o0 ! *(short *)dst = *(short *)src;
5923: bge 5b ! } while ((len -= 2) >= 0);
5924: sth %o4, [%o1]
5925: b Lback_mopb ! goto mop_up_byte;
5926: btst 1, %o2 ! (len&1, done early)
5927:
5928: 6:
5929: /*
5930: * We can do word copies, but we might have to copy
5931: * one halfword first.
5932: */
5933: ! btst 2, %o0 ! done already
5934: bz 7f ! if (src & 2) {
5935: dec 4, %o2 ! (len -= 4, done early)
5936: dec 2, %o0 ! src -= 2, dst -= 2;
5937: ldsh [%o0], %o4 ! *(short *)dst = *(short *)src;
5938: dec 2, %o1
5939: sth %o4, [%o1]
5940: dec 2, %o2 ! len -= 2;
5941: ! }
5942:
5943: 7:
5944: Lback_words:
5945: /*
5946: * Do word copies (backwards), then mop up trailing halfword
5947: * and byte if any.
5948: */
5949: ! dec 4, %o2 ! len -= 4, done already
5950: 0: ! do {
5951: dec 4, %o0 ! src -= 4;
5952: dec 4, %o1 ! src -= 4;
5953: ld [%o0], %o4 ! *(int *)dst = *(int *)src;
5954: deccc 4, %o2 ! } while ((len -= 4) >= 0);
5955: bge 0b
5956: st %o4, [%o1]
5957:
5958: /*
5959: * Check for trailing shortword.
5960: */
5961: btst 2, %o2 ! if (len & 2) {
5962: bz,a 1f
5963: btst 1, %o2 ! (len&1, done early)
5964: dec 2, %o0 ! src -= 2, dst -= 2;
5965: ldsh [%o0], %o4 ! *(short *)dst = *(short *)src;
5966: dec 2, %o1
5967: sth %o4, [%o1] ! }
5968: btst 1, %o2
5969:
5970: /*
5971: * Check for trailing byte.
5972: */
5973: 1:
5974: Lback_mopb:
5975: ! btst 1, %o2 ! (done already)
5976: bnz,a 1f ! if (len & 1) {
5977: ldsb [%o0 - 1], %o4 ! b = src[-1];
5978: retl
5979: nop
5980: 1:
5981: retl ! dst[-1] = b;
5982: stb %o4, [%o1 - 1] ! }
5983:
/*
 * kcopy() is exactly like bcopy except that it sets pcb_onfault such that
 * when a fault occurs, it is able to return -1 to indicate this to the
 * caller.
 *
 * In:	%o0 = src, %o1 = dst, %o2 = len
 * Out:	%o0 = 0 on success; on a fault the trap code unwinds through
 *	Lkcerr (return value in that case is set up by the fault
 *	handler, not here -- see the trap code).
 * Note: %o5 holds the cpcb pointer and %g1 the saved onfault handler
 *	for the entire routine; neither may be reused as scratch.
 */
ENTRY(kcopy)
	sethi	%hi(cpcb), %o5		! cpcb->pcb_onfault = Lkcerr;
	ld	[%o5 + %lo(cpcb)], %o5
	set	Lkcerr, %o3
	ld	[%o5 + PCB_ONFAULT], %g1	! save current onfault handler
	st	%o3, [%o5 + PCB_ONFAULT]

	cmp	%o2, BCOPY_SMALL
Lkcopy_start:
	bge,a	Lkcopy_fancy	! if >= this many, go be fancy.
	 btst	7, %o0		! (part of being fancy)

	/*
	 * Not much to copy, just do it a byte at a time.
	 */
	deccc	%o2		! while (--len >= 0)
	bl	1f
	 EMPTY
0:
	ldsb	[%o0], %o4	!	*dst++ = *src++;
	inc	%o0
	stb	%o4, [%o1]
	deccc	%o2
	bge	0b
	 inc	%o1
1:
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	 mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

	/*
	 * Plenty of data to copy, so try to do it optimally.
	 */
Lkcopy_fancy:
	! check for common case first: everything lines up.
!	btst	7, %o0		! done already
	bne	1f
	 EMPTY
	btst	7, %o1
	be,a	Lkcopy_doubles
	 dec	8, %o2		! if all lined up, len -= 8, goto bcopy_doubes

	! If the low bits match, we can make these line up.
1:
	xor	%o0, %o1, %o3	! t = src ^ dst;
	btst	1, %o3		! if (t & 1) {
	be,a	1f
	 btst	1, %o0		! [delay slot: if (src & 1)]

	! low bits do not match, must copy by bytes.
0:
	ldsb	[%o0], %o4	!	do {
	inc	%o0		!		*dst++ = *src++;
	stb	%o4, [%o1]
	deccc	%o2
	bnz	0b		!	} while (--len != 0);
	 inc	%o1
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	 mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

	! lowest bit matches, so we can copy by words, if nothing else
1:
	be,a	1f		! if (src & 1) {
	 btst	2, %o3		! [delay slot: if (t & 2)]

	! although low bits match, both are 1: must copy 1 byte to align
	ldsb	[%o0], %o4	!	*dst++ = *src++;
	inc	%o0
	stb	%o4, [%o1]
	dec	%o2		!	len--;
	inc	%o1
	btst	2, %o3		! } [if (t & 2)]
1:
	be,a	1f		! if (t & 2) {
	 btst	2, %o0		! [delay slot: if (src & 2)]
	dec	2, %o2		!	len -= 2;
0:
	ldsh	[%o0], %o4	!	do {
	inc	2, %o0		!		dst += 2, src += 2;
	sth	%o4, [%o1]	!		*(short *)dst = *(short *)src;
	deccc	2, %o2		!	} while ((len -= 2) >= 0);
	bge	0b
	 inc	2, %o1
	b	Lkcopy_mopb	!	goto mop_up_byte;
	 btst	1, %o2		! } [delay slot: if (len & 1)]
	/* NOTREACHED */

	! low two bits match, so we can copy by longwords
1:
	be,a	1f		! if (src & 2) {
	 btst	4, %o3		! [delay slot: if (t & 4)]

	! although low 2 bits match, they are 10: must copy one short to align
	ldsh	[%o0], %o4	!	(*short *)dst = *(short *)src;
	inc	2, %o0		!	dst += 2;
	sth	%o4, [%o1]
	dec	2, %o2		!	len -= 2;
	inc	2, %o1		!	src += 2;
	btst	4, %o3		! } [if (t & 4)]
1:
	be,a	1f		! if (t & 4) {
	 btst	4, %o0		! [delay slot: if (src & 4)]
	dec	4, %o2		!	len -= 4;
0:
	ld	[%o0], %o4	!	do {
	inc	4, %o0		!		dst += 4, src += 4;
	st	%o4, [%o1]	!		*(int *)dst = *(int *)src;
	deccc	4, %o2		!	} while ((len -= 4) >= 0);
	bge	0b
	 inc	4, %o1
	b	Lkcopy_mopw	!	goto mop_up_word_and_byte;
	 btst	2, %o2		! } [delay slot: if (len & 2)]
	/* NOTREACHED */

	! low three bits match, so we can copy by doublewords
1:
	be	1f		! if (src & 4) {
	 dec	8, %o2		! [delay slot: len -= 8]
	ld	[%o0], %o4	!	*(int *)dst = *(int *)src;
	inc	4, %o0		!	dst += 4, src += 4, len -= 4;
	st	%o4, [%o1]
	dec	4, %o2		! }
	inc	4, %o1
1:
Lkcopy_doubles:
	! swap %o4 with %o2 during doubles copy, since %o5 is verboten
	! (%o5 holds the cpcb pointer needed to restore pcb_onfault)
	mov	%o2, %o4
Lkcopy_doubles2:
	ldd	[%o0], %o2	! do {
	inc	8, %o0		!	dst += 8, src += 8;
	std	%o2, [%o1]	!	*(double *)dst = *(double *)src;
	deccc	8, %o4		! } while ((len -= 8) >= 0);
	bge	Lkcopy_doubles2
	 inc	8, %o1
	mov	%o4, %o2	! restore len

	! check for a usual case again (save work)
	btst	7, %o2		! if ((len & 7) == 0)
	be	Lkcopy_done	!	goto bcopy_done;

	 btst	4, %o2		! if ((len & 4)) == 0)
	be,a	Lkcopy_mopw	!	goto mop_up_word_and_byte;
	 btst	2, %o2		! [delay slot: if (len & 2)]
	ld	[%o0], %o4	!	*(int *)dst = *(int *)src;
	inc	4, %o0		!	dst += 4;
	st	%o4, [%o1]
	inc	4, %o1		!	src += 4;
	btst	2, %o2		! } [if (len & 2)]

1:
	! mop up trailing word (if present) and byte (if present).
Lkcopy_mopw:
	be	Lkcopy_mopb	! no word, go mop up byte
	 btst	1, %o2		! [delay slot: if (len & 1)]
	ldsh	[%o0], %o4	! *(short *)dst = *(short *)src;
	be	Lkcopy_done	! if ((len & 1) == 0) goto done;
	 sth	%o4, [%o1]
	ldsb	[%o0 + 2], %o4	! dst[2] = src[2];
	stb	%o4, [%o1 + 2]
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	 mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

	! mop up trailing byte (if present).
Lkcopy_mopb:
	bne,a	1f
	 ldsb	[%o0], %o4

Lkcopy_done:
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	 mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

1:
	stb	%o4, [%o1]
	st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	retl
	 mov	0, %o0		! delay slot: return success
	/* NOTREACHED */

Lkcerr:
	! a fault occurred during the copy; restore the previous
	! onfault handler in the delay slot and return to the trap code
	retl
	 st	%g1, [%o5 + PCB_ONFAULT]	! restore onfault
	/* NOTREACHED */
1.1 deraadt 6178:
/*
 * savefpstate(f) struct fpstate *f;
 *
 * Store the current FPU state.  The first `st %fsr' may cause a trap;
 * our trap handler knows how to recover (by `returning' to savefpcont).
 *
 * In:	%o0 = f (struct fpstate *)
 * Uses %o1-%o5 as scratch:
 *	%o3 = queue size counter (bytes while copying, then entries)
 *	%o5 = FSR_QNE mask, held for the whole routine
 */
ENTRY(savefpstate)
	rd	%psr, %o1		! enable FP before we begin
	set	PSR_EF, %o2
	or	%o1, %o2, %o1
	wr	%o1, 0, %psr
	/* do some setup work while we wait for PSR_EF to turn on */
	set	FSR_QNE, %o5		! QNE = 0x2000, too big for immediate
	clr	%o3			! qsize = 0;
	nop				! (still waiting for PSR_EF)
special_fp_store:
	! NB: savefpcont below jumps back to special_fp_store + 4, so
	! nothing may be inserted between this label and the store.
	st	%fsr, [%o0 + FS_FSR]	! f->fs_fsr = getfsr();
	/*
	 * Even if the preceding instruction did not trap, the queue
	 * is not necessarily empty: this state save might be happening
	 * because user code tried to store %fsr and took the FPU
	 * from `exception pending' mode to `exception' mode.
	 * So we still have to check the blasted QNE bit.
	 * With any luck it will usually not be set.
	 */
	ld	[%o0 + FS_FSR], %o4	! if (f->fs_fsr & QNE)
	btst	%o5, %o4
	bnz	Lfp_storeq		!	goto storeq;
	 std	%f0, [%o0 + FS_REGS + (4*0)]	! f->fs_f0 = etc;
Lfp_finish:
	st	%o3, [%o0 + FS_QSIZE]	! f->fs_qsize = qsize;
	std	%f2, [%o0 + FS_REGS + (4*2)]
	std	%f4, [%o0 + FS_REGS + (4*4)]
	std	%f6, [%o0 + FS_REGS + (4*6)]
	std	%f8, [%o0 + FS_REGS + (4*8)]
	std	%f10, [%o0 + FS_REGS + (4*10)]
	std	%f12, [%o0 + FS_REGS + (4*12)]
	std	%f14, [%o0 + FS_REGS + (4*14)]
	std	%f16, [%o0 + FS_REGS + (4*16)]
	std	%f18, [%o0 + FS_REGS + (4*18)]
	std	%f20, [%o0 + FS_REGS + (4*20)]
	std	%f22, [%o0 + FS_REGS + (4*22)]
	std	%f24, [%o0 + FS_REGS + (4*24)]
	std	%f26, [%o0 + FS_REGS + (4*26)]
	std	%f28, [%o0 + FS_REGS + (4*28)]
	retl
	 std	%f30, [%o0 + FS_REGS + (4*30)]

/*
 * Store the (now known nonempty) FP queue.
 * We have to reread the fsr each time in order to get the new QNE bit.
 */
Lfp_storeq:
	add	%o0, FS_QUEUE, %o1	! q = &f->fs_queue[0];
1:
	std	%fq, [%o1 + %o3]	! q[qsize++] = fsr_qfront();
	st	%fsr, [%o0 + FS_FSR]	! reread fsr
	ld	[%o0 + FS_FSR], %o4	! if fsr & QNE, loop
	btst	%o5, %o4
	bnz	1b
	 inc	8, %o3			! (%o3 counts bytes here)
	b	Lfp_finish		! set qsize and finish storing fregs
	 srl	%o3, 3, %o3		! (but first fix qsize: bytes -> entries)

/*
 * The fsr store trapped.  Do it again; this time it will not trap.
 * We could just have the trap handler return to the `st %fsr', but
 * if for some reason it *does* trap, that would lock us into a tight
 * loop.  This way we panic instead.  Whoopee.
 */
savefpcont:
	b	special_fp_store + 4	! continue
	 st	%fsr, [%o0 + FS_FSR]	! but first finish the %fsr store
6252:
/*
 * Load FPU state.
 *
 * loadfpstate(f) struct fpstate *f;
 * In:	%o0 = f; clobbers %o1, %o2.
 * Enables PSR_EF first, then reloads all 32 %f registers and %fsr
 * from the saved state (the reverse of savefpstate, minus the queue).
 */
ENTRY(loadfpstate)
	rd	%psr, %o1		! enable FP before we begin
	set	PSR_EF, %o2
	or	%o1, %o2, %o1
	wr	%o1, 0, %psr
	nop; nop; nop			! paranoia (wait out wr delay)
	ldd	[%o0 + FS_REGS + (4*0)], %f0
	ldd	[%o0 + FS_REGS + (4*2)], %f2
	ldd	[%o0 + FS_REGS + (4*4)], %f4
	ldd	[%o0 + FS_REGS + (4*6)], %f6
	ldd	[%o0 + FS_REGS + (4*8)], %f8
	ldd	[%o0 + FS_REGS + (4*10)], %f10
	ldd	[%o0 + FS_REGS + (4*12)], %f12
	ldd	[%o0 + FS_REGS + (4*14)], %f14
	ldd	[%o0 + FS_REGS + (4*16)], %f16
	ldd	[%o0 + FS_REGS + (4*18)], %f18
	ldd	[%o0 + FS_REGS + (4*20)], %f20
	ldd	[%o0 + FS_REGS + (4*22)], %f22
	ldd	[%o0 + FS_REGS + (4*24)], %f24
	ldd	[%o0 + FS_REGS + (4*26)], %f26
	ldd	[%o0 + FS_REGS + (4*28)], %f28
	ldd	[%o0 + FS_REGS + (4*30)], %f30
	retl
	 ld	[%o0 + FS_FSR], %fsr	! setfsr(f->fs_fsr);
6280:
/*
 * ienab_bis(bis) int bis;
 * ienab_bic(bic) int bic;
 *
 * Set and clear bits in the sun4/sun4c interrupt register.
 * In:	%o0 = bit mask; clobbers %o2-%o4.
 */

#if defined(SUN4) || defined(SUN4C)
/*
 * Since there are no read-modify-write instructions for this,
 * and one of the interrupts is nonmaskable, we must disable traps.
 */
ENTRY(ienab_bis)
	! %o0 = bits to set
	rd	%psr, %o2
	wr	%o2, PSR_ET, %psr	! disable traps (xor clears ET)
	nop; nop			! 3-instr delay until ET turns off
	sethi	%hi(INTRREG_VA), %o3
	ldub	[%o3 + %lo(INTRREG_VA)], %o4
	or	%o4, %o0, %o4		! *INTRREG_VA |= bis;
	stb	%o4, [%o3 + %lo(INTRREG_VA)]
	wr	%o2, 0, %psr		! reenable traps (restore old %psr)
	nop
	retl
	 nop

ENTRY(ienab_bic)
	! %o0 = bits to clear
	rd	%psr, %o2
	wr	%o2, PSR_ET, %psr	! disable traps
	nop; nop
	sethi	%hi(INTRREG_VA), %o3
	ldub	[%o3 + %lo(INTRREG_VA)], %o4
	andn	%o4, %o0, %o4		! *INTRREG_VA &=~ bic;
	stb	%o4, [%o3 + %lo(INTRREG_VA)]
	wr	%o2, 0, %psr		! reenable traps
	nop
	retl
	 nop
#endif /* SUN4 || SUN4C */
1.52 pk 6321:
#if defined(SUN4M)
/*
 * raise(cpu, level)
 *
 * Post a soft interrupt at `level' on CPU number `cpu'.
 * In:	%o0 = cpu number, %o1 = interrupt level; clobbers %o0-%o3.
 */
ENTRY(raise)
#if !defined(MSIIEP)	/* normal suns */
	! *(ICR_PI_SET + cpu*_MAXNBPG) = PINTR_SINTRLEV(level)
	sethi	%hi(1 << 16), %o2
	sll	%o2, %o1, %o2		! soft-intr bit for `level'
	set	ICR_PI_SET, %o1
	set	_MAXNBPG, %o3
1:
	subcc	%o0, 1, %o0		! step to this cpu's ICR page
	bpos,a	1b
	 add	%o1, %o3, %o1
	retl
	 st	%o2, [%o1]
#else /* MSIIEP - ignore %o0, only one CPU ever */
	mov	1, %o2
	sethi	%hi(MSIIEP_PCIC_VA), %o0
	sll	%o2, %o1, %o2
	retl
	 sth	%o2, [%o0 + PCIC_SOFT_INTR_SET_REG]
#endif
1.62 pk 6346:
/*
 * Read Synchronous Fault Status registers.
 * On entry: %l1 == PC, %l3 == fault type, %l4 == storage, %l7 == return address
 * Only use %l5 and %l6.
 * Note: not C callable (caller jumps here with %l7 as the link register).
 * Results are stored as { sfsr, sfva } at [%l4] and [%l4 + 4].
 */
_ENTRY(_C_LABEL(srmmu_get_syncflt))
_ENTRY(_C_LABEL(hypersparc_get_syncflt))
	set	SRMMU_SFAR, %l5
	lda	[%l5] ASI_SRMMU, %l5	! sync virt addr; must be read first
	st	%l5, [%l4 + 4]		! => dump.sfva
	set	SRMMU_SFSR, %l5
	lda	[%l5] ASI_SRMMU, %l5	! get sync fault status register
	jmp	%l7 + 8			! return to caller
	 st	%l5, [%l4]		! => dump.sfsr

_ENTRY(_C_LABEL(viking_get_syncflt))
_ENTRY(_C_LABEL(ms1_get_syncflt))
_ENTRY(_C_LABEL(swift_get_syncflt))
_ENTRY(_C_LABEL(turbosparc_get_syncflt))
_ENTRY(_C_LABEL(cypress_get_syncflt))
	cmp	%l3, T_TEXTFAULT
	be,a	1f
	 mov	%l1, %l5		! use PC if type == T_TEXTFAULT

	set	SRMMU_SFAR, %l5
	lda	[%l5] ASI_SRMMU, %l5	! sync virt addr; must be read first
1:
	st	%l5, [%l4 + 4]		! => dump.sfva

	set	SRMMU_SFSR, %l5
	lda	[%l5] ASI_SRMMU, %l5	! get sync fault status register
	jmp	%l7 + 8			! return to caller
	 st	%l5, [%l4]		! => dump.sfsr
1.62 pk 6381:
#if defined(MULTIPROCESSOR) && 0 /* notyet -- compiled out */
/*
 * Read Synchronous Fault Status registers.
 * On entry: %o0 == &sfsr, %o1 == &sfar
 *
 * C-callable wrapper: calls this CPU's get_syncflt routine (via the
 * cpu_info function pointer) with the per-CPU sync-fault dump area as
 * storage, then copies the dumped { sfsr, sfva } out to the caller.
 */
_ENTRY(_C_LABEL(smp_get_syncflt))
	save	%sp, -CCFSZ, %sp

	sethi	%hi(CPUINFO_VA), %o4
	ld	[%l4 + %lo(CPUINFO_VA+CPUINFO_GETSYNCFLT)], %o5
	clr	%l1			! no PC, no fault type
	clr	%l3
	jmpl	%o5, %l7		! call get_syncflt routine
	 or	%o4, %lo(CPUINFO_SYNCFLTDUMP), %l4

	! load values out of the dump
	ld	[%o4 + %lo(CPUINFO_VA+CPUINFO_SYNCFLTDUMP)], %o5
	st	%o5, [%i0]		! *sfsr
	ld	[%o4 + %lo(CPUINFO_VA+CPUINFO_SYNCFLTDUMP+4)], %o5
	st	%o5, [%i1]		! *sfar
	ret
	 restore
#endif /* MULTIPROCESSOR */
1.62 pk 6405:
/*
 * Read Asynchronous Fault Status registers.
 * On entry: %o0 == &afsr, %o1 == &afar
 * Return 0 if async registers are present, 1 if not.
 * Clobbers %o3, %o4.
 */
_ENTRY(_C_LABEL(srmmu_get_asyncflt))
	set	SRMMU_AFAR, %o4
	lda	[%o4] ASI_SRMMU, %o4	! get async fault address
	set	SRMMU_AFSR, %o3	!
	st	%o4, [%o1]
	lda	[%o3] ASI_SRMMU, %o3	! get async fault status
	st	%o3, [%o0]
	retl
	 clr	%o0			! return value: 0 = regs present

_ENTRY(_C_LABEL(cypress_get_asyncflt))
_ENTRY(_C_LABEL(hypersparc_get_asyncflt))
	set	SRMMU_AFSR, %o3	! must read status before fault on HS
	lda	[%o3] ASI_SRMMU, %o3	! get async fault status
	st	%o3, [%o0]
	btst	AFSR_AFO, %o3	! and only read fault address
	bz	1f		! if valid.
	 set	SRMMU_AFAR, %o4
	lda	[%o4] ASI_SRMMU, %o4	! get async fault address
	clr	%o0		! return value
	retl
	 st	%o4, [%o1]
1:
	retl
	 clr	%o0		! return value

/* Used by CPU models whose MMU has no async fault registers. */
_ENTRY(_C_LABEL(no_asyncflt_regs))
	retl
	 mov	1, %o0		! return value: 1 = no async regs

_ENTRY(_C_LABEL(hypersparc_pure_vcache_flush))
	/*
	 * Flush entire on-chip instruction cache, which is
	 * a pure virtually-indexed/virtually-tagged cache.
	 */
	retl
	 sta	%g0, [%g0] ASI_HICACHECLR

#endif /* SUN4M */
1.1 deraadt 6450:
#if !defined(MSIIEP)	/* normal suns */
/*
 * void lo_microtime(struct timeval *tv)
 *
 * LBL's sparc bsd 'microtime': We don't need to spl (so this routine
 * can be a leaf routine) and we don't keep a 'last' timeval (there
 * can't be two calls to this routine in a microsecond).  This seems to
 * be about 20 times faster than the Sun code on an SS-2. - vj
 *
 * Read time values from slowest-changing to fastest-changing,
 * then re-read out to slowest.  If the values read before
 * the innermost match those read after, the innermost value
 * is consistent with the outer values.  If not, it may not
 * be and we must retry.  Typically this loop runs only once;
 * occasionally it runs twice, and only rarely does it run longer.
 *
 * Clobbers %o1-%o4 and globals %g2-%g5.
 */
#if defined(SUN4)
ENTRY(lo_microtime)
#else
ENTRY(microtime)
#endif
	sethi	%hi(_C_LABEL(time)), %g2

#if defined(SUN4M) && !(defined(SUN4C) || defined(SUN4))
	sethi	%hi(TIMERREG_VA+4), %g3
	or	%g3, %lo(TIMERREG_VA+4), %g3
#elif (defined(SUN4C) || defined(SUN4)) && !defined(SUN4M)
	sethi	%hi(TIMERREG_VA), %g3
	or	%g3, %lo(TIMERREG_VA), %g3
#else
	! both kernel configs: patch in the +4 at runtime on 4m
	sethi	%hi(TIMERREG_VA), %g3
	or	%g3, %lo(TIMERREG_VA), %g3
NOP_ON_4_4C_1:
	add	%g3, 4, %g3
#endif

2:
	ldd	[%g2+%lo(_C_LABEL(time))], %o2	! time.tv_sec & time.tv_usec
	ld	[%g3], %o4			! usec counter
	ldd	[%g2+%lo(_C_LABEL(time))], %g4	! see if time values changed
	cmp	%g4, %o2
	bne	2b				! if time.tv_sec changed
	 cmp	%g5, %o3
	bne	2b				! if time.tv_usec changed
	 tst	%o4

	bpos	3f				! reached limit?
	 srl	%o4, TMR_SHIFT, %o4		! convert counter to usec
	sethi	%hi(_C_LABEL(tick)), %g4	! bump usec by 1 tick
	ld	[%g4+%lo(_C_LABEL(tick))], %o1
	set	TMR_MASK, %g5
	add	%o1, %o3, %o3
	and	%o4, %g5, %o4
3:
	add	%o4, %o3, %o3
	set	1000000, %g5			! normalize usec value
	cmp	%o3, %g5
	bl,a	4f
	 st	%o2, [%o0]			! (store tv_sec in delay slot)
	add	%o2, 1, %o2			! overflow into tv_sec
	sub	%o3, %g5, %o3
	st	%o2, [%o0]
4:
	retl
	 st	%o3, [%o0+4]			! store tv_usec
1.149 uwe 6516:
#else /* MSIIEP */
/* XXX: uwe: can be merged with 4c/4m version above */
/*
 * ms-IIep version of
 * void microtime(struct timeval *tv)
 *
 * This is similar to 4c/4m microtime.  The difference is that the
 * counter uses 31 bits and ticks every 4 CPU cycles (CPU is @100MHz,
 * so the counter advances at 25MHz); the magic to divide by 25 is
 * stolen from gcc output.
 *
 * Clobbers %o2-%o4, %y and globals %g2-%g5.
 */
ENTRY(microtime)
	sethi	%hi(_C_LABEL(time)), %g2

	sethi	%hi(MSIIEP_PCIC_VA), %g3
	or	%g3, PCIC_SCCR_REG, %g3

2:
	ldd	[%g2+%lo(_C_LABEL(time))], %o2	! time.tv_sec & time.tv_usec
	ld	[%g3], %o4			! system (timer) counter
	ldd	[%g2+%lo(_C_LABEL(time))], %g4	! see if time values changed
	cmp	%g4, %o2
	bne	2b				! if time.tv_sec changed
	 cmp	%g5, %o3
	bne	2b				! if time.tv_usec changed
	 tst	%o4
	!! %o2 - time.tv_sec;  %o3 - time.tv_usec;  %o4 - timer counter

!!! BEGIN ms-IIep specific code
	bpos	3f				! if limit not reached yet
	 clr	%g4				! then use timer as is

	set	0x80000000, %g5
	sethi	%hi(_C_LABEL(tick)), %g4
	bclr	%g5, %o4			! clear limit reached flag
	ld	[%g4+%lo(_C_LABEL(tick))], %g4

	!! %g4 - either 0 or tick (if timer has hit the limit)
3:
	inc	-1, %o4				! timer is 1-based, adjust
	!! divide by 25 magic stolen from a gcc output
	set	1374389535, %g5
	umul	%o4, %g5, %g0
	rd	%y, %o4				! high word of the product
	srl	%o4, 3, %o4			! %o4 = counter / 25 (usec)
	add	%o4, %g4, %o4			! may be bump usec by tick
!!! END ms-IIep specific code

	add	%o3, %o4, %o3			! add timer to time.tv_usec
	set	1000000, %g5			! normalize usec value
	cmp	%o3, %g5
	bl,a	4f
	 st	%o2, [%o0]
	inc	%o2				! overflow into tv_sec
	sub	%o3, %g5, %o3
	st	%o2, [%o0]
4:	retl
	 st	%o3, [%o0 + 4]
#endif /* MSIIEP */
1.1 deraadt 6575:
/*
 * delay function
 *
 * void delay(N)  -- delay N microseconds
 *
 * Register usage: %o0 = "N" number of usecs to go (counts down to zero)
 *		   %o1 = "timerblurb" (stays constant)
 *		   %o2 = counter for 1 usec (counts down from %o1 to zero)
 *
 */

ENTRY(delay)			! %o0 = n
	subcc	%o0, %g0, %g0	! if n == 0, return immediately
	be	2f		! (sethi in the delay slot is harmless)

	sethi	%hi(_C_LABEL(timerblurb)), %o1
	ld	[%o1 + %lo(_C_LABEL(timerblurb))], %o1	! %o1 = timerblurb

	addcc	%o1, %g0, %o2	! %o2 = cntr (start @ %o1), clear CCs
				! first time through only

				! delay 1 usec
1:	bne	1b		! come back here if not done
	 subcc	%o2, 1, %o2	! %o2 = %o2 - 1 [delay slot]

	subcc	%o0, 1, %o0	! %o0 = %o0 - 1
	bne	1b		! done yet?
	 addcc	%o1, %g0, %o2	! reinit %o2 and CCs  [delay slot]
				! harmless if not branching
2:
	retl			! return
	 nop			! [delay slot]
1.53 pk 6608:
1.207 pk 6609:
/*
 * void __cpu_simple_lock(__cpu_simple_lock_t *alp)
 *
 * Spin until the byte at *alp can be atomically set with ldstub.
 * If we spin ~0x1000000 reads without success: when the system is
 * already panicking, just take the lock (assume a single running CPU);
 * otherwise panic with the CPU number and the lock address.
 */
_ENTRY(_C_LABEL(__cpu_simple_lock))
0:
	ldstub	[%o0], %o1	! atomic test-and-set; %o1 = old value
	tst	%o1
	bnz,a	2f		! lock was held: go spin (re-read in slot)
	 ldub	[%o0], %o1
1:
	retl			! got the lock
	 EMPTY
2:
	set	0x1000000, %o2	! set spinout counter
3:
	tst	%o1
	bz	0b		! lock has been released; try again
	 deccc	%o2
	bcc,a	3b		! repeat until counter < 0
	 ldub	[%o0], %o1	! (plain read while spinning; ldstub only
				!  when it looks free)

	! spun out; check if already panicking
	sethi	%hi(_C_LABEL(panicstr)), %o2
	ld	[%o2 + %lo(_C_LABEL(panicstr))], %o1
	tst	%o1
	! if so, just take the lock and return on the assumption that
	! in panic mode we're running on a single CPU anyway.
	bnz,a	1b
	 ldstub	[%o0], %g0

	! set up stack frame and call panic
	save	%sp, -CCFSZ, %sp
	sethi	%hi(CPUINFO_VA + CPUINFO_CPUNO), %o0
	ld	[%o0 + %lo(CPUINFO_VA + CPUINFO_CPUNO)], %o1
	mov	%i0, %o2	! lock address (caller's %o0)
	sethi	%hi(Lpanic_spunout), %o0
	call	_C_LABEL(panic)
	 or	%o0, %lo(Lpanic_spunout), %o0

Lpanic_spunout:
	.asciz	"cpu%d: stuck on lock@%x"
	_ALIGN
6652:
#if defined(KGDB) || defined(DDB) || defined(DIAGNOSTIC)
/*
 * Write all windows (user or otherwise), except the current one.
 *
 * THIS COULD BE DONE IN USER CODE
 * Clobbers %g1, %g2.
 */
ENTRY(write_all_windows)
	/*
	 * g2 = g1 = nwindows - 1;
	 * while (--g1 > 0) save();
	 * while (--g2 > 0) restore();
	 */
	sethi	%hi(_C_LABEL(nwindows)), %g1
	ld	[%g1 + %lo(_C_LABEL(nwindows))], %g1
	dec	%g1
	mov	%g1, %g2

1:	deccc	%g1
	bg,a	1b
	 save	%sp, -64, %sp	! each save forces the previous window out

2:	deccc	%g2
	bg,a	2b
	 restore		! unwind back to where we started

	retl
	 nop
#endif /* KGDB || DDB || DIAGNOSTIC */
6681:
/*
 * setjmp(buf): save %sp, return pc and %fp, then return 0.
 * The std stores the %o6/%o7 register pair: stack pointer and
 * our caller's return address.
 */
ENTRY(setjmp)
	std	%sp, [%o0+0]	! stack pointer & return pc
	st	%fp, [%o0+8]	! frame pointer
	retl
	 clr	%o0		! return 0

Lpanic_ljmp:
	.asciz	"longjmp botch"
	_ALIGN
1.8 pk 6691:
/*
 * longjmp(buf, v): unwind register windows until we reach the frame
 * recorded by setjmp, then resume at the saved pc with return value
 * v (or 1 if v == 0).  Panics if the target frame cannot be reached
 * or the saved %sp is bogus.  Uses globals %g1, %g6, %g7, which
 * survive the window `restore's below.
 */
ENTRY(longjmp)
	addcc	%o1, %g0, %g6	! compute v ? v : 1 in a global register
	be,a	0f
	 mov	1, %g6
0:
	mov	%o0, %g1	! save a in another global register
	ld	[%g1+8], %g7	/* get caller's frame */
1:
	cmp	%fp, %g7	! compare against desired frame
	bl,a	1b		! if below,
	 restore		!    pop frame and loop
	be,a	2f		! if there,
	 ldd	[%g1+0], %o2	!    fetch return %sp and pc, and get out

Llongjmpbotch:
				! otherwise, went too far; bomb out
	save	%sp, -CCFSZ, %sp /* preserve current window */
	sethi	%hi(Lpanic_ljmp), %o0
	call	_C_LABEL(panic)
	 or %o0, %lo(Lpanic_ljmp), %o0;
	unimp	0

2:
	cmp	%o2, %sp	! %sp must not decrease
	bge,a	3f
	 mov	%o2, %sp	! it is OK, put it in place
	b,a	Llongjmpbotch
3:
	jmp	%o3 + 8		! success, return %g6
	 mov	%g6, %o0
6722:
	.data
	! first free VA past the loaded kernel image; set up early in boot
	.globl	_C_LABEL(kernel_top)
_C_LABEL(kernel_top):
	.word	0
	.globl	_C_LABEL(bootinfo)
_C_LABEL(bootinfo):
	.word	0

	.globl	_C_LABEL(proc0paddr)
_C_LABEL(proc0paddr):
	.word	_C_LABEL(u0)		! KVA of proc0 uarea

/* interrupt counters	XXX THESE BELONG ELSEWHERE (if anywhere) */
/* intrnames: NUL-terminated names, one per hardware interrupt level; */
/* intrcnt: one 32-bit counter per name (15 entries). */
	.globl	_C_LABEL(intrcnt), _C_LABEL(eintrcnt)
	.globl	_C_LABEL(intrnames), _C_LABEL(eintrnames)
_C_LABEL(intrnames):
	.asciz	"spur"
	.asciz	"lev1"
	.asciz	"lev2"
	.asciz	"lev3"
	.asciz	"lev4"
	.asciz	"lev5"
	.asciz	"lev6"
	.asciz	"lev7"
	.asciz	"lev8"
	.asciz	"lev9"
	.asciz	"clock"
	.asciz	"lev11"
	.asciz	"lev12"
	.asciz	"lev13"
	.asciz	"prof"
_C_LABEL(eintrnames):
	_ALIGN
_C_LABEL(intrcnt):
	.skip	4*15
_C_LABEL(eintrcnt):

	.comm	_C_LABEL(nwindows), 4
	.comm	_C_LABEL(romp), 4
CVSweb <webmaster@jp.NetBSD.org>