Annotation of src/sys/arch/amd64/amd64/cpufunc.S, Revision 1.63
1.63 ! maxv 1: /* $NetBSD: cpufunc.S,v 1.62 2020/06/15 20:27:30 riastradh Exp $ */
1.1 ad 2:
1.30 maxv 3: /*
1.53 ad 4: * Copyright (c) 1998, 2007, 2008, 2020 The NetBSD Foundation, Inc.
1.1 ad 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Charles M. Hannum, and by Andrew Doran.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
1.20 jym 32: #include <sys/errno.h>
33:
1.1 ad 34: #include <machine/asm.h>
1.17 chs 35: #include <machine/frameasm.h>
1.1 ad 36: #include <machine/specialreg.h>
37: #include <machine/segments.h>
38:
1.3 bouyer 39: #include "opt_xen.h"
1.41 maxv 40: #include "opt_svs.h"
1.3 bouyer 41:
1.1 ad 42: #include "assym.h"
43:
44: /* Small and slow, so align less. */
45: #undef _ALIGN_TEXT
46: #define _ALIGN_TEXT .align 8
47:
/* x86_lfence(): load fence -- orders earlier loads before later loads. */
1.14 chs 48: ENTRY(x86_lfence)
1.1 ad 49: 	lfence
50: 	ret
1.30 maxv 51: END(x86_lfence)
1.1 ad 52:
/* x86_sfence(): store fence -- orders earlier stores before later stores. */
1.14 chs 53: ENTRY(x86_sfence)
1.1 ad 54: 	sfence
55: 	ret
1.30 maxv 56: END(x86_sfence)
1.1 ad 57:
/* x86_mfence(): full fence -- orders all earlier loads/stores before later ones. */
1.14 chs 58: ENTRY(x86_mfence)
1.1 ad 59: 	mfence
60: 	ret
1.30 maxv 61: END(x86_mfence)
1.1 ad 62:
1.36 cherry 63: #ifndef XENPV
/*
 * invlpg(va): invalidate the TLB entry for virtual address %rdi.
 *
 * Under SVS with PCID in use (svs_pcid != 0), the user address space runs
 * under a separate PCID, so additionally invalidate the user-PCID
 * translation with INVPCID.  The 16-byte INVPCID descriptor is built on
 * the stack: PCID at (%rsp), linear address at 8(%rsp) -- note the pushes
 * happen in reverse (address first, then PCID).
 */
1.35 cherry 64: ENTRY(invlpg)
1.41 maxv 65: #ifdef SVS
66: 	movb	_C_LABEL(svs_pcid),%al
67: 	testb	%al,%al
68: 	jz	1f
69: 	pushq	%rdi
70: 	pushq	$PMAP_PCID_USER
71: 	movq	$INVPCID_ADDRESS,%rax
72: 	invpcid	(%rsp),%rax
73: 	addq	$16,%rsp
74: 1:	/* FALLTHROUGH */
75: #endif
1.1 ad 76: 	invlpg	(%rdi)
77: 	ret
1.35 cherry 78: END(invlpg)
1.1 ad 79:
/*
 * lgdt(desc): load a new GDT from the region descriptor at %rdi, then
 * tail-jump to lgdt_finish to reload the data segment registers so they
 * reference the new table.
 */
1.45 maxv 80: ENTRY(lgdt)
81: 	/* Reload the descriptor table. */
82: 	movq	%rdi,%rax
83: 	lgdt	(%rax)
84: 	/* Flush the prefetch queue. */
85: 	jmp	1f
86: 	nop
87: 1:	jmp	_C_LABEL(lgdt_finish)
88: END(lgdt)
89:
/* lidt(desc): load the IDT from the region descriptor at %rdi. */
1.35 cherry 90: ENTRY(lidt)
1.1 ad 91: 	lidt	(%rdi)
92: 	ret
1.35 cherry 93: END(lidt)
1.1 ad 94:
/*
 * lldt(sel): load the LDT selector %di, caching it in the per-CPU
 * CURLDT field so a redundant reload of the same selector is skipped
 * (lldt is expensive).
 */
1.35 cherry 95: ENTRY(lldt)
1.13 ad 96: 	cmpl	%edi, CPUVAR(CURLDT)
97: 	jne	1f
98: 	ret
99: 1:
100: 	movl	%edi, CPUVAR(CURLDT)
1.1 ad 101: 	lldt	%di
102: 	ret
1.35 cherry 103: END(lldt)
1.1 ad 104:
/* ltr(sel): load the task register with TSS selector %di. */
1.35 cherry 105: ENTRY(ltr)
1.1 ad 106: 	ltr	%di
107: 	ret
1.35 cherry 108: END(ltr)
1.1 ad 109:
/*
 * tlbflushg(): flush the TLB including global (PG_G) entries, by
 * toggling CR4.PGE off and back on.  If the CPU has no PGE support
 * there are no global entries, so fall through to the plain tlbflush.
 */
1.35 cherry 110: ENTRY(tlbflushg)
1.1 ad 111: 	movq	%cr4, %rax
1.19 rmind 112: 	testq	$CR4_PGE, %rax
1.41 maxv 113: 	jz	tlbflush
1.1 ad 114: 	movq	%rax, %rdx
115: 	andq	$~CR4_PGE, %rdx
116: 	movq	%rdx, %cr4
117: 	movq	%rax, %cr4
118: 	ret
1.35 cherry 119: END(tlbflushg)
1.1 ad 120:
/*
 * tlbflush(): flush non-global TLB entries.
 *
 * With SVS+PCID active, a plain CR3 reload would only flush the current
 * PCID, so use INVPCID_ALL_NONGLOBAL (zeroed 16-byte descriptor on the
 * stack) to flush non-global entries across all PCIDs.  Otherwise,
 * reloading CR3 with its current value does the flush.
 */
1.35 cherry 121: ENTRY(tlbflush)
1.41 maxv 122: #ifdef SVS
123: 	movb	_C_LABEL(svs_pcid),%al
124: 	testb	%al,%al
125: 	jz	1f
126: 	xorq	%rax,%rax
127: 	pushq	%rax
128: 	pushq	%rax
129: 	movq	$INVPCID_ALL_NONGLOBAL,%rax
130: 	invpcid	(%rsp),%rax
131: 	addq	$16,%rsp
132: 	ret
133: #endif
134: 1:	movq	%cr3, %rax
1.1 ad 135: 	movq	%rax, %cr3
136: 	ret
1.35 cherry 137: END(tlbflush)
1.1 ad 138:
/* wbinvd(): write back and invalidate all CPU caches. */
1.45 maxv 139: ENTRY(wbinvd)
140: 	wbinvd
141: 	ret
142: END(wbinvd)
143:
/*
 * setusergs(sel): set the user %gs selector.  The kernel GS base is live,
 * so bracket the %gs load with swapgs to operate on the user copy, with
 * interrupts disabled so nothing can observe the swapped state.
 */
144: ENTRY(setusergs)
145: 	CLI(ax)
146: 	swapgs
147: 	movw	%di, %gs
148: 	swapgs
149: 	STI(ax)
150: 	ret
151: END(setusergs)
152:
/*
 * x86_read_flags(): return the current RFLAGS (also aliased as
 * x86_read_psl).  KMSAN_INIT_RET marks the 8-byte return initialized.
 */
1.14 chs 153: ENTRY(x86_read_flags)
1.1 ad 154: 	pushfq
155: 	popq	%rax
1.47 maxv 156: 	KMSAN_INIT_RET(8)
1.1 ad 157: 	ret
1.30 maxv 158: END(x86_read_flags)
1.1 ad 159:
160: STRONG_ALIAS(x86_read_psl,x86_read_flags)
161:
/*
 * x86_write_flags(flags): load %rdi into RFLAGS (also aliased as
 * x86_write_psl).
 */
1.14 chs 162: ENTRY(x86_write_flags)
1.1 ad 163: 	pushq	%rdi
164: 	popfq
165: 	ret
1.30 maxv 166: END(x86_write_flags)
1.1 ad 167:
168: STRONG_ALIAS(x86_write_psl,x86_write_flags)
169:
1.51 bouyer 170: /*
171: * %rdi = name
172: * %rsi = sel
173: */
174: ENTRY(x86_hotpatch)
175: /* save RFLAGS, and disable intrs */
176: pushfq
177: cli
178:
179: /* save CR0, and disable WP */
180: movq %cr0,%rcx
181: pushq %rcx
182: andq $~CR0_WP,%rcx
183: movq %rcx,%cr0
184:
185: callq _C_LABEL(x86_hotpatch_apply)
186:
187: /* write back and invalidate cache */
188: wbinvd
189:
190: /* restore CR0 */
191: popq %rcx
192: movq %rcx,%cr0
193:
194: /* flush instruction pipeline */
195: pushq %rax
196: callq x86_flush
197: popq %rax
198:
199: /* clean up */
200: movq %rax,%rdi
201: callq _C_LABEL(x86_hotpatch_cleanup)
202:
203: /* restore RFLAGS */
204: popfq
205: ret
206: END(x86_hotpatch)
1.45 maxv 207: #endif /* !XENPV */
208:
1.60 ad 209: /*
1.61 msaitoh 210: * cpu_counter and cpu_counter32 could be exact same, but KMSAN needs to have
211: * the correct size of the return value.
1.60 ad 212: */
1.61 msaitoh 213: #define SERIALIZE_lfence lfence
214: #define SERIALIZE_mfence mfence
215:
216: #define ADD_counter32 addl CPUVAR(CC_SKEW), %eax
217: #define ADD_counter shlq $32, %rdx ;\
218: orq %rdx, %rax ;\
219: addq CPUVAR(CC_SKEW), %rax
220:
221: #define RSIZE_counter32 4
222: #define RSIZE_counter 8
1.62 riastrad 223:
1.61 msaitoh 224: #define CPU_COUNTER_FENCE(counter, fence) \
225: ENTRY(cpu_ ## counter ## _ ## fence) ;\
226: movq CPUVAR(CURLWP), %rcx ;\
227: 1: ;\
228: movq L_NCSW(%rcx), %rdi ;\
229: SERIALIZE_ ## fence ;\
230: rdtsc ;\
231: ADD_ ## counter ;\
232: cmpq %rdi, L_NCSW(%rcx) ;\
233: jne 2f ;\
234: KMSAN_INIT_RET(RSIZE_ ## counter) ;\
235: ret ;\
236: 2: ;\
237: jmp 1b ;\
238: END(cpu_ ## counter ## _ ## fence)
239:
240: CPU_COUNTER_FENCE(counter, lfence)
241: CPU_COUNTER_FENCE(counter, mfence)
242: CPU_COUNTER_FENCE(counter32, lfence)
243: CPU_COUNTER_FENCE(counter32, mfence)
244:
/*
 * CPU_COUNTER_CPUID(counter): emit cpu_<counter>_cpuid(), the variant
 * that serializes with a cpuid instruction (for CPUs without usable
 * lfence/mfence serialization).  cpuid clobbers %rbx, which is
 * callee-saved, so it is stashed in %r9 for the duration.  Same
 * l_ncsw retry loop as CPU_COUNTER_FENCE.
 */
245: #define CPU_COUNTER_CPUID(counter)	\
246: ENTRY(cpu_ ## counter ## _cpuid)	;\
247: 	movq	%rbx, %r9		;\
248: 	movq	CPUVAR(CURLWP), %r8	;\
249: 1:					;\
250: 	movq	L_NCSW(%r8), %rdi	;\
251: 	xor	%eax, %eax		;\
252: 	cpuid				;\
253: 	rdtsc				;\
254: 	ADD_ ## counter			;\
255: 	cmpq	%rdi, L_NCSW(%r8)	;\
256: 	jne	2f			;\
257: 	movq	%r9, %rbx		;\
258: 	KMSAN_INIT_RET(RSIZE_ ## counter)	;\
259: 	ret				;\
260: 2:					;\
261: 	jmp	1b			;\
262: END(cpu_ ## counter ## _cpuid)
1.55 ad 263:
1.61 msaitoh 264: CPU_COUNTER_CPUID(counter)
265: CPU_COUNTER_CPUID(counter32)
1.54 ad 266:
1.20 jym 267: ENTRY(rdmsr_safe)
268: movq CPUVAR(CURLWP), %r8
269: movq L_PCB(%r8), %r8
270: movq $_C_LABEL(msr_onfault), PCB_ONFAULT(%r8)
271:
1.45 maxv 272: movl %edi, %ecx
273: rdmsr
274: salq $32, %rdx
1.20 jym 275: movl %eax, %eax /* zero-extend %eax -> %rax */
276: orq %rdx, %rax
1.45 maxv 277: movq %rax, (%rsi)
1.20 jym 278:
1.45 maxv 279: xorq %rax, %rax
1.20 jym 280: movq %rax, PCB_ONFAULT(%r8)
1.47 maxv 281: #ifdef KMSAN
282: movq %rsi,%rdi
283: movq $8,%rsi
284: xorq %rdx,%rdx
285: callq _C_LABEL(kmsan_mark)
286: #endif
287: KMSAN_INIT_RET(4)
1.20 jym 288: ret
1.30 maxv 289: END(rdmsr_safe)
1.20 jym 290:
/*
 * msr_onfault: fault-recovery target for rdmsr_safe.  Clears
 * pcb_onfault and returns EFAULT to rdmsr_safe's caller.
 */
1.32 maxv 291: ENTRY(msr_onfault)
1.20 jym 292: 	movq	CPUVAR(CURLWP), %r8
293: 	movq	L_PCB(%r8), %r8
294: 	movq	$0, PCB_ONFAULT(%r8)
295: 	movl	$EFAULT, %eax
1.21 jym 296: 	ret
1.30 maxv 297: END(msr_onfault)
1.20 jym 298:
/* breakpoint(): enter the debugger via a software breakpoint trap. */
1.14 chs 299: ENTRY(breakpoint)
1.7 ad 300: 	pushq	%rbp
301: 	movq	%rsp, %rbp
1.1 ad 302: 	int	$0x03		/* paranoid, not 'int3' */
1.7 ad 303: 	leave
1.1 ad 304: 	ret
1.30 maxv 305: END(breakpoint)
1.1 ad 306:
/* x86_curcpu(): return the current CPU's cpu_info pointer from %gs. */
1.14 chs 307: ENTRY(x86_curcpu)
1.1 ad 308: 	movq	%gs:(CPU_INFO_SELF), %rax
1.47 maxv 309: 	KMSAN_INIT_RET(8)
1.1 ad 310: 	ret
1.30 maxv 311: END(x86_curcpu)
1.1 ad 312:
/* x86_curlwp(): return the current LWP pointer from the per-CPU area. */
1.14 chs 313: ENTRY(x86_curlwp)
1.1 ad 314: 	movq	%gs:(CPU_INFO_CURLWP), %rax
1.47 maxv 315: 	KMSAN_INIT_RET(8)
1.1 ad 316: 	ret
1.30 maxv 317: END(x86_curlwp)
1.1 ad 318:
/* __byte_swap_u32_variable(x): return %edi with its 4 bytes reversed. */
1.14 chs 319: ENTRY(__byte_swap_u32_variable)
1.1 ad 320: 	movl	%edi, %eax
321: 	bswapl	%eax
1.47 maxv 322: 	KMSAN_INIT_RET(4)
1.1 ad 323: 	ret
1.30 maxv 324: END(__byte_swap_u32_variable)
1.1 ad 325:
/* __byte_swap_u16_variable(x): return %di with its 2 bytes swapped. */
1.14 chs 326: ENTRY(__byte_swap_u16_variable)
1.1 ad 327: 	movl	%edi, %eax
328: 	xchgb	%al, %ah
1.47 maxv 329: 	KMSAN_INIT_RET(2)
1.1 ad 330: 	ret
1.30 maxv 331: END(__byte_swap_u16_variable)
1.1 ad 332:
333: /*
1.45 maxv 334: * Reload segments after a GDT change.
1.3 bouyer 335: */
1.14 chs 336: ENTRY(lgdt_finish)
1.1 ad 337: movl $GSEL(GDATA_SEL, SEL_KPL),%eax
338: movl %eax,%ds
339: movl %eax,%es
340: movl %eax,%ss
1.31 maxv 341: jmp _C_LABEL(x86_flush)
342: END(lgdt_finish)
1.1 ad 343:
344: /*
345: * Flush instruction pipelines by doing an intersegment (far) return.
346: */
1.14 chs 347: ENTRY(x86_flush)
1.1 ad 348: popq %rax
349: pushq $GSEL(GCODE_SEL, SEL_KPL)
350: pushq %rax
351: lretq
1.30 maxv 352: END(x86_flush)
1.1 ad 353:
/* x86_hlt(): halt until the next interrupt.  Waits - set up stack frame. */
354: /* Waits - set up stack frame. */
1.14 chs 355: ENTRY(x86_hlt)
1.1 ad 356: 	pushq	%rbp
357: 	movq	%rsp, %rbp
358: 	hlt
359: 	leave
360: 	ret
1.30 maxv 361: END(x86_hlt)
1.1 ad 362:
/*
 * x86_stihlt(): atomically enable interrupts and halt -- the sti takes
 * effect after the following hlt, closing the wakeup race.
 */
363: /* Waits - set up stack frame. */
1.14 chs 364: ENTRY(x86_stihlt)
1.1 ad 365: 	pushq	%rbp
366: 	movq	%rsp, %rbp
367: 	sti
368: 	hlt
369: 	leave
370: 	ret
1.30 maxv 371: END(x86_stihlt)
1.1 ad 372:
/* x86_monitor(addr, ecx, edx): arm MONITOR on the address in %rdi. */
1.14 chs 373: ENTRY(x86_monitor)
1.1 ad 374: 	movq	%rdi, %rax
375: 	movq	%rsi, %rcx
1.16 skrll 376: 	monitor	%rax, %rcx, %rdx
1.1 ad 377: 	ret
1.30 maxv 378: END(x86_monitor)
1.1 ad 379:
/* x86_mwait(eax, ecx): wait in the MWAIT state armed by x86_monitor. */
380: /* Waits - set up stack frame. */
1.30 maxv 381: ENTRY(x86_mwait)
1.1 ad 382: 	pushq	%rbp
383: 	movq	%rsp, %rbp
384: 	movq	%rdi, %rax
385: 	movq	%rsi, %rcx
1.16 skrll 386: 	mwait	%rax, %rcx
1.1 ad 387: 	leave
388: 	ret
1.30 maxv 389: END(x86_mwait)
1.1 ad 390:
/* stts(): set CR0.TS so the next FPU use traps (lazy FPU handling). */
391: ENTRY(stts)
392: 	movq	%cr0, %rax
393: 	orq	$CR0_TS, %rax
394: 	movq	%rax, %cr0
395: 	ret
1.30 maxv 396: END(stts)
1.1 ad 397:
/* fldummy(): free x87 %st(7) and push a zero onto the FP stack. */
1.14 chs 398: ENTRY(fldummy)
1.1 ad 399: 	ffree	%st(7)
1.25 dsl 400: 	fldz
1.1 ad 401: 	ret
1.30 maxv 402: END(fldummy)
1.1 ad 403:
/* inb(port): read one byte from I/O port %di. */
1.14 chs 404: ENTRY(inb)
1.1 ad 405: 	movq	%rdi, %rdx
406: 	xorq	%rax, %rax
407: 	inb	%dx, %al
1.47 maxv 408: 	KMSAN_INIT_RET(1)
1.1 ad 409: 	ret
1.30 maxv 410: END(inb)
1.1 ad 411:
/* inw(port): read one 16-bit word from I/O port %di. */
1.14 chs 412: ENTRY(inw)
1.1 ad 413: 	movq	%rdi, %rdx
414: 	xorq	%rax, %rax
415: 	inw	%dx, %ax
1.47 maxv 416: 	KMSAN_INIT_RET(2)
1.1 ad 417: 	ret
1.30 maxv 418: END(inw)
1.1 ad 419:
/* inl(port): read one 32-bit word from I/O port %di. */
1.14 chs 420: ENTRY(inl)
1.1 ad 421: 	movq	%rdi, %rdx
422: 	xorq	%rax, %rax
423: 	inl	%dx, %eax
1.47 maxv 424: 	KMSAN_INIT_RET(4)
1.1 ad 425: 	ret
1.30 maxv 426: END(inl)
1.1 ad 427:
/* outb(port, data): write byte %sil to I/O port %di. */
1.14 chs 428: ENTRY(outb)
1.1 ad 429: 	movq	%rdi, %rdx
430: 	movq	%rsi, %rax
431: 	outb	%al, %dx
432: 	ret
1.30 maxv 433: END(outb)
1.1 ad 434:
/* outw(port, data): write 16-bit word %si to I/O port %di. */
1.14 chs 435: ENTRY(outw)
1.1 ad 436: 	movq	%rdi, %rdx
437: 	movq	%rsi, %rax
438: 	outw	%ax, %dx
439: 	ret
1.30 maxv 440: END(outw)
1.1 ad 441:
/* outl(port, data): write 32-bit word %esi to I/O port %di. */
1.14 chs 442: ENTRY(outl)
1.1 ad 443: 	movq	%rdi, %rdx
444: 	movq	%rsi, %rax
445: 	outl	%eax, %dx
446: 	ret
1.30 maxv 447: END(outl)
1.57 ad 448:
/*
 * x86_movs(dst=%rdi, src=%rsi, n=%rdx): copy n quadwords with rep movsq.
 * KMSAN_REP_STOS(8) updates shadow state for the 8-byte-granular copy.
 * Note: count is in quadwords, not bytes -- callers must pre-scale.
 */
449: ENTRY(x86_movs)
450: 	movq	%rdx,%rcx
1.59 ad 451: 	KMSAN_REP_STOS(8)
1.57 ad 452: 	rep
453: 	movsq
454: 	ret
1.58 ad 455: END(x86_movs)
CVSweb <webmaster@jp.NetBSD.org>