
src/sys/dev/nvmm/x86/nvmm_x86_svm.c, revision 1.14

/*	$NetBSD: nvmm_x86_svm.c,v 1.13 2019/01/08 14:43:18 maxv Exp $	*/

/*
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.13 2019/01/08 14:43:18 maxv Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>

#include <x86/cputypes.h>
#include <x86/specialreg.h>
#include <x86/pmap.h>
#include <x86/dbregs.h>
#include <machine/cpuvar.h>

#include <dev/nvmm/nvmm.h>
#include <dev/nvmm/nvmm_internal.h>
#include <dev/nvmm/x86/nvmm_x86.h>

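/*
 * svm_vmrun: the low-level VMRUN entry point, implemented in assembly.
 * Takes the physical address of the VMCB and a pointer to the guest
 * GPR array.
 */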
int svm_vmrun(paddr_t, uint64_t *);

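/* MSR holding the physical address of the host state-save area. */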
#define MSR_VM_HSAVE_PA	0xC0010117

/* -------------------------------------------------------------------------- */

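/* VMCB exit codes, as defined in the AMD APM Vol. 2 (SVM). */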
#define VMCB_EXITCODE_CR0_READ		0x0000
#define VMCB_EXITCODE_CR1_READ		0x0001
#define VMCB_EXITCODE_CR2_READ		0x0002
#define VMCB_EXITCODE_CR3_READ		0x0003
#define VMCB_EXITCODE_CR4_READ		0x0004
#define VMCB_EXITCODE_CR5_READ		0x0005
#define VMCB_EXITCODE_CR6_READ		0x0006
#define VMCB_EXITCODE_CR7_READ		0x0007
#define VMCB_EXITCODE_CR8_READ		0x0008
#define VMCB_EXITCODE_CR9_READ		0x0009
#define VMCB_EXITCODE_CR10_READ		0x000A
#define VMCB_EXITCODE_CR11_READ		0x000B
#define VMCB_EXITCODE_CR12_READ		0x000C
#define VMCB_EXITCODE_CR13_READ		0x000D
#define VMCB_EXITCODE_CR14_READ		0x000E
#define VMCB_EXITCODE_CR15_READ		0x000F
#define VMCB_EXITCODE_CR0_WRITE		0x0010
#define VMCB_EXITCODE_CR1_WRITE		0x0011
#define VMCB_EXITCODE_CR2_WRITE		0x0012
#define VMCB_EXITCODE_CR3_WRITE		0x0013
#define VMCB_EXITCODE_CR4_WRITE		0x0014
#define VMCB_EXITCODE_CR5_WRITE		0x0015
#define VMCB_EXITCODE_CR6_WRITE		0x0016
#define VMCB_EXITCODE_CR7_WRITE		0x0017
#define VMCB_EXITCODE_CR8_WRITE		0x0018
#define VMCB_EXITCODE_CR9_WRITE		0x0019
#define VMCB_EXITCODE_CR10_WRITE	0x001A
#define VMCB_EXITCODE_CR11_WRITE	0x001B
#define VMCB_EXITCODE_CR12_WRITE	0x001C
#define VMCB_EXITCODE_CR13_WRITE	0x001D
#define VMCB_EXITCODE_CR14_WRITE	0x001E
#define VMCB_EXITCODE_CR15_WRITE	0x001F
#define VMCB_EXITCODE_DR0_READ		0x0020
#define VMCB_EXITCODE_DR1_READ		0x0021
#define VMCB_EXITCODE_DR2_READ		0x0022
#define VMCB_EXITCODE_DR3_READ		0x0023
#define VMCB_EXITCODE_DR4_READ		0x0024
#define VMCB_EXITCODE_DR5_READ		0x0025
#define VMCB_EXITCODE_DR6_READ		0x0026
#define VMCB_EXITCODE_DR7_READ		0x0027
#define VMCB_EXITCODE_DR8_READ		0x0028
#define VMCB_EXITCODE_DR9_READ		0x0029
#define VMCB_EXITCODE_DR10_READ		0x002A
#define VMCB_EXITCODE_DR11_READ		0x002B
#define VMCB_EXITCODE_DR12_READ		0x002C
#define VMCB_EXITCODE_DR13_READ		0x002D
#define VMCB_EXITCODE_DR14_READ		0x002E
#define VMCB_EXITCODE_DR15_READ		0x002F
#define VMCB_EXITCODE_DR0_WRITE		0x0030
#define VMCB_EXITCODE_DR1_WRITE		0x0031
#define VMCB_EXITCODE_DR2_WRITE		0x0032
#define VMCB_EXITCODE_DR3_WRITE		0x0033
#define VMCB_EXITCODE_DR4_WRITE		0x0034
#define VMCB_EXITCODE_DR5_WRITE		0x0035
#define VMCB_EXITCODE_DR6_WRITE		0x0036
#define VMCB_EXITCODE_DR7_WRITE		0x0037
#define VMCB_EXITCODE_DR8_WRITE		0x0038
#define VMCB_EXITCODE_DR9_WRITE		0x0039
#define VMCB_EXITCODE_DR10_WRITE	0x003A
#define VMCB_EXITCODE_DR11_WRITE	0x003B
#define VMCB_EXITCODE_DR12_WRITE	0x003C
#define VMCB_EXITCODE_DR13_WRITE	0x003D
#define VMCB_EXITCODE_DR14_WRITE	0x003E
#define VMCB_EXITCODE_DR15_WRITE	0x003F
#define VMCB_EXITCODE_EXCP0		0x0040
#define VMCB_EXITCODE_EXCP1		0x0041
#define VMCB_EXITCODE_EXCP2		0x0042
#define VMCB_EXITCODE_EXCP3		0x0043
#define VMCB_EXITCODE_EXCP4		0x0044
#define VMCB_EXITCODE_EXCP5		0x0045
#define VMCB_EXITCODE_EXCP6		0x0046
#define VMCB_EXITCODE_EXCP7		0x0047
#define VMCB_EXITCODE_EXCP8		0x0048
#define VMCB_EXITCODE_EXCP9		0x0049
#define VMCB_EXITCODE_EXCP10		0x004A
#define VMCB_EXITCODE_EXCP11		0x004B
#define VMCB_EXITCODE_EXCP12		0x004C
#define VMCB_EXITCODE_EXCP13		0x004D
#define VMCB_EXITCODE_EXCP14		0x004E
#define VMCB_EXITCODE_EXCP15		0x004F
#define VMCB_EXITCODE_EXCP16		0x0050
#define VMCB_EXITCODE_EXCP17		0x0051
#define VMCB_EXITCODE_EXCP18		0x0052
#define VMCB_EXITCODE_EXCP19		0x0053
#define VMCB_EXITCODE_EXCP20		0x0054
#define VMCB_EXITCODE_EXCP21		0x0055
#define VMCB_EXITCODE_EXCP22		0x0056
#define VMCB_EXITCODE_EXCP23		0x0057
#define VMCB_EXITCODE_EXCP24		0x0058
#define VMCB_EXITCODE_EXCP25		0x0059
#define VMCB_EXITCODE_EXCP26		0x005A
#define VMCB_EXITCODE_EXCP27		0x005B
#define VMCB_EXITCODE_EXCP28		0x005C
#define VMCB_EXITCODE_EXCP29		0x005D
#define VMCB_EXITCODE_EXCP30		0x005E
#define VMCB_EXITCODE_EXCP31		0x005F
#define VMCB_EXITCODE_INTR		0x0060
#define VMCB_EXITCODE_NMI		0x0061
#define VMCB_EXITCODE_SMI		0x0062
#define VMCB_EXITCODE_INIT		0x0063
#define VMCB_EXITCODE_VINTR		0x0064
#define VMCB_EXITCODE_CR0_SEL_WRITE	0x0065
#define VMCB_EXITCODE_IDTR_READ		0x0066
#define VMCB_EXITCODE_GDTR_READ		0x0067
#define VMCB_EXITCODE_LDTR_READ		0x0068
#define VMCB_EXITCODE_TR_READ		0x0069
#define VMCB_EXITCODE_IDTR_WRITE	0x006A
#define VMCB_EXITCODE_GDTR_WRITE	0x006B
#define VMCB_EXITCODE_LDTR_WRITE	0x006C
#define VMCB_EXITCODE_TR_WRITE		0x006D
#define VMCB_EXITCODE_RDTSC		0x006E
#define VMCB_EXITCODE_RDPMC		0x006F
#define VMCB_EXITCODE_PUSHF		0x0070
#define VMCB_EXITCODE_POPF		0x0071
#define VMCB_EXITCODE_CPUID		0x0072
#define VMCB_EXITCODE_RSM		0x0073
#define VMCB_EXITCODE_IRET		0x0074
#define VMCB_EXITCODE_SWINT		0x0075
#define VMCB_EXITCODE_INVD		0x0076
#define VMCB_EXITCODE_PAUSE		0x0077
#define VMCB_EXITCODE_HLT		0x0078
#define VMCB_EXITCODE_INVLPG		0x0079
#define VMCB_EXITCODE_INVLPGA		0x007A
#define VMCB_EXITCODE_IOIO		0x007B
#define VMCB_EXITCODE_MSR		0x007C
#define VMCB_EXITCODE_TASK_SWITCH	0x007D
#define VMCB_EXITCODE_FERR_FREEZE	0x007E
#define VMCB_EXITCODE_SHUTDOWN		0x007F
#define VMCB_EXITCODE_VMRUN		0x0080
#define VMCB_EXITCODE_VMMCALL		0x0081
#define VMCB_EXITCODE_VMLOAD		0x0082
#define VMCB_EXITCODE_VMSAVE		0x0083
#define VMCB_EXITCODE_STGI		0x0084
#define VMCB_EXITCODE_CLGI		0x0085
#define VMCB_EXITCODE_SKINIT		0x0086
#define VMCB_EXITCODE_RDTSCP		0x0087
#define VMCB_EXITCODE_ICEBP		0x0088
#define VMCB_EXITCODE_WBINVD		0x0089
#define VMCB_EXITCODE_MONITOR		0x008A
#define VMCB_EXITCODE_MWAIT		0x008B
#define VMCB_EXITCODE_MWAIT_CONDITIONAL	0x008C
#define VMCB_EXITCODE_XSETBV		0x008D
#define VMCB_EXITCODE_EFER_WRITE_TRAP	0x008F
#define VMCB_EXITCODE_CR0_WRITE_TRAP	0x0090
#define VMCB_EXITCODE_CR1_WRITE_TRAP	0x0091
#define VMCB_EXITCODE_CR2_WRITE_TRAP	0x0092
#define VMCB_EXITCODE_CR3_WRITE_TRAP	0x0093
#define VMCB_EXITCODE_CR4_WRITE_TRAP	0x0094
#define VMCB_EXITCODE_CR5_WRITE_TRAP	0x0095
#define VMCB_EXITCODE_CR6_WRITE_TRAP	0x0096
#define VMCB_EXITCODE_CR7_WRITE_TRAP	0x0097
#define VMCB_EXITCODE_CR8_WRITE_TRAP	0x0098
#define VMCB_EXITCODE_CR9_WRITE_TRAP	0x0099
#define VMCB_EXITCODE_CR10_WRITE_TRAP	0x009A
#define VMCB_EXITCODE_CR11_WRITE_TRAP	0x009B
#define VMCB_EXITCODE_CR12_WRITE_TRAP	0x009C
#define VMCB_EXITCODE_CR13_WRITE_TRAP	0x009D
#define VMCB_EXITCODE_CR14_WRITE_TRAP	0x009E
#define VMCB_EXITCODE_CR15_WRITE_TRAP	0x009F
#define VMCB_EXITCODE_NPF		0x0400
#define VMCB_EXITCODE_AVIC_INCOMP_IPI	0x0401
#define VMCB_EXITCODE_AVIC_NOACCEL	0x0402
#define VMCB_EXITCODE_VMGEXIT		0x0403
#define VMCB_EXITCODE_INVALID		-1

/* -------------------------------------------------------------------------- */

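/*
 * Control area of the VMCB: the intercept settings, the guest ASID, the
 * virtual interrupt state, the exit information, event injection and the
 * Nested Paging CR3. The CTASSERT below pins it at 1024 bytes, the first
 * quarter of the VMCB page.
 */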
struct vmcb_ctrl {
	uint32_t intercept_cr;
#define VMCB_CTRL_INTERCEPT_RCR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WCR(x)	__BIT(16 + x)

	uint32_t intercept_dr;
#define VMCB_CTRL_INTERCEPT_RDR(x)	__BIT( 0 + x)
#define VMCB_CTRL_INTERCEPT_WDR(x)	__BIT(16 + x)

	uint32_t intercept_vec;
#define VMCB_CTRL_INTERCEPT_VEC(x)	__BIT(x)

	uint32_t intercept_misc1;
#define VMCB_CTRL_INTERCEPT_INTR	__BIT(0)
#define VMCB_CTRL_INTERCEPT_NMI		__BIT(1)
#define VMCB_CTRL_INTERCEPT_SMI		__BIT(2)
#define VMCB_CTRL_INTERCEPT_INIT	__BIT(3)
#define VMCB_CTRL_INTERCEPT_VINTR	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CR0_SPEC	__BIT(5)
#define VMCB_CTRL_INTERCEPT_RIDTR	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RGDTR	__BIT(7)
#define VMCB_CTRL_INTERCEPT_RLDTR	__BIT(8)
#define VMCB_CTRL_INTERCEPT_RTR		__BIT(9)
#define VMCB_CTRL_INTERCEPT_WIDTR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_WGDTR	__BIT(11)
#define VMCB_CTRL_INTERCEPT_WLDTR	__BIT(12)
#define VMCB_CTRL_INTERCEPT_WTR		__BIT(13)
#define VMCB_CTRL_INTERCEPT_RDTSC	__BIT(14)
#define VMCB_CTRL_INTERCEPT_RDPMC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_PUSHF	__BIT(16)
#define VMCB_CTRL_INTERCEPT_POPF	__BIT(17)
#define VMCB_CTRL_INTERCEPT_CPUID	__BIT(18)
#define VMCB_CTRL_INTERCEPT_RSM		__BIT(19)
#define VMCB_CTRL_INTERCEPT_IRET	__BIT(20)
#define VMCB_CTRL_INTERCEPT_INTN	__BIT(21)
#define VMCB_CTRL_INTERCEPT_INVD	__BIT(22)
#define VMCB_CTRL_INTERCEPT_PAUSE	__BIT(23)
#define VMCB_CTRL_INTERCEPT_HLT		__BIT(24)
#define VMCB_CTRL_INTERCEPT_INVLPG	__BIT(25)
#define VMCB_CTRL_INTERCEPT_INVLPGA	__BIT(26)
#define VMCB_CTRL_INTERCEPT_IOIO_PROT	__BIT(27)
#define VMCB_CTRL_INTERCEPT_MSR_PROT	__BIT(28)
#define VMCB_CTRL_INTERCEPT_TASKSW	__BIT(29)
#define VMCB_CTRL_INTERCEPT_FERR_FREEZE	__BIT(30)
#define VMCB_CTRL_INTERCEPT_SHUTDOWN	__BIT(31)

	uint32_t intercept_misc2;
#define VMCB_CTRL_INTERCEPT_VMRUN	__BIT(0)
#define VMCB_CTRL_INTERCEPT_VMMCALL	__BIT(1)
#define VMCB_CTRL_INTERCEPT_VMLOAD	__BIT(2)
#define VMCB_CTRL_INTERCEPT_VMSAVE	__BIT(3)
#define VMCB_CTRL_INTERCEPT_STGI	__BIT(4)
#define VMCB_CTRL_INTERCEPT_CLGI	__BIT(5)
#define VMCB_CTRL_INTERCEPT_SKINIT	__BIT(6)
#define VMCB_CTRL_INTERCEPT_RDTSCP	__BIT(7)
#define VMCB_CTRL_INTERCEPT_ICEBP	__BIT(8)
#define VMCB_CTRL_INTERCEPT_WBINVD	__BIT(9)
#define VMCB_CTRL_INTERCEPT_MONITOR	__BIT(10)
#define VMCB_CTRL_INTERCEPT_MWAIT	__BIT(12)
#define VMCB_CTRL_INTERCEPT_XSETBV	__BIT(13)
#define VMCB_CTRL_INTERCEPT_EFER_SPEC	__BIT(15)
#define VMCB_CTRL_INTERCEPT_WCR_SPEC(x)	__BIT(16 + x)

	uint8_t  rsvd1[40];
	uint16_t pause_filt_thresh;
	uint16_t pause_filt_cnt;
	uint64_t iopm_base_pa;
	uint64_t msrpm_base_pa;
	uint64_t tsc_offset;
	uint32_t guest_asid;

	uint32_t tlb_ctrl;
#define VMCB_CTRL_TLB_CTRL_FLUSH_ALL			0x01
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST			0x03
#define VMCB_CTRL_TLB_CTRL_FLUSH_GUEST_NONGLOBAL	0x07

	uint64_t v;
#define VMCB_CTRL_V_TPR			__BITS(7,0)
#define VMCB_CTRL_V_IRQ			__BIT(8)
#define VMCB_CTRL_V_VGIF		__BIT(9)
#define VMCB_CTRL_V_INTR_PRIO		__BITS(19,16)
#define VMCB_CTRL_V_IGN_TPR		__BIT(20)
#define VMCB_CTRL_V_INTR_MASKING	__BIT(24)
#define VMCB_CTRL_V_GUEST_VGIF		__BIT(25)
#define VMCB_CTRL_V_AVIC_EN		__BIT(31)
#define VMCB_CTRL_V_INTR_VECTOR		__BITS(39,32)

	uint64_t intr;
#define VMCB_CTRL_INTR_SHADOW		__BIT(0)

	uint64_t exitcode;
	uint64_t exitinfo1;
	uint64_t exitinfo2;

	uint64_t exitintinfo;
#define VMCB_CTRL_EXITINTINFO_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EXITINTINFO_TYPE	__BITS(10,8)
#define VMCB_CTRL_EXITINTINFO_EV	__BIT(11)
#define VMCB_CTRL_EXITINTINFO_V		__BIT(31)
#define VMCB_CTRL_EXITINTINFO_ERRORCODE	__BITS(63,32)

	uint64_t enable1;
#define VMCB_CTRL_ENABLE_NP		__BIT(0)
#define VMCB_CTRL_ENABLE_SEV		__BIT(1)
#define VMCB_CTRL_ENABLE_ES_SEV		__BIT(2)

	uint64_t avic;
#define VMCB_CTRL_AVIC_APIC_BAR		__BITS(51,0)

	uint64_t ghcb;

	uint64_t eventinj;
#define VMCB_CTRL_EVENTINJ_VECTOR	__BITS(7,0)
#define VMCB_CTRL_EVENTINJ_TYPE		__BITS(10,8)
#define VMCB_CTRL_EVENTINJ_EV		__BIT(11)
#define VMCB_CTRL_EVENTINJ_V		__BIT(31)
#define VMCB_CTRL_EVENTINJ_ERRORCODE	__BITS(63,32)

	uint64_t n_cr3;

	uint64_t enable2;
#define VMCB_CTRL_ENABLE_LBR		__BIT(0)
#define VMCB_CTRL_ENABLE_VVMSAVE	__BIT(1)

	uint32_t vmcb_clean;
#define VMCB_CTRL_VMCB_CLEAN_I		__BIT(0)
#define VMCB_CTRL_VMCB_CLEAN_IOPM	__BIT(1)
#define VMCB_CTRL_VMCB_CLEAN_ASID	__BIT(2)
#define VMCB_CTRL_VMCB_CLEAN_TPR	__BIT(3)
#define VMCB_CTRL_VMCB_CLEAN_NP		__BIT(4)
#define VMCB_CTRL_VMCB_CLEAN_CR		__BIT(5)
#define VMCB_CTRL_VMCB_CLEAN_DR		__BIT(6)
#define VMCB_CTRL_VMCB_CLEAN_DT		__BIT(7)
#define VMCB_CTRL_VMCB_CLEAN_SEG	__BIT(8)
#define VMCB_CTRL_VMCB_CLEAN_CR2	__BIT(9)
#define VMCB_CTRL_VMCB_CLEAN_LBR	__BIT(10)
#define VMCB_CTRL_VMCB_CLEAN_AVIC	__BIT(11)

	uint32_t rsvd2;
	uint64_t nrip;
	uint8_t inst_len;
	uint8_t inst_bytes[15];
	uint64_t avic_abpp;
	uint64_t rsvd3;
	uint64_t avic_ltp;

	uint64_t avic_phys;
#define VMCB_CTRL_AVIC_PHYS_TABLE_PTR	__BITS(51,12)
#define VMCB_CTRL_AVIC_PHYS_MAX_INDEX	__BITS(7,0)

	uint64_t rsvd4;
	uint64_t vmcb_ptr;

	uint8_t pad[752];
} __packed;

CTASSERT(sizeof(struct vmcb_ctrl) == 1024);

struct vmcb_segment {
	uint16_t selector;
	uint16_t attrib;	/* hidden */
	uint32_t limit;		/* hidden */
	uint64_t base;		/* hidden */
} __packed;

CTASSERT(sizeof(struct vmcb_segment) == 16);

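/*
 * State save area of the VMCB: the guest register state, part of it
 * loaded and saved by VMRUN/#VMEXIT, part of it only by VMLOAD/VMSAVE.
 */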
struct vmcb_state {
	struct   vmcb_segment es;
	struct   vmcb_segment cs;
	struct   vmcb_segment ss;
	struct   vmcb_segment ds;
	struct   vmcb_segment fs;
	struct   vmcb_segment gs;
	struct   vmcb_segment gdt;
	struct   vmcb_segment ldt;
	struct   vmcb_segment idt;
	struct   vmcb_segment tr;
	uint8_t  rsvd1[43];
	uint8_t  cpl;
	uint8_t  rsvd2[4];
	uint64_t efer;
	uint8_t  rsvd3[112];
	uint64_t cr4;
	uint64_t cr3;
	uint64_t cr0;
	uint64_t dr7;
	uint64_t dr6;
	uint64_t rflags;
	uint64_t rip;
	uint8_t  rsvd4[88];
	uint64_t rsp;
	uint8_t  rsvd5[24];
	uint64_t rax;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t kernelgsbase;
	uint64_t sysenter_cs;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t cr2;
	uint8_t  rsvd6[32];
	uint64_t g_pat;
	uint64_t dbgctl;
	uint64_t br_from;
	uint64_t br_to;
	uint64_t int_from;
	uint64_t int_to;
	uint8_t  pad[2408];
} __packed;

CTASSERT(sizeof(struct vmcb_state) == 0xC00);

struct vmcb {
	struct vmcb_ctrl ctrl;
	struct vmcb_state state;
} __packed;

CTASSERT(sizeof(struct vmcb) == PAGE_SIZE);
CTASSERT(offsetof(struct vmcb, state) == 0x400);

/* -------------------------------------------------------------------------- */

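/*
 * Host state-save area, one per CPU. Its physical address gets written
 * in MSR_VM_HSAVE_PA before VMRUN can be executed.
 */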
struct svm_hsave {
	paddr_t pa;
};

static struct svm_hsave hsave[MAXCPUS];

static uint8_t *svm_asidmap __read_mostly;
static uint32_t svm_maxasid __read_mostly;
static kmutex_t svm_asidlock __cacheline_aligned;

static bool svm_decode_assist __read_mostly;
static uint32_t svm_ctrl_tlb_flush __read_mostly;

#define SVM_XCR0_MASK_DEFAULT	(XCR0_X87|XCR0_SSE)
static uint64_t svm_xcr0_mask __read_mostly;

#define SVM_NCPUIDS	32

#define VMCB_NPAGES	1

#define MSRBM_NPAGES	2
#define MSRBM_SIZE	(MSRBM_NPAGES * PAGE_SIZE)

#define IOBM_NPAGES	3
#define IOBM_SIZE	(IOBM_NPAGES * PAGE_SIZE)

/* Does not include EFER_LMSLE. */
#define EFER_VALID \
	(EFER_SCE|EFER_LME|EFER_LMA|EFER_NXE|EFER_SVME|EFER_FFXSR|EFER_TCE)

#define EFER_TLB_FLUSH \
	(EFER_NXE|EFER_LMA|EFER_LME)
#define CR0_TLB_FLUSH \
	(CR0_PG|CR0_WP|CR0_CD|CR0_NW)
#define CR4_TLB_FLUSH \
	(CR4_PGE|CR4_PAE|CR4_PSE)

/* -------------------------------------------------------------------------- */

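/* Per-machine data: the CPUID leaves configured from userland. */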
struct svm_machdata {
	bool cpuidpresent[SVM_NCPUIDS];
	struct nvmm_x86_conf_cpuid cpuid[SVM_NCPUIDS];
};

static const size_t svm_conf_sizes[NVMM_X86_NCONF] = {
	[NVMM_X86_CONF_CPUID] = sizeof(struct nvmm_x86_conf_cpuid)
};

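/* Per-VCPU data: the VMCB, the intercept bitmaps, and the saved contexts. */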
struct svm_cpudata {
	/* General */
	bool shared_asid;
	bool tlb_want_flush;

	/* VMCB */
	struct vmcb *vmcb;
	paddr_t vmcb_pa;

	/* I/O bitmap */
	uint8_t *iobm;
	paddr_t iobm_pa;

	/* MSR bitmap */
	uint8_t *msrbm;
	paddr_t msrbm_pa;

	/* Host state */
	uint64_t hxcr0;
	uint64_t star;
	uint64_t lstar;
	uint64_t cstar;
	uint64_t sfmask;
	uint64_t fsbase;
	uint64_t kernelgsbase;
	bool ts_set;
	struct xsave_header hfpu __aligned(16);

	/* Event state */
	bool int_window_exit;
	bool nmi_window_exit;

	/* Guest state */
	uint64_t gxcr0;
	uint64_t gprs[NVMM_X64_NGPR];
	uint64_t drs[NVMM_X64_NDR];
	uint64_t tsc_offset;
	struct xsave_header gfpu __aligned(16);
};

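/*
 * The VMCB Clean Bits indicate which guest-indexed fields of the VMCB the
 * processor may reuse from its internal cache. A field whose bit is clear
 * is re-fetched from memory on the next VMRUN.
 */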
static void
svm_vmcb_cache_default(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean =
	    VMCB_CTRL_VMCB_CLEAN_I |
	    VMCB_CTRL_VMCB_CLEAN_IOPM |
	    VMCB_CTRL_VMCB_CLEAN_ASID |
	    VMCB_CTRL_VMCB_CLEAN_TPR |
	    VMCB_CTRL_VMCB_CLEAN_NP |
	    VMCB_CTRL_VMCB_CLEAN_CR |
	    VMCB_CTRL_VMCB_CLEAN_DR |
	    VMCB_CTRL_VMCB_CLEAN_DT |
	    VMCB_CTRL_VMCB_CLEAN_SEG |
	    VMCB_CTRL_VMCB_CLEAN_CR2 |
	    VMCB_CTRL_VMCB_CLEAN_LBR |
	    VMCB_CTRL_VMCB_CLEAN_AVIC;
}

static void
svm_vmcb_cache_update(struct vmcb *vmcb, uint64_t flags)
{
	if (flags & NVMM_X64_STATE_SEGS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_SEG | VMCB_CTRL_VMCB_CLEAN_DT);
	}
	if (flags & NVMM_X64_STATE_CRS) {
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_CR2 |
		      VMCB_CTRL_VMCB_CLEAN_TPR);
	}
	if (flags & NVMM_X64_STATE_DRS) {
		vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_DR;
	}
	if (flags & NVMM_X64_STATE_MSRS) {
		/* CR for EFER, NP for PAT. */
		vmcb->ctrl.vmcb_clean &=
		    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_NP);
	}
	if (flags & NVMM_X64_STATE_MISC) {
		/* SEG for CPL. */
		vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_SEG;
	}
}

static inline void
svm_vmcb_cache_flush(struct vmcb *vmcb, uint64_t flags)
{
	vmcb->ctrl.vmcb_clean &= ~flags;
}

static inline void
svm_vmcb_cache_flush_all(struct vmcb *vmcb)
{
	vmcb->ctrl.vmcb_clean = 0;
}

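/* Event types, for the TYPE field of EVENTINJ and EXITINTINFO. */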
#define SVM_EVENT_TYPE_HW_INT	0
#define SVM_EVENT_TYPE_NMI	2
#define SVM_EVENT_TYPE_EXC	3
#define SVM_EVENT_TYPE_SW_INT	4

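/*
 * To know when the guest can take an interrupt, intercept VINTR and
 * request a virtual interrupt; for an NMI, intercept IRET, which marks
 * the end of the in-progress NMI handler.
 */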
static void
svm_event_waitexit_enable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = true;
	} else {
		vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = true;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

static void
svm_event_waitexit_disable(struct nvmm_cpu *vcpu, bool nmi)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;

	if (nmi) {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_IRET;
		cpudata->nmi_window_exit = false;
	} else {
		vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
		vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
		svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
		cpudata->int_window_exit = false;
	}

	svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

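/* Whether the given exception vector pushes an error code. */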
static inline int
svm_event_has_error(uint64_t vector)
{
	switch (vector) {
	case 8:		/* #DF */
	case 10:	/* #TS */
	case 11:	/* #NP */
	case 12:	/* #SS */
	case 13:	/* #GP */
	case 14:	/* #PF */
	case 17:	/* #AC */
	case 30:	/* #SX */
		return 1;
	default:
		return 0;
	}
}

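/*
 * Inject an event in the guest, via the EVENTINJ field of the VMCB.
 * Returns EAGAIN if the guest cannot take the event at this point, in
 * which case a window exit is made pending.
 */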
static int
svm_vcpu_inject(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_event *event)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct vmcb *vmcb = cpudata->vmcb;
	int type = 0, err = 0;

	if (event->vector >= 256) {
		return EINVAL;
	}

	switch (event->type) {
	case NVMM_EVENT_INTERRUPT_HW:
		type = SVM_EVENT_TYPE_HW_INT;
		if (event->vector == 2) {
			type = SVM_EVENT_TYPE_NMI;
		}
		if (type == SVM_EVENT_TYPE_NMI) {
			if (cpudata->nmi_window_exit) {
				return EAGAIN;
			}
			svm_event_waitexit_enable(vcpu, true);
		} else {
			if (((vmcb->state.rflags & PSL_I) == 0) ||
			    ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0)) {
				svm_event_waitexit_enable(vcpu, false);
				return EAGAIN;
			}
		}
		err = 0;
		break;
	case NVMM_EVENT_INTERRUPT_SW:
		type = SVM_EVENT_TYPE_SW_INT;
		err = 0;
		break;
	case NVMM_EVENT_EXCEPTION:
		type = SVM_EVENT_TYPE_EXC;
		if (event->vector == 2 || event->vector >= 32)
			return EINVAL;
		err = svm_event_has_error(event->vector);
		break;
	default:
		return EINVAL;
	}

	vmcb->ctrl.eventinj =
	    __SHIFTIN(event->vector, VMCB_CTRL_EVENTINJ_VECTOR) |
	    __SHIFTIN(type, VMCB_CTRL_EVENTINJ_TYPE) |
	    __SHIFTIN(err, VMCB_CTRL_EVENTINJ_EV) |
	    __SHIFTIN(1, VMCB_CTRL_EVENTINJ_V) |
	    __SHIFTIN(event->u.error, VMCB_CTRL_EVENTINJ_ERRORCODE);

	return 0;
}

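/* Shorthands to inject #UD, #DB and #GP in the guest. */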
static void
svm_inject_ud(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 6;
	event.u.error = 0;

	ret = svm_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static void
svm_inject_db(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 1;
	event.u.error = 0;

	ret = svm_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

static void
svm_inject_gp(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
{
	struct nvmm_event event;
	int ret __diagused;

	event.type = NVMM_EVENT_EXCEPTION;
	event.vector = 13;
	event.u.error = 0;

	ret = svm_vcpu_inject(mach, vcpu, &event);
	KASSERT(ret == 0);
}

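/*
 * Overwrite the CPUID leaves that are not tunable: the APIC ID, the FPU
 * description, the hypervisor signature, and the SVM bit.
 */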
static void
svm_inkernel_handle_cpuid(struct nvmm_cpu *vcpu, uint64_t eax, uint64_t ecx)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	switch (eax) {
	case 0x00000001: /* APIC number in RBX. The rest is tunable. */
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~CPUID_LOCAL_APIC_ID;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= __SHIFTIN(vcpu->cpuid,
		    CPUID_LOCAL_APIC_ID);
		break;
	case 0x0000000D: /* FPU description. Not tunable. */
		if (ecx != 0 || svm_xcr0_mask == 0) {
			break;
		}
		cpudata->vmcb->state.rax = svm_xcr0_mask & 0xFFFFFFFF;
		if (cpudata->gxcr0 & XCR0_SSE) {
			cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct fxsave);
		} else {
			cpudata->gprs[NVMM_X64_GPR_RBX] = sizeof(struct save87);
		}
		cpudata->gprs[NVMM_X64_GPR_RBX] += 64; /* XSAVE header */
		cpudata->gprs[NVMM_X64_GPR_RCX] = sizeof(struct fxsave);
		cpudata->gprs[NVMM_X64_GPR_RDX] = svm_xcr0_mask >> 32;
		break;
	case 0x40000000:
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RBX], "___ ", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RCX], "NVMM", 4);
		memcpy(&cpudata->gprs[NVMM_X64_GPR_RDX], " ___", 4);
		break;
	case 0x80000001: /* No SVM in ECX. The rest is tunable. */
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~CPUID_SVM;
		break;
	default:
		break;
	}
}

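/*
 * Handle a CPUID exit: execute the instruction on the host, apply the
 * set/del masks configured by userland, then the in-kernel overrides.
 */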
static void
svm_exit_cpuid(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_machdata *machdata = mach->machdata;
	struct svm_cpudata *cpudata = vcpu->cpudata;
	struct nvmm_x86_conf_cpuid *cpuid;
	uint64_t eax, ecx;
	u_int descs[4];
	size_t i;

	eax = cpudata->vmcb->state.rax;
	ecx = cpudata->gprs[NVMM_X64_GPR_RCX];
	x86_cpuid2(eax, ecx, descs);

	cpudata->vmcb->state.rax = descs[0];
	cpudata->gprs[NVMM_X64_GPR_RBX] = descs[1];
	cpudata->gprs[NVMM_X64_GPR_RCX] = descs[2];
	cpudata->gprs[NVMM_X64_GPR_RDX] = descs[3];

	for (i = 0; i < SVM_NCPUIDS; i++) {
		cpuid = &machdata->cpuid[i];
		if (!machdata->cpuidpresent[i]) {
			continue;
		}
		if (cpuid->leaf != eax) {
			continue;
		}

		/* del */
		cpudata->vmcb->state.rax &= ~cpuid->del.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] &= ~cpuid->del.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] &= ~cpuid->del.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] &= ~cpuid->del.edx;

		/* set */
		cpudata->vmcb->state.rax |= cpuid->set.eax;
		cpudata->gprs[NVMM_X64_GPR_RBX] |= cpuid->set.ebx;
		cpudata->gprs[NVMM_X64_GPR_RCX] |= cpuid->set.ecx;
		cpudata->gprs[NVMM_X64_GPR_RDX] |= cpuid->set.edx;

		break;
	}

	/* Overwrite non-tunable leaves. */
	svm_inkernel_handle_cpuid(vcpu, eax, ecx);

	/* For now we omit DBREGS. */
	if (__predict_false(cpudata->vmcb->state.rflags & PSL_T)) {
		svm_inject_db(mach, vcpu);
	}

	cpudata->vmcb->state.rip = cpudata->vmcb->ctrl.nrip;
	exit->reason = NVMM_EXIT_NONE;
}

static void
svm_exit_hlt(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;

	exit->reason = NVMM_EXIT_HLT;
	exit->u.hlt.npc = cpudata->vmcb->ctrl.nrip;
}

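/* Layout of EXITINFO1 for IOIO intercepts. */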
#define SVM_EXIT_IO_PORT	__BITS(31,16)
#define SVM_EXIT_IO_SEG		__BITS(12,10)
#define SVM_EXIT_IO_A64		__BIT(9)
#define SVM_EXIT_IO_A32		__BIT(8)
#define SVM_EXIT_IO_A16		__BIT(7)
#define SVM_EXIT_IO_SZ32	__BIT(6)
#define SVM_EXIT_IO_SZ16	__BIT(5)
#define SVM_EXIT_IO_SZ8		__BIT(4)
#define SVM_EXIT_IO_REP		__BIT(3)
#define SVM_EXIT_IO_STR		__BIT(2)
#define SVM_EXIT_IO_IN		__BIT(0)

static const int seg_to_nvmm[] = {
	[0] = NVMM_X64_SEG_ES,
	[1] = NVMM_X64_SEG_CS,
	[2] = NVMM_X64_SEG_SS,
	[3] = NVMM_X64_SEG_DS,
	[4] = NVMM_X64_SEG_FS,
	[5] = NVMM_X64_SEG_GS
};

static void
svm_exit_io(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
    struct nvmm_exit *exit)
{
	struct svm_cpudata *cpudata = vcpu->cpudata;
	uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
	uint64_t nextpc = cpudata->vmcb->ctrl.exitinfo2;

	exit->reason = NVMM_EXIT_IO;

	if (info & SVM_EXIT_IO_IN) {
		exit->u.io.type = NVMM_EXIT_IO_IN;
	} else {
		exit->u.io.type = NVMM_EXIT_IO_OUT;
	}

	exit->u.io.port = __SHIFTOUT(info, SVM_EXIT_IO_PORT);

	if (svm_decode_assist) {
		KASSERT(__SHIFTOUT(info, SVM_EXIT_IO_SEG) < 6);
		exit->u.io.seg = seg_to_nvmm[__SHIFTOUT(info, SVM_EXIT_IO_SEG)];
	} else {
		exit->u.io.seg = -1;
	}

	if (info & SVM_EXIT_IO_A64) {
		exit->u.io.address_size = 8;
	} else if (info & SVM_EXIT_IO_A32) {
		exit->u.io.address_size = 4;
	} else if (info & SVM_EXIT_IO_A16) {
		exit->u.io.address_size = 2;
	}

	if (info & SVM_EXIT_IO_SZ32) {
		exit->u.io.operand_size = 4;
	} else if (info & SVM_EXIT_IO_SZ16) {
		exit->u.io.operand_size = 2;
	} else if (info & SVM_EXIT_IO_SZ8) {
		exit->u.io.operand_size = 1;
	}

	exit->u.io.rep = (info & SVM_EXIT_IO_REP) != 0;
	exit->u.io.str = (info & SVM_EXIT_IO_STR) != 0;
	exit->u.io.npc = nextpc;
}

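/* MSRs ignored in the kernel: reads return zero, writes are dropped. */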
static const uint64_t msr_ignore_list[] = {
	0xc0010055, /* MSR_CMPHALT */
	MSR_DE_CFG,
	MSR_IC_CFG,
	MSR_UCODE_AMD_PATCHLEVEL
};

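/*
 * Handle in the kernel the MSRs that don't need to be forwarded to the
 * emulator. Returns true if handled, in which case RIP is advanced past
 * the instruction.
 */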
1.1       maxv      934: static bool
                    935: svm_inkernel_handle_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
                    936:     struct nvmm_exit *exit)
                    937: {
                    938:        struct svm_cpudata *cpudata = vcpu->cpudata;
1.10      maxv      939:        uint64_t val;
                    940:        size_t i;
1.1       maxv      941:
                    942:        switch (exit->u.msr.type) {
                    943:        case NVMM_EXIT_MSR_RDMSR:
                    944:                if (exit->u.msr.msr == MSR_CR_PAT) {
1.10      maxv      945:                        val = cpudata->vmcb->state.g_pat;
                    946:                        cpudata->vmcb->state.rax = (val & 0xFFFFFFFF);
1.13      maxv      947:                        cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1.10      maxv      948:                        goto handled;
                    949:                }
                    950:                if (exit->u.msr.msr == MSR_NB_CFG) {
                    951:                        val = NB_CFG_INITAPICCPUIDLO;
                    952:                        cpudata->vmcb->state.rax = (val & 0xFFFFFFFF);
1.13      maxv      953:                        cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1.10      maxv      954:                        goto handled;
                    955:                }
                    956:                for (i = 0; i < __arraycount(msr_ignore_list); i++) {
                    957:                        if (msr_ignore_list[i] != exit->u.msr.msr)
                    958:                                continue;
                    959:                        val = 0;
                    960:                        cpudata->vmcb->state.rax = (val & 0xFFFFFFFF);
1.13      maxv      961:                        cpudata->gprs[NVMM_X64_GPR_RDX] = (val >> 32);
1.1       maxv      962:                        goto handled;
                    963:                }
                    964:                break;
                    965:        case NVMM_EXIT_MSR_WRMSR:
                    966:                if (exit->u.msr.msr == MSR_EFER) {
                    967:                        if (__predict_false(exit->u.msr.val & ~EFER_VALID)) {
                    968:                                svm_inject_gp(mach, vcpu);
                    969:                                goto handled;
                    970:                        }
                    971:                        if ((cpudata->vmcb->state.efer ^ exit->u.msr.val) &
                    972:                             EFER_TLB_FLUSH) {
                    973:                                cpudata->tlb_want_flush = true;
                    974:                        }
                    975:                        cpudata->vmcb->state.efer = exit->u.msr.val | EFER_SVME;
                    976:                        goto handled;
                    977:                }
                    978:                if (exit->u.msr.msr == MSR_CR_PAT) {
                    979:                        cpudata->vmcb->state.g_pat = exit->u.msr.val;
                    980:                        goto handled;
                    981:                }
1.10      maxv      982:                for (i = 0; i < __arraycount(msr_ignore_list); i++) {
                    983:                        if (msr_ignore_list[i] != exit->u.msr.msr)
                    984:                                continue;
                    985:                        goto handled;
                    986:                }
1.1       maxv      987:                break;
                    988:        }
                    989:
                    990:        return false;
                    991:
                    992: handled:
                    993:        cpudata->vmcb->state.rip = cpudata->vmcb->ctrl.nrip;
                    994:        return true;
                    995: }
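
                          /*
                           * Sketch of the EDX:EAX split performed above, with a hypothetical
                           * PAT value: if g_pat = 0x0007040600070406, a guest RDMSR of
                           * MSR_CR_PAT returns RAX = 0x00070406 and RDX = 0x00070406, and
                           * RIP is advanced to the next-sequential instruction pointer
                           * provided by the hardware (vmcb->ctrl.nrip).
                           */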
                    996:
                    997: static void
                    998: svm_exit_msr(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
                    999:     struct nvmm_exit *exit)
                   1000: {
                   1001:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1002:        uint64_t info = cpudata->vmcb->ctrl.exitinfo1;
                   1003:
                   1004:        if (info == 0) {
                   1005:                exit->u.msr.type = NVMM_EXIT_MSR_RDMSR;
                   1006:        } else {
                   1007:                exit->u.msr.type = NVMM_EXIT_MSR_WRMSR;
                   1008:        }
                   1009:
1.13      maxv     1010:        exit->u.msr.msr = cpudata->gprs[NVMM_X64_GPR_RCX];
1.1       maxv     1011:
                   1012:        if (info == 1) {
                   1013:                uint64_t rdx, rax;
1.13      maxv     1014:                rdx = cpudata->gprs[NVMM_X64_GPR_RDX];
1.1       maxv     1015:                rax = cpudata->vmcb->state.rax;
                   1016:                exit->u.msr.val = (rdx << 32) | (rax & 0xFFFFFFFF);
                   1017:        } else {
                   1018:                exit->u.msr.val = 0;
                   1019:        }
                   1020:
                   1021:        if (svm_inkernel_handle_msr(mach, vcpu, exit)) {
                   1022:                exit->reason = NVMM_EXIT_NONE;
                   1023:                return;
                   1024:        }
                   1025:
                   1026:        exit->reason = NVMM_EXIT_MSR;
                   1027:        exit->u.msr.npc = cpudata->vmcb->ctrl.nrip;
                   1028: }
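
                          /*
                           * In svm_exit_msr() above, EXITINFO1 distinguishes RDMSR (0) from
                           * WRMSR (1). For a WRMSR the 64-bit value is rebuilt from the
                           * guest's EDX:EAX pair; e.g. (hypothetically) RDX = 0x00000001
                           * and RAX = 0x89ABCDEF give exit->u.msr.val = 0x0000000189ABCDEF.
                           */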
                   1029:
                   1030: static void
                   1031: svm_exit_npf(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
                   1032:     struct nvmm_exit *exit)
                   1033: {
                   1034:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1035:        gpaddr_t gpa = cpudata->vmcb->ctrl.exitinfo2;
                   1036:        int error;
                   1037:
                   1038:        error = uvm_fault(&mach->vm->vm_map, gpa, VM_PROT_ALL);
                   1039:
                   1040:        if (error) {
                   1041:                exit->reason = NVMM_EXIT_MEMORY;
                   1042:                if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_W)
                   1043:                        exit->u.mem.perm = NVMM_EXIT_MEMORY_WRITE;
                   1044:                else if (cpudata->vmcb->ctrl.exitinfo1 & PGEX_X)
                   1045:                        exit->u.mem.perm = NVMM_EXIT_MEMORY_EXEC;
                   1046:                else
                   1047:                        exit->u.mem.perm = NVMM_EXIT_MEMORY_READ;
                   1048:                exit->u.mem.gpa = gpa;
                   1049:                exit->u.mem.inst_len = cpudata->vmcb->ctrl.inst_len;
                   1050:                memcpy(exit->u.mem.inst_bytes, cpudata->vmcb->ctrl.inst_bytes,
                   1051:                    sizeof(exit->u.mem.inst_bytes));
                   1052:                exit->u.mem.npc = cpudata->vmcb->ctrl.nrip;
                   1053:        } else {
                   1054:                exit->reason = NVMM_EXIT_NONE;
                   1055:        }
                   1056: }
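
                          /*
                           * Note on svm_exit_npf() above: guest-physical mappings are
                           * populated lazily, so a nested page fault is first replayed
                           * through uvm_fault() on the machine's VM map. Only faults UVM
                           * cannot resolve (typically MMIO accesses) reach userland,
                           * together with the access type, the faulting GPA and the
                           * instruction bytes fetched by the hardware, for emulation.
                           */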
                   1057:
                   1058: static void
                   1059: svm_exit_xsetbv(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
                   1060:     struct nvmm_exit *exit)
                   1061: {
                   1062:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1063:        struct vmcb *vmcb = cpudata->vmcb;
                   1064:        uint64_t val;
                   1065:
                   1066:        exit->reason = NVMM_EXIT_NONE;
                   1067:
1.13      maxv     1068:        val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
1.3       maxv     1069:            (vmcb->state.rax & 0xFFFFFFFF);
1.1       maxv     1070:
1.13      maxv     1071:        if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
1.1       maxv     1072:                goto error;
                   1073:        } else if (__predict_false(vmcb->state.cpl != 0)) {
                   1074:                goto error;
                   1075:        } else if (__predict_false((val & ~svm_xcr0_mask) != 0)) {
                   1076:                goto error;
                   1077:        } else if (__predict_false((val & XCR0_X87) == 0)) {
                   1078:                goto error;
                   1079:        }
                   1080:
1.13      maxv     1081:        cpudata->gxcr0 = val;
1.1       maxv     1082:
1.7       maxv     1083:        cpudata->vmcb->state.rip = cpudata->vmcb->ctrl.nrip;
1.1       maxv     1084:        return;
                   1085:
                   1086: error:
                   1087:        svm_inject_gp(mach, vcpu);
                   1088: }
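
                          /*
                           * The checks above mirror the architectural #GP(0) conditions for
                           * XSETBV: ECX must select XCR0 (zero), CPL must be 0, no bit
                           * outside the host-supported mask may be set, and XCR0.X87 must
                           * remain set. For example, a guest executing XSETBV with ECX=0,
                           * EDX=0, EAX=0x3 (x87|SSE) passes these checks on any host whose
                           * svm_xcr0_mask includes the SSE state.
                           */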
                   1089:
                   1090: static void
                   1091: svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
                   1092: {
                   1093:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1094:
                   1095:        if (x86_xsave_features != 0) {
1.13      maxv     1096:                cpudata->hxcr0 = rdxcr(0);
                   1097:                wrxcr(0, cpudata->gxcr0);
1.1       maxv     1098:        }
                   1099:
                   1100:        cpudata->ts_set = (rcr0() & CR0_TS) != 0;
                   1101:
                   1102:        fpu_area_save(&cpudata->hfpu);
                   1103:        fpu_area_restore(&cpudata->gfpu);
                   1104: }
                   1105:
                   1106: static void
                   1107: svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
                   1108: {
                   1109:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1110:
                   1111:        fpu_area_save(&cpudata->gfpu);
                   1112:        fpu_area_restore(&cpudata->hfpu);
                   1113:
                   1114:        if (cpudata->ts_set) {
                   1115:                stts();
                   1116:        }
                   1117:
                   1118:        if (x86_xsave_features != 0) {
1.13      maxv     1119:                cpudata->gxcr0 = rdxcr(0);
                   1120:                wrxcr(0, cpudata->hxcr0);
1.1       maxv     1121:        }
                   1122: }
                   1123:
                   1124: static void
                   1125: svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
                   1126: {
                   1127:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1128:
                   1129:        x86_dbregs_save(curlwp);
                   1130:
1.13      maxv     1131:        ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
                   1132:        ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
                   1133:        ldr2(cpudata->drs[NVMM_X64_DR_DR2]);
                   1134:        ldr3(cpudata->drs[NVMM_X64_DR_DR3]);
1.1       maxv     1135: }
                   1136:
                   1137: static void
                   1138: svm_vcpu_guest_dbregs_leave(struct nvmm_cpu *vcpu)
                   1139: {
                   1140:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1141:
1.13      maxv     1142:        cpudata->drs[NVMM_X64_DR_DR0] = rdr0();
                   1143:        cpudata->drs[NVMM_X64_DR_DR1] = rdr1();
                   1144:        cpudata->drs[NVMM_X64_DR_DR2] = rdr2();
                   1145:        cpudata->drs[NVMM_X64_DR_DR3] = rdr3();
1.1       maxv     1146:
                   1147:        x86_dbregs_restore(curlwp);
                   1148: }
                   1149:
                   1150: static void
                   1151: svm_vcpu_guest_misc_enter(struct nvmm_cpu *vcpu)
                   1152: {
                   1153:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1154:
                   1155:        cpudata->star = rdmsr(MSR_STAR);
                   1156:        cpudata->lstar = rdmsr(MSR_LSTAR);
                   1157:        cpudata->cstar = rdmsr(MSR_CSTAR);
                   1158:        cpudata->sfmask = rdmsr(MSR_SFMASK);
1.14    ! maxv     1159:        cpudata->fsbase = rdmsr(MSR_FSBASE);
        !          1160:        cpudata->kernelgsbase = rdmsr(MSR_KERNELGSBASE);
1.1       maxv     1161: }
                   1162:
                   1163: static void
                   1164: svm_vcpu_guest_misc_leave(struct nvmm_cpu *vcpu)
                   1165: {
                   1166:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1167:
                   1168:        wrmsr(MSR_STAR, cpudata->star);
                   1169:        wrmsr(MSR_LSTAR, cpudata->lstar);
                   1170:        wrmsr(MSR_CSTAR, cpudata->cstar);
                   1171:        wrmsr(MSR_SFMASK, cpudata->sfmask);
1.14    ! maxv     1172:        wrmsr(MSR_FSBASE, cpudata->fsbase);
        !          1173:        wrmsr(MSR_KERNELGSBASE, cpudata->kernelgsbase);
1.1       maxv     1174: }
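
                          /*
                           * The enter/leave pair above exists because the syscall MSRs
                           * (STAR, LSTAR, CSTAR, SFMASK), FSBASE and KERNELGSBASE are left
                           * directly accessible to the guest in the MSR bitmap (see
                           * svm_vcpu_init() below), so the host copies must be saved before
                           * entering the guest and restored afterwards. The guest's own
                           * copies live in vmcb->state and are presumably swapped in and
                           * out by svm_vmrun() via the VMLOAD/VMSAVE mechanism.
                           */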
                   1175:
                   1176: static int
                   1177: svm_vcpu_run(struct nvmm_machine *mach, struct nvmm_cpu *vcpu,
                   1178:     struct nvmm_exit *exit)
                   1179: {
                   1180:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1181:        struct vmcb *vmcb = cpudata->vmcb;
                   1182:        bool tlb_need_flush = false;
                   1183:        int hcpu, s;
                   1184:
                   1185:        kpreempt_disable();
                   1186:        hcpu = cpu_number();
                   1187:
                   1188:        if (vcpu->hcpu_last != hcpu || cpudata->shared_asid) {
                   1189:                tlb_need_flush = true;
                   1190:        }
                   1191:
                   1192:        if (cpudata->tlb_want_flush || tlb_need_flush) {
                   1193:                vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
                   1194:        } else {
                   1195:                vmcb->ctrl.tlb_ctrl = 0;
                   1196:        }
                   1197:
                   1198:        if (vcpu->hcpu_last != hcpu) {
                   1199:                vmcb->ctrl.tsc_offset = cpudata->tsc_offset +
                   1200:                    curcpu()->ci_data.cpu_cc_skew;
1.12      maxv     1201:                svm_vmcb_cache_flush_all(vmcb);
1.1       maxv     1202:        }
                   1203:
                   1204:        svm_vcpu_guest_dbregs_enter(vcpu);
                   1205:        svm_vcpu_guest_misc_enter(vcpu);
                   1206:
                   1207:        while (1) {
                   1208:                s = splhigh();
                   1209:                svm_vcpu_guest_fpu_enter(vcpu);
1.13      maxv     1210:                svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
1.1       maxv     1211:                svm_vcpu_guest_fpu_leave(vcpu);
                   1212:                splx(s);
                   1213:
                   1214:                svm_vmcb_cache_default(vmcb);
                   1215:
                   1216:                if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
                   1217:                        if (cpudata->tlb_want_flush) {
                   1218:                                cpudata->tlb_want_flush = false;
                   1219:                        }
                   1220:                        vcpu->hcpu_last = hcpu;
                   1221:                }
                   1222:
                   1223:                switch (vmcb->ctrl.exitcode) {
                   1224:                case VMCB_EXITCODE_INTR:
                   1225:                case VMCB_EXITCODE_NMI:
                   1226:                        exit->reason = NVMM_EXIT_NONE;
                   1227:                        break;
                   1228:                case VMCB_EXITCODE_VINTR:
1.10      maxv     1229:                        svm_event_waitexit_disable(vcpu, false);
1.1       maxv     1230:                        exit->reason = NVMM_EXIT_INT_READY;
                   1231:                        break;
                   1232:                case VMCB_EXITCODE_IRET:
1.10      maxv     1233:                        svm_event_waitexit_disable(vcpu, true);
1.1       maxv     1234:                        exit->reason = NVMM_EXIT_NMI_READY;
                   1235:                        break;
                   1236:                case VMCB_EXITCODE_CPUID:
                   1237:                        svm_exit_cpuid(mach, vcpu, exit);
                   1238:                        break;
                   1239:                case VMCB_EXITCODE_HLT:
1.10      maxv     1240:                        svm_exit_hlt(mach, vcpu, exit);
1.1       maxv     1241:                        break;
                   1242:                case VMCB_EXITCODE_IOIO:
                   1243:                        svm_exit_io(mach, vcpu, exit);
                   1244:                        break;
                   1245:                case VMCB_EXITCODE_MSR:
                   1246:                        svm_exit_msr(mach, vcpu, exit);
                   1247:                        break;
                   1248:                case VMCB_EXITCODE_SHUTDOWN:
                   1249:                        exit->reason = NVMM_EXIT_SHUTDOWN;
                   1250:                        break;
                   1251:                case VMCB_EXITCODE_RDPMC:
                   1252:                case VMCB_EXITCODE_RSM:
                   1253:                case VMCB_EXITCODE_INVLPGA:
                   1254:                case VMCB_EXITCODE_VMRUN:
                   1255:                case VMCB_EXITCODE_VMMCALL:
                   1256:                case VMCB_EXITCODE_VMLOAD:
                   1257:                case VMCB_EXITCODE_VMSAVE:
                   1258:                case VMCB_EXITCODE_STGI:
                   1259:                case VMCB_EXITCODE_CLGI:
                   1260:                case VMCB_EXITCODE_SKINIT:
                   1261:                case VMCB_EXITCODE_RDTSCP:
                   1262:                        svm_inject_ud(mach, vcpu);
                   1263:                        exit->reason = NVMM_EXIT_NONE;
                   1264:                        break;
                   1265:                case VMCB_EXITCODE_MONITOR:
                   1266:                        exit->reason = NVMM_EXIT_MONITOR;
                   1267:                        break;
                   1268:                case VMCB_EXITCODE_MWAIT:
                   1269:                        exit->reason = NVMM_EXIT_MWAIT;
                   1270:                        break;
                   1271:                case VMCB_EXITCODE_MWAIT_CONDITIONAL:
                   1272:                        exit->reason = NVMM_EXIT_MWAIT_COND;
                   1273:                        break;
                   1274:                case VMCB_EXITCODE_XSETBV:
                   1275:                        svm_exit_xsetbv(mach, vcpu, exit);
                   1276:                        break;
                   1277:                case VMCB_EXITCODE_NPF:
                   1278:                        svm_exit_npf(mach, vcpu, exit);
                   1279:                        break;
                   1280:                case VMCB_EXITCODE_FERR_FREEZE: /* ? */
                   1281:                default:
                   1282:                        exit->reason = NVMM_EXIT_INVALID;
                   1283:                        break;
                   1284:                }
                   1285:
                    1286:                /* Check for reasons to return to userland; if none, keep rolling. */
                   1287:                if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) {
                   1288:                        break;
                   1289:                }
1.10      maxv     1290:                if (curcpu()->ci_data.cpu_softints != 0) {
                   1291:                        break;
                   1292:                }
                   1293:                if (curlwp->l_flag & LW_USERRET) {
                   1294:                        break;
                   1295:                }
1.1       maxv     1296:                if (exit->reason != NVMM_EXIT_NONE) {
                   1297:                        break;
                   1298:                }
                   1299:        }
                   1300:
                   1301:        svm_vcpu_guest_misc_leave(vcpu);
                   1302:        svm_vcpu_guest_dbregs_leave(vcpu);
                   1303:
                   1304:        kpreempt_enable();
                   1305:
                   1306:        exit->exitstate[NVMM_X64_EXITSTATE_CR8] = __SHIFTOUT(vmcb->ctrl.v,
                   1307:            VMCB_CTRL_V_TPR);
1.6       maxv     1308:        exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] = vmcb->state.rflags;
1.1       maxv     1309:
1.10      maxv     1310:        exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
                   1311:            ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0);
                   1312:        exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
                   1313:            cpudata->int_window_exit;
                   1314:        exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT] =
                   1315:            cpudata->nmi_window_exit;
                   1316:
1.1       maxv     1317:        return 0;
                   1318: }
                   1319:
                   1320: /* -------------------------------------------------------------------------- */
                   1321:
                   1322: static int
                   1323: svm_memalloc(paddr_t *pa, vaddr_t *va, size_t npages)
                   1324: {
                   1325:        struct pglist pglist;
                   1326:        paddr_t _pa;
                   1327:        vaddr_t _va;
                   1328:        size_t i;
                   1329:        int ret;
                   1330:
                   1331:        ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0,
                   1332:            &pglist, 1, 0);
                   1333:        if (ret != 0)
                   1334:                return ENOMEM;
                   1335:        _pa = TAILQ_FIRST(&pglist)->phys_addr;
                   1336:        _va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0,
                   1337:            UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
                   1338:        if (_va == 0)
                   1339:                goto error;
                   1340:
                   1341:        for (i = 0; i < npages; i++) {
                   1342:                pmap_kenter_pa(_va + i * PAGE_SIZE, _pa + i * PAGE_SIZE,
                   1343:                    VM_PROT_READ | VM_PROT_WRITE, PMAP_WRITE_BACK);
                   1344:        }
1.5       maxv     1345:        pmap_update(pmap_kernel());
1.1       maxv     1346:
                   1347:        memset((void *)_va, 0, npages * PAGE_SIZE);
                   1348:
                   1349:        *pa = _pa;
                   1350:        *va = _va;
                   1351:        return 0;
                   1352:
                   1353: error:
                   1354:        for (i = 0; i < npages; i++) {
                   1355:                uvm_pagefree(PHYS_TO_VM_PAGE(_pa + i * PAGE_SIZE));
                   1356:        }
                   1357:        return ENOMEM;
                   1358: }
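
                          /*
                           * svm_memalloc() above passes an alignment of PAGE_SIZE and a
                           * single segment ("nsegs" = 1) to uvm_pglistalloc(), so the VMCB,
                           * the I/O bitmap and the MSR bitmap each come out physically
                           * contiguous: the hardware consumes these structures by physical
                           * address (vmcb_pa, iopm_base_pa, msrpm_base_pa), not through the
                           * kernel mapping.
                           */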
                   1359:
                   1360: static void
                   1361: svm_memfree(paddr_t pa, vaddr_t va, size_t npages)
                   1362: {
                   1363:        size_t i;
                   1364:
                   1365:        pmap_kremove(va, npages * PAGE_SIZE);
                   1366:        pmap_update(pmap_kernel());
                   1367:        uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY);
                   1368:        for (i = 0; i < npages; i++) {
                   1369:                uvm_pagefree(PHYS_TO_VM_PAGE(pa + i * PAGE_SIZE));
                   1370:        }
                   1371: }
                   1372:
                   1373: /* -------------------------------------------------------------------------- */
                   1374:
                   1375: #define SVM_MSRBM_READ __BIT(0)
                   1376: #define SVM_MSRBM_WRITE        __BIT(1)
                   1377:
                   1378: static void
                   1379: svm_vcpu_msr_allow(uint8_t *bitmap, uint64_t msr, bool read, bool write)
                   1380: {
                   1381:        uint64_t byte;
                   1382:        uint8_t bitoff;
                   1383:
                   1384:        if (msr < 0x00002000) {
                   1385:                /* Range 1 */
                   1386:                byte = ((msr - 0x00000000) >> 2UL) + 0x0000;
                   1387:        } else if (msr >= 0xC0000000 && msr < 0xC0002000) {
                   1388:                /* Range 2 */
                   1389:                byte = ((msr - 0xC0000000) >> 2UL) + 0x0800;
                   1390:        } else if (msr >= 0xC0010000 && msr < 0xC0012000) {
                   1391:                /* Range 3 */
                   1392:                byte = ((msr - 0xC0010000) >> 2UL) + 0x1000;
                   1393:        } else {
                   1394:                panic("%s: wrong range", __func__);
                   1395:        }
                   1396:
                   1397:        bitoff = (msr & 0x3) << 1;
                   1398:
                   1399:        if (read) {
                   1400:                bitmap[byte] &= ~(SVM_MSRBM_READ << bitoff);
                   1401:        }
                   1402:        if (write) {
                   1403:                bitmap[byte] &= ~(SVM_MSRBM_WRITE << bitoff);
                   1404:        }
                   1405: }
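
                          /*
                           * Worked example for svm_vcpu_msr_allow() (arithmetic only; the
                           * layout follows the three ranges above): MSR_LSTAR is 0xC0000082,
                           * which falls in range 2, so byte = ((0xC0000082 - 0xC0000000)
                           * >> 2) + 0x0800 = 0x0820, and bitoff = (0x82 & 0x3) << 1 = 4.
                           * Allowing both accesses clears bits 4 (read) and 5 (write) of
                           * bitmap[0x820]. Each byte thus covers four MSRs, with two
                           * intercept bits per MSR.
                           */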
                   1406:
                   1407: static void
                   1408: svm_asid_alloc(struct nvmm_cpu *vcpu)
                   1409: {
                   1410:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1411:        struct vmcb *vmcb = cpudata->vmcb;
                   1412:        size_t i, oct, bit;
                   1413:
                   1414:        mutex_enter(&svm_asidlock);
                   1415:
                   1416:        for (i = 0; i < svm_maxasid; i++) {
                   1417:                oct = i / 8;
                   1418:                bit = i % 8;
                   1419:
                   1420:                if (svm_asidmap[oct] & __BIT(bit)) {
                   1421:                        continue;
                   1422:                }
                   1423:
                   1424:                svm_asidmap[oct] |= __BIT(bit);
                   1425:                vmcb->ctrl.guest_asid = i;
                   1426:                mutex_exit(&svm_asidlock);
                   1427:                return;
                   1428:        }
                   1429:
                   1430:        /*
                   1431:         * No free ASID. Use the last one, which is shared and requires
                   1432:         * special TLB handling.
                   1433:         */
                   1434:        cpudata->shared_asid = true;
                   1435:        vmcb->ctrl.guest_asid = svm_maxasid - 1;
                   1436:        mutex_exit(&svm_asidlock);
                   1437: }
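
                          /*
                           * ASID allocation above is a first-fit scan of a global bitmap,
                           * one bit per ASID up to svm_maxasid. Once the map is exhausted,
                           * every overflowing VCPU shares the last ASID and sets
                           * shared_asid, which svm_vcpu_run() compensates for by requesting
                           * a TLB flush on every VMRUN.
                           */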
                   1438:
                   1439: static void
                   1440: svm_asid_free(struct nvmm_cpu *vcpu)
                   1441: {
                   1442:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1443:        struct vmcb *vmcb = cpudata->vmcb;
                   1444:        size_t oct, bit;
                   1445:
                   1446:        if (cpudata->shared_asid) {
                   1447:                return;
                   1448:        }
                   1449:
                   1450:        oct = vmcb->ctrl.guest_asid / 8;
                   1451:        bit = vmcb->ctrl.guest_asid % 8;
                   1452:
                   1453:        mutex_enter(&svm_asidlock);
                   1454:        svm_asidmap[oct] &= ~__BIT(bit);
                   1455:        mutex_exit(&svm_asidlock);
                   1456: }
                   1457:
                   1458: static void
                   1459: svm_vcpu_init(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
                   1460: {
                   1461:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1462:        struct vmcb *vmcb = cpudata->vmcb;
                   1463:
                   1464:        /* Allow reads/writes of Control Registers. */
                   1465:        vmcb->ctrl.intercept_cr = 0;
                   1466:
                   1467:        /* Allow reads/writes of Debug Registers. */
                   1468:        vmcb->ctrl.intercept_dr = 0;
                   1469:
                   1470:        /* Allow exceptions 0 to 31. */
                   1471:        vmcb->ctrl.intercept_vec = 0;
                   1472:
                   1473:        /*
                   1474:         * Allow:
                    1475:         *  - SMI [SMM interrupts]
                    1476:         *  - VINTR [virtual interrupts]
                    1477:         *  - CR0_SPEC [CR0 writes that change fields other than CR0.TS or CR0.MP]
                   1478:         *  - RIDTR [reads of IDTR]
                   1479:         *  - RGDTR [reads of GDTR]
                   1480:         *  - RLDTR [reads of LDTR]
                   1481:         *  - RTR [reads of TR]
                   1482:         *  - WIDTR [writes of IDTR]
                   1483:         *  - WGDTR [writes of GDTR]
                   1484:         *  - WLDTR [writes of LDTR]
                   1485:         *  - WTR [writes of TR]
                   1486:         *  - RDTSC [rdtsc instruction]
                   1487:         *  - PUSHF [pushf instruction]
                   1488:         *  - POPF [popf instruction]
                   1489:         *  - IRET [iret instruction]
                   1490:         *  - INTN [int $n instructions]
                   1491:         *  - INVD [invd instruction]
                   1492:         *  - PAUSE [pause instruction]
                    1493:         *  - INVLPG [invlpg instruction]
                   1494:         *  - TASKSW [task switches]
                   1495:         *
                   1496:         * Intercept the rest below.
                   1497:         */
                   1498:        vmcb->ctrl.intercept_misc1 =
                   1499:            VMCB_CTRL_INTERCEPT_INTR |
                   1500:            VMCB_CTRL_INTERCEPT_NMI |
                   1501:            VMCB_CTRL_INTERCEPT_INIT |
                   1502:            VMCB_CTRL_INTERCEPT_RDPMC |
                   1503:            VMCB_CTRL_INTERCEPT_CPUID |
                   1504:            VMCB_CTRL_INTERCEPT_RSM |
                   1505:            VMCB_CTRL_INTERCEPT_HLT |
                   1506:            VMCB_CTRL_INTERCEPT_INVLPGA |
                   1507:            VMCB_CTRL_INTERCEPT_IOIO_PROT |
                   1508:            VMCB_CTRL_INTERCEPT_MSR_PROT |
                   1509:            VMCB_CTRL_INTERCEPT_FERR_FREEZE |
                   1510:            VMCB_CTRL_INTERCEPT_SHUTDOWN;
                   1511:
                   1512:        /*
                   1513:         * Allow:
                   1514:         *  - ICEBP [icebp instruction]
                   1515:         *  - WBINVD [wbinvd instruction]
                   1516:         *  - WCR_SPEC(0..15) [writes of CR0-15, received after instruction]
                   1517:         *
                   1518:         * Intercept the rest below.
                   1519:         */
                   1520:        vmcb->ctrl.intercept_misc2 =
                   1521:            VMCB_CTRL_INTERCEPT_VMRUN |
                   1522:            VMCB_CTRL_INTERCEPT_VMMCALL |
                   1523:            VMCB_CTRL_INTERCEPT_VMLOAD |
                   1524:            VMCB_CTRL_INTERCEPT_VMSAVE |
                   1525:            VMCB_CTRL_INTERCEPT_STGI |
                   1526:            VMCB_CTRL_INTERCEPT_CLGI |
                   1527:            VMCB_CTRL_INTERCEPT_SKINIT |
                   1528:            VMCB_CTRL_INTERCEPT_RDTSCP |
                   1529:            VMCB_CTRL_INTERCEPT_MONITOR |
                   1530:            VMCB_CTRL_INTERCEPT_MWAIT |
                   1531:            VMCB_CTRL_INTERCEPT_XSETBV;
                   1532:
                   1533:        /* Intercept all I/O accesses. */
                   1534:        memset(cpudata->iobm, 0xFF, IOBM_SIZE);
                   1535:        vmcb->ctrl.iopm_base_pa = cpudata->iobm_pa;
                   1536:
                   1537:        /*
                   1538:         * Allow:
                   1539:         *  - EFER [read]
                   1540:         *  - STAR [read, write]
                   1541:         *  - LSTAR [read, write]
                   1542:         *  - CSTAR [read, write]
                   1543:         *  - SFMASK [read, write]
                   1544:         *  - KERNELGSBASE [read, write]
                   1545:         *  - SYSENTER_CS [read, write]
                   1546:         *  - SYSENTER_ESP [read, write]
                   1547:         *  - SYSENTER_EIP [read, write]
                   1548:         *  - FSBASE [read, write]
                   1549:         *  - GSBASE [read, write]
1.10      maxv     1550:         *  - TSC [read]
1.1       maxv     1551:         *
                   1552:         * Intercept the rest.
                   1553:         */
                   1554:        memset(cpudata->msrbm, 0xFF, MSRBM_SIZE);
                   1555:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_EFER, true, false);
                   1556:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_STAR, true, true);
                   1557:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_LSTAR, true, true);
                   1558:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_CSTAR, true, true);
                   1559:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_SFMASK, true, true);
                   1560:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_KERNELGSBASE, true, true);
                   1561:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_CS, true, true);
                   1562:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_ESP, true, true);
                   1563:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_SYSENTER_EIP, true, true);
                   1564:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_FSBASE, true, true);
                   1565:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_GSBASE, true, true);
1.10      maxv     1566:        svm_vcpu_msr_allow(cpudata->msrbm, MSR_TSC, true, false);
1.1       maxv     1567:        vmcb->ctrl.msrpm_base_pa = cpudata->msrbm_pa;
                   1568:
                   1569:        /* Generate ASID. */
                   1570:        svm_asid_alloc(vcpu);
                   1571:
                   1572:        /* Virtual TPR. */
                   1573:        vmcb->ctrl.v = VMCB_CTRL_V_INTR_MASKING;
                   1574:
                   1575:        /* Enable Nested Paging. */
                   1576:        vmcb->ctrl.enable1 = VMCB_CTRL_ENABLE_NP;
                   1577:        vmcb->ctrl.n_cr3 = mach->vm->vm_map.pmap->pm_pdirpa[0];
                   1578:
                   1579:        /* Must always be set. */
                   1580:        vmcb->state.efer = EFER_SVME;
                   1581:
                   1582:        /* Init XSAVE header. */
                   1583:        cpudata->gfpu.xsh_xstate_bv = svm_xcr0_mask;
                   1584:        cpudata->gfpu.xsh_xcomp_bv = 0;
                   1585:
                   1586:        /* Bluntly hide the host TSC. */
                   1587:        cpudata->tsc_offset = rdtsc();
                   1588: }
                   1589:
                   1590: static int
                   1591: svm_vcpu_create(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
                   1592: {
                   1593:        struct svm_cpudata *cpudata;
                   1594:        int error;
                   1595:
                   1596:        /* Allocate the SVM cpudata. */
                   1597:        cpudata = (struct svm_cpudata *)uvm_km_alloc(kernel_map,
                   1598:            roundup(sizeof(*cpudata), PAGE_SIZE), 0,
                   1599:            UVM_KMF_WIRED|UVM_KMF_ZERO);
                   1600:        vcpu->cpudata = cpudata;
                   1601:
                   1602:        /* VMCB */
                   1603:        error = svm_memalloc(&cpudata->vmcb_pa, (vaddr_t *)&cpudata->vmcb,
                   1604:            VMCB_NPAGES);
                   1605:        if (error)
                   1606:                goto error;
                   1607:
                   1608:        /* I/O Bitmap */
                   1609:        error = svm_memalloc(&cpudata->iobm_pa, (vaddr_t *)&cpudata->iobm,
                   1610:            IOBM_NPAGES);
                   1611:        if (error)
                   1612:                goto error;
                   1613:
                   1614:        /* MSR Bitmap */
                   1615:        error = svm_memalloc(&cpudata->msrbm_pa, (vaddr_t *)&cpudata->msrbm,
                   1616:            MSRBM_NPAGES);
                   1617:        if (error)
                   1618:                goto error;
                   1619:
                   1620:        /* Init the VCPU info. */
                   1621:        svm_vcpu_init(mach, vcpu);
                   1622:
                   1623:        return 0;
                   1624:
                   1625: error:
                   1626:        if (cpudata->vmcb_pa) {
                   1627:                svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb,
                   1628:                    VMCB_NPAGES);
                   1629:        }
                   1630:        if (cpudata->iobm_pa) {
                   1631:                svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm,
                   1632:                    IOBM_NPAGES);
                   1633:        }
                   1634:        if (cpudata->msrbm_pa) {
                   1635:                svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm,
                   1636:                    MSRBM_NPAGES);
                   1637:        }
                   1638:        uvm_km_free(kernel_map, (vaddr_t)cpudata,
                   1639:            roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
                   1640:        return error;
                   1641: }
                   1642:
                   1643: static void
                   1644: svm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_cpu *vcpu)
                   1645: {
                   1646:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1647:
                   1648:        svm_asid_free(vcpu);
                   1649:
                   1650:        svm_memfree(cpudata->vmcb_pa, (vaddr_t)cpudata->vmcb, VMCB_NPAGES);
                   1651:        svm_memfree(cpudata->iobm_pa, (vaddr_t)cpudata->iobm, IOBM_NPAGES);
                   1652:        svm_memfree(cpudata->msrbm_pa, (vaddr_t)cpudata->msrbm, MSRBM_NPAGES);
                   1653:
                   1654:        uvm_km_free(kernel_map, (vaddr_t)cpudata,
                   1655:            roundup(sizeof(*cpudata), PAGE_SIZE), UVM_KMF_WIRED);
                   1656: }
                   1657:
                   1658: #define SVM_SEG_ATTRIB_TYPE            __BITS(4,0)
                   1659: #define SVM_SEG_ATTRIB_DPL             __BITS(6,5)
                   1660: #define SVM_SEG_ATTRIB_P               __BIT(7)
                   1661: #define SVM_SEG_ATTRIB_AVL             __BIT(8)
                   1662: #define SVM_SEG_ATTRIB_LONG            __BIT(9)
                   1663: #define SVM_SEG_ATTRIB_DEF32           __BIT(10)
                   1664: #define SVM_SEG_ATTRIB_GRAN            __BIT(11)
                   1665:
                   1666: static void
                   1667: svm_vcpu_setstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
                   1668: {
                   1669:        vseg->selector = seg->selector;
                   1670:        vseg->attrib =
                   1671:            __SHIFTIN(seg->attrib.type, SVM_SEG_ATTRIB_TYPE) |
                   1672:            __SHIFTIN(seg->attrib.dpl, SVM_SEG_ATTRIB_DPL) |
                   1673:            __SHIFTIN(seg->attrib.p, SVM_SEG_ATTRIB_P) |
                   1674:            __SHIFTIN(seg->attrib.avl, SVM_SEG_ATTRIB_AVL) |
                   1675:            __SHIFTIN(seg->attrib.lng, SVM_SEG_ATTRIB_LONG) |
                   1676:            __SHIFTIN(seg->attrib.def32, SVM_SEG_ATTRIB_DEF32) |
                   1677:            __SHIFTIN(seg->attrib.gran, SVM_SEG_ATTRIB_GRAN);
                   1678:        vseg->limit = seg->limit;
                   1679:        vseg->base = seg->base;
                   1680: }
                   1681:
                   1682: static void
                   1683: svm_vcpu_getstate_seg(struct nvmm_x64_state_seg *seg, struct vmcb_segment *vseg)
                   1684: {
                   1685:        seg->selector = vseg->selector;
                   1686:        seg->attrib.type = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_TYPE);
                   1687:        seg->attrib.dpl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DPL);
                   1688:        seg->attrib.p = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_P);
                   1689:        seg->attrib.avl = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_AVL);
                   1690:        seg->attrib.lng = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_LONG);
                   1691:        seg->attrib.def32 = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_DEF32);
                   1692:        seg->attrib.gran = __SHIFTOUT(vseg->attrib, SVM_SEG_ATTRIB_GRAN);
                   1693:        seg->limit = vseg->limit;
                   1694:        seg->base = vseg->base;
                   1695: }
                   1696:
1.13      maxv     1697: static inline bool
                   1698: svm_state_tlb_flush(struct vmcb *vmcb, struct nvmm_x64_state *state,
                   1699:     uint64_t flags)
1.1       maxv     1700: {
                   1701:        if (flags & NVMM_X64_STATE_CRS) {
1.13      maxv     1702:                if ((vmcb->state.cr0 ^
                   1703:                     state->crs[NVMM_X64_CR_CR0]) & CR0_TLB_FLUSH) {
1.1       maxv     1704:                        return true;
                   1705:                }
1.13      maxv     1706:                if (vmcb->state.cr3 != state->crs[NVMM_X64_CR_CR3]) {
1.1       maxv     1707:                        return true;
                   1708:                }
1.13      maxv     1709:                if ((vmcb->state.cr4 ^
                   1710:                     state->crs[NVMM_X64_CR_CR4]) & CR4_TLB_FLUSH) {
1.1       maxv     1711:                        return true;
                   1712:                }
                   1713:        }
                   1714:
                   1715:        if (flags & NVMM_X64_STATE_MSRS) {
1.13      maxv     1716:                if ((vmcb->state.efer ^
                   1717:                     state->msrs[NVMM_X64_MSR_EFER]) & EFER_TLB_FLUSH) {
1.1       maxv     1718:                        return true;
                   1719:                }
                   1720:        }
                   1721:
                   1722:        return false;
                   1723: }
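
                          /*
                           * The predicate above flags the same state changes that would
                           * invalidate TLB entries on real hardware: paging-related CR0/CR4
                           * bits (the CR0_TLB_FLUSH/CR4_TLB_FLUSH masks), any CR3 change,
                           * and paging-related EFER bits. Since guest translations are
                           * tagged with the VCPU's ASID, an explicit flush must be requested
                           * when userland rewrites these registers through setstate.
                           */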
                   1724:
                   1725: static void
                   1726: svm_vcpu_setstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
                   1727: {
1.13      maxv     1728:        struct nvmm_x64_state *state = (struct nvmm_x64_state *)data;
1.1       maxv     1729:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1730:        struct vmcb *vmcb = cpudata->vmcb;
                   1731:        struct fxsave *fpustate;
                   1732:
1.13      maxv     1733:        if (svm_state_tlb_flush(vmcb, state, flags)) {
1.1       maxv     1734:                cpudata->tlb_want_flush = true;
                   1735:        }
                   1736:
                   1737:        if (flags & NVMM_X64_STATE_SEGS) {
1.13      maxv     1738:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_CS],
1.1       maxv     1739:                    &vmcb->state.cs);
1.13      maxv     1740:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_DS],
1.1       maxv     1741:                    &vmcb->state.ds);
1.13      maxv     1742:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_ES],
1.1       maxv     1743:                    &vmcb->state.es);
1.13      maxv     1744:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_FS],
1.1       maxv     1745:                    &vmcb->state.fs);
1.13      maxv     1746:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GS],
1.1       maxv     1747:                    &vmcb->state.gs);
1.13      maxv     1748:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_SS],
1.1       maxv     1749:                    &vmcb->state.ss);
1.13      maxv     1750:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_GDT],
1.1       maxv     1751:                    &vmcb->state.gdt);
1.13      maxv     1752:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_IDT],
1.1       maxv     1753:                    &vmcb->state.idt);
1.13      maxv     1754:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_LDT],
1.1       maxv     1755:                    &vmcb->state.ldt);
1.13      maxv     1756:                svm_vcpu_setstate_seg(&state->segs[NVMM_X64_SEG_TR],
1.1       maxv     1757:                    &vmcb->state.tr);
                   1758:        }
                   1759:
1.13      maxv     1760:        CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
1.1       maxv     1761:        if (flags & NVMM_X64_STATE_GPRS) {
1.13      maxv     1762:                memcpy(cpudata->gprs, state->gprs, sizeof(state->gprs));
1.1       maxv     1763:
1.13      maxv     1764:                vmcb->state.rip = state->gprs[NVMM_X64_GPR_RIP];
                   1765:                vmcb->state.rsp = state->gprs[NVMM_X64_GPR_RSP];
                   1766:                vmcb->state.rax = state->gprs[NVMM_X64_GPR_RAX];
                   1767:                vmcb->state.rflags = state->gprs[NVMM_X64_GPR_RFLAGS];
1.1       maxv     1768:        }
                   1769:
                   1770:        if (flags & NVMM_X64_STATE_CRS) {
1.13      maxv     1771:                vmcb->state.cr0 = state->crs[NVMM_X64_CR_CR0];
                   1772:                vmcb->state.cr2 = state->crs[NVMM_X64_CR_CR2];
                   1773:                vmcb->state.cr3 = state->crs[NVMM_X64_CR_CR3];
                   1774:                vmcb->state.cr4 = state->crs[NVMM_X64_CR_CR4];
1.1       maxv     1775:
                   1776:                vmcb->ctrl.v &= ~VMCB_CTRL_V_TPR;
1.13      maxv     1777:                vmcb->ctrl.v |= __SHIFTIN(state->crs[NVMM_X64_CR_CR8],
1.1       maxv     1778:                    VMCB_CTRL_V_TPR);
                   1779:
                   1780:                /* Clear unsupported XCR0 bits, set mandatory X87 bit. */
                   1781:                if (svm_xcr0_mask != 0) {
1.13      maxv     1782:                        cpudata->gxcr0 = state->crs[NVMM_X64_CR_XCR0];
                   1783:                        cpudata->gxcr0 &= svm_xcr0_mask;
                   1784:                        cpudata->gxcr0 |= XCR0_X87;
1.1       maxv     1785:                } else {
1.13      maxv     1786:                        cpudata->gxcr0 = 0;
1.1       maxv     1787:                }
                   1788:        }
                   1789:
1.13      maxv     1790:        CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
1.1       maxv     1791:        if (flags & NVMM_X64_STATE_DRS) {
1.13      maxv     1792:                memcpy(cpudata->drs, state->drs, sizeof(state->drs));
1.1       maxv     1793:
1.13      maxv     1794:                vmcb->state.dr6 = state->drs[NVMM_X64_DR_DR6];
                   1795:                vmcb->state.dr7 = state->drs[NVMM_X64_DR_DR7];
1.1       maxv     1796:        }
                   1797:
                   1798:        if (flags & NVMM_X64_STATE_MSRS) {
                   1799:                /* Bit EFER_SVME is mandatory. */
1.13      maxv     1800:                vmcb->state.efer = state->msrs[NVMM_X64_MSR_EFER] | EFER_SVME;
1.1       maxv     1801:
1.13      maxv     1802:                vmcb->state.star = state->msrs[NVMM_X64_MSR_STAR];
                   1803:                vmcb->state.lstar = state->msrs[NVMM_X64_MSR_LSTAR];
                   1804:                vmcb->state.cstar = state->msrs[NVMM_X64_MSR_CSTAR];
                   1805:                vmcb->state.sfmask = state->msrs[NVMM_X64_MSR_SFMASK];
1.1       maxv     1806:                vmcb->state.kernelgsbase =
1.13      maxv     1807:                    state->msrs[NVMM_X64_MSR_KERNELGSBASE];
1.1       maxv     1808:                vmcb->state.sysenter_cs =
1.13      maxv     1809:                    state->msrs[NVMM_X64_MSR_SYSENTER_CS];
1.1       maxv     1810:                vmcb->state.sysenter_esp =
1.13      maxv     1811:                    state->msrs[NVMM_X64_MSR_SYSENTER_ESP];
1.1       maxv     1812:                vmcb->state.sysenter_eip =
1.13      maxv     1813:                    state->msrs[NVMM_X64_MSR_SYSENTER_EIP];
                   1814:                vmcb->state.g_pat = state->msrs[NVMM_X64_MSR_PAT];
1.1       maxv     1815:        }
                   1816:
                   1817:        if (flags & NVMM_X64_STATE_MISC) {
1.13      maxv     1818:                vmcb->state.cpl = state->misc[NVMM_X64_MISC_CPL];
1.10      maxv     1819:
1.13      maxv     1820:                if (state->misc[NVMM_X64_MISC_INT_SHADOW]) {
1.10      maxv     1821:                        vmcb->ctrl.intr |= VMCB_CTRL_INTR_SHADOW;
                   1822:                } else {
                   1823:                        vmcb->ctrl.intr &= ~VMCB_CTRL_INTR_SHADOW;
                   1824:                }
                   1825:
1.13      maxv     1826:                if (state->misc[NVMM_X64_MISC_INT_WINDOW_EXIT]) {
1.10      maxv     1827:                        svm_event_waitexit_enable(vcpu, false);
                   1828:                } else {
                   1829:                        svm_event_waitexit_disable(vcpu, false);
                   1830:                }
                   1831:
1.13      maxv     1832:                if (state->misc[NVMM_X64_MISC_NMI_WINDOW_EXIT]) {
1.10      maxv     1833:                        svm_event_waitexit_enable(vcpu, true);
                   1834:                } else {
                   1835:                        svm_event_waitexit_disable(vcpu, true);
                   1836:                }
1.1       maxv     1837:        }
                   1838:
1.13      maxv     1839:        CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
1.1       maxv     1840:        if (flags & NVMM_X64_STATE_FPU) {
1.13      maxv     1841:                memcpy(cpudata->gfpu.xsh_fxsave, &state->fpu,
                   1842:                    sizeof(state->fpu));
1.1       maxv     1843:
                   1844:                fpustate = (struct fxsave *)cpudata->gfpu.xsh_fxsave;
                   1845:                fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
                   1846:                fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
                   1847:        }
1.12      maxv     1848:
                   1849:        svm_vmcb_cache_update(vmcb, flags);
1.1       maxv     1850: }
                   1851:
                   1852: static void
                   1853: svm_vcpu_getstate(struct nvmm_cpu *vcpu, void *data, uint64_t flags)
                   1854: {
1.13      maxv     1855:        struct nvmm_x64_state *state = (struct nvmm_x64_state *)data;
1.1       maxv     1856:        struct svm_cpudata *cpudata = vcpu->cpudata;
                   1857:        struct vmcb *vmcb = cpudata->vmcb;
                   1858:
                   1859:        if (flags & NVMM_X64_STATE_SEGS) {
1.13      maxv     1860:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_CS],
1.1       maxv     1861:                    &vmcb->state.cs);
1.13      maxv     1862:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_DS],
1.1       maxv     1863:                    &vmcb->state.ds);
1.13      maxv     1864:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_ES],
1.1       maxv     1865:                    &vmcb->state.es);
1.13      maxv     1866:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_FS],
1.1       maxv     1867:                    &vmcb->state.fs);
1.13      maxv     1868:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GS],
1.1       maxv     1869:                    &vmcb->state.gs);
1.13      maxv     1870:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_SS],
1.1       maxv     1871:                    &vmcb->state.ss);
1.13      maxv     1872:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_GDT],
1.1       maxv     1873:                    &vmcb->state.gdt);
1.13      maxv     1874:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_IDT],
1.1       maxv     1875:                    &vmcb->state.idt);
1.13      maxv     1876:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_LDT],
1.1       maxv     1877:                    &vmcb->state.ldt);
1.13      maxv     1878:                svm_vcpu_getstate_seg(&state->segs[NVMM_X64_SEG_TR],
1.1       maxv     1879:                    &vmcb->state.tr);
                   1880:        }
                   1881:
1.13      maxv     1882:        CTASSERT(sizeof(cpudata->gprs) == sizeof(state->gprs));
1.1       maxv     1883:        if (flags & NVMM_X64_STATE_GPRS) {
1.13      maxv     1884:                memcpy(state->gprs, cpudata->gprs, sizeof(state->gprs));
1.1       maxv     1885:
1.13      maxv     1886:                state->gprs[NVMM_X64_GPR_RIP] = vmcb->state.rip;
                   1887:                state->gprs[NVMM_X64_GPR_RSP] = vmcb->state.rsp;
                   1888:                state->gprs[NVMM_X64_GPR_RAX] = vmcb->state.rax;
                   1889:                state->gprs[NVMM_X64_GPR_RFLAGS] = vmcb->state.rflags;
1.1       maxv     1890:        }
                   1891:
                   1892:        if (flags & NVMM_X64_STATE_CRS) {
1.13      maxv     1893:                state->crs[NVMM_X64_CR_CR0] = vmcb->state.cr0;
                   1894:                state->crs[NVMM_X64_CR_CR2] = vmcb->state.cr2;
                   1895:                state->crs[NVMM_X64_CR_CR3] = vmcb->state.cr3;
                   1896:                state->crs[NVMM_X64_CR_CR4] = vmcb->state.cr4;
                   1897:                state->crs[NVMM_X64_CR_CR8] = __SHIFTOUT(vmcb->ctrl.v,
1.1       maxv     1898:                    VMCB_CTRL_V_TPR);
1.13      maxv     1899:                state->crs[NVMM_X64_CR_XCR0] = cpudata->gxcr0;
1.1       maxv     1900:        }
                   1901:
1.13      maxv     1902:        CTASSERT(sizeof(cpudata->drs) == sizeof(state->drs));
1.1       maxv     1903:        if (flags & NVMM_X64_STATE_DRS) {
1.13      maxv     1904:                memcpy(state->drs, cpudata->drs, sizeof(state->drs));
1.1       maxv     1905:
1.13      maxv     1906:                state->drs[NVMM_X64_DR_DR6] = vmcb->state.dr6;
                   1907:                state->drs[NVMM_X64_DR_DR7] = vmcb->state.dr7;
1.1       maxv     1908:        }
                   1909:
                   1910:        if (flags & NVMM_X64_STATE_MSRS) {
1.13      maxv     1911:                state->msrs[NVMM_X64_MSR_EFER] = vmcb->state.efer;
                   1912:                state->msrs[NVMM_X64_MSR_STAR] = vmcb->state.star;
                   1913:                state->msrs[NVMM_X64_MSR_LSTAR] = vmcb->state.lstar;
                   1914:                state->msrs[NVMM_X64_MSR_CSTAR] = vmcb->state.cstar;
                   1915:                state->msrs[NVMM_X64_MSR_SFMASK] = vmcb->state.sfmask;
                   1916:                state->msrs[NVMM_X64_MSR_KERNELGSBASE] =
1.1       maxv     1917:                    vmcb->state.kernelgsbase;
1.13      maxv     1918:                state->msrs[NVMM_X64_MSR_SYSENTER_CS] =
1.1       maxv     1919:                    vmcb->state.sysenter_cs;
1.13      maxv     1920:                state->msrs[NVMM_X64_MSR_SYSENTER_ESP] =
1.1       maxv     1921:                    vmcb->state.sysenter_esp;
1.13      maxv     1922:                state->msrs[NVMM_X64_MSR_SYSENTER_EIP] =
1.1       maxv     1923:                    vmcb->state.sysenter_eip;
1.13      maxv     1924:                state->msrs[NVMM_X64_MSR_PAT] = vmcb->state.g_pat;
1.1       maxv     1925:
                    1926:                /* Hide SVME: forced to 1 in the VMCB, never guest-visible. */
1.13      maxv     1927:                state->msrs[NVMM_X64_MSR_EFER] &= ~EFER_SVME;
1.1       maxv     1928:        }
                   1929:
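                                  /*
                                   * CPL and the interrupt shadow come straight from the VMCB;
                                   * the window-exit flags mirror software state kept in cpudata.
                                   */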
                   1930:        if (flags & NVMM_X64_STATE_MISC) {
1.13      maxv     1931:                state->misc[NVMM_X64_MISC_CPL] = vmcb->state.cpl;
1.1       maxv     1932:
1.13      maxv     1933:                state->misc[NVMM_X64_MISC_INT_SHADOW] =
1.10      maxv     1934:                    (vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0;
1.13      maxv     1935:                state->misc[NVMM_X64_MISC_INT_WINDOW_EXIT] =
1.10      maxv     1936:                    cpudata->int_window_exit;
1.13      maxv     1937:                state->misc[NVMM_X64_MISC_NMI_WINDOW_EXIT] =
1.10      maxv     1938:                    cpudata->nmi_window_exit;
1.1       maxv     1939:        }
                   1940:
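                                  /*
                                   * The guest FPU state is kept in an XSAVE buffer; only its
                                   * legacy FXSAVE portion is exposed here.
                                   */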
1.13      maxv     1941:        CTASSERT(sizeof(cpudata->gfpu.xsh_fxsave) == sizeof(state->fpu));
1.1       maxv     1942:        if (flags & NVMM_X64_STATE_FPU) {
1.13      maxv     1943:                memcpy(&state->fpu, cpudata->gfpu.xsh_fxsave,
                   1944:                    sizeof(state->fpu));
1.1       maxv     1945:        }
                   1946: }
                   1947:
                   1948: /* -------------------------------------------------------------------------- */
                   1949:
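                          /*
                           * TLB flush callback, installed in the guest pmap when a machine is
                           * created (see svm_machine_create below). The host calls it when it
                           * modifies the guest's page tables; each VCPU gets flagged, and the
                           * flag is presumably consumed on the VCPU's next run (not shown here).
                           */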
                   1950: static void
                   1951: svm_tlb_flush(struct pmap *pm)
                   1952: {
                   1953:        struct nvmm_machine *mach = pm->pm_data;
                   1954:        struct svm_cpudata *cpudata;
                   1955:        struct nvmm_cpu *vcpu;
                   1956:        int error;
                   1957:        size_t i;
                   1958:
                   1959:        /* Request TLB flushes. */
                   1960:        for (i = 0; i < NVMM_MAX_VCPUS; i++) {
                   1961:                error = nvmm_vcpu_get(mach, i, &vcpu);
                   1962:                if (error)
                   1963:                        continue;
                   1964:                cpudata = vcpu->cpudata;
                   1965:                cpudata->tlb_want_flush = true;
                   1966:                nvmm_vcpu_put(vcpu);
                   1967:        }
                   1968: }
                   1969:
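                          /*
                           * Machine-scope hooks: creating a machine wires the guest pmap to
                           * svm_tlb_flush above and allocates the per-machine data.
                           */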
                   1970: static void
                   1971: svm_machine_create(struct nvmm_machine *mach)
                   1972: {
                   1973:        /* Fill in pmap info. */
                   1974:        mach->vm->vm_map.pmap->pm_data = (void *)mach;
                   1975:        mach->vm->vm_map.pmap->pm_tlb_flush = svm_tlb_flush;
                   1976:
                   1977:        mach->machdata = kmem_zalloc(sizeof(struct svm_machdata), KM_SLEEP);
                   1978: }
                   1979:
                   1980: static void
                   1981: svm_machine_destroy(struct nvmm_machine *mach)
                   1982: {
                   1983:        kmem_free(mach->machdata, sizeof(struct svm_machdata));
                   1984: }
                   1985:
                   1986: static int
                   1987: svm_machine_configure(struct nvmm_machine *mach, uint64_t op, void *data)
                   1988: {
                   1989:        struct nvmm_x86_conf_cpuid *cpuid = data;
                   1990:        struct svm_machdata *machdata = (struct svm_machdata *)mach->machdata;
                   1991:        size_t i;
                   1992:
                   1993:        if (__predict_false(op != NVMM_X86_CONF_CPUID)) {
                   1994:                return EINVAL;
                   1995:        }
                   1996:
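                                  /*
                                   * A CPUID bit cannot be both forced set and forced cleared:
                                   * reject overlapping set/del masks.
                                   */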
                   1997:        if (__predict_false((cpuid->set.eax & cpuid->del.eax) ||
                   1998:            (cpuid->set.ebx & cpuid->del.ebx) ||
                   1999:            (cpuid->set.ecx & cpuid->del.ecx) ||
                   2000:            (cpuid->set.edx & cpuid->del.edx))) {
                   2001:                return EINVAL;
                   2002:        }
                   2003:
                   2004:        /* If already here, replace. */
                   2005:        for (i = 0; i < SVM_NCPUIDS; i++) {
                   2006:                if (!machdata->cpuidpresent[i]) {
                   2007:                        continue;
                   2008:                }
                   2009:                if (machdata->cpuid[i].leaf == cpuid->leaf) {
                   2010:                        memcpy(&machdata->cpuid[i], cpuid,
                   2011:                            sizeof(struct nvmm_x86_conf_cpuid));
                   2012:                        return 0;
                   2013:                }
                   2014:        }
                   2015:
                   2016:        /* Not here, insert. */
                   2017:        for (i = 0; i < SVM_NCPUIDS; i++) {
                   2018:                if (!machdata->cpuidpresent[i]) {
                   2019:                        machdata->cpuidpresent[i] = true;
                   2020:                        memcpy(&machdata->cpuid[i], cpuid,
                   2021:                            sizeof(struct nvmm_x86_conf_cpuid));
                   2022:                        return 0;
                   2023:                }
                   2024:        }
                   2025:
                   2026:        return ENOBUFS;
                   2027: }
                   2028:
                   2029: /* -------------------------------------------------------------------------- */
                   2030:
                   2031: static bool
                   2032: svm_ident(void)
                   2033: {
                   2034:        u_int descs[4];
                   2035:        uint64_t msr;
                   2036:
                   2037:        if (cpu_vendor != CPUVENDOR_AMD) {
                   2038:                return false;
                   2039:        }
                   2040:        if (!(cpu_feature[3] & CPUID_SVM)) {
                   2041:                return false;
                   2042:        }
                   2043:
                   2044:        if (curcpu()->ci_max_ext_cpuid < 0x8000000a) {
                   2045:                return false;
                   2046:        }
                   2047:        x86_cpuid(0x8000000a, descs);
                   2048:
                   2049:        /* Want Nested Paging. */
                   2050:        if (!(descs[3] & CPUID_AMD_SVM_NP)) {
                   2051:                return false;
                   2052:        }
                   2053:
                   2054:        /* Want nRIP. */
                   2055:        if (!(descs[3] & CPUID_AMD_SVM_NRIPS)) {
                   2056:                return false;
                   2057:        }
                   2058:
                   2059:        svm_decode_assist = (descs[3] & CPUID_AMD_SVM_DecodeAssist) != 0;
                   2060:
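                                  /*
                                   * If SVM is disabled (VM_CR.SVMDIS) and the setting is locked,
                                   * the BIOS has turned SVM off for good.
                                   */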
                   2061:        msr = rdmsr(MSR_VMCR);
                   2062:        if ((msr & VMCR_SVMED) && (msr & VMCR_LOCK)) {
                   2063:                return false;
                   2064:        }
                   2065:
                   2066:        return true;
                   2067: }
                   2068:
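                          /*
                           * ASIDs tag guest TLB entries, which lets host and guest translations
                           * coexist in the TLB without systematic flushes. A simple bitmap
                           * tracks which ASIDs are allocated.
                           */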
                   2069: static void
                   2070: svm_init_asid(uint32_t maxasid)
                   2071: {
                   2072:        size_t i, j, allocsz;
                   2073:
                   2074:        mutex_init(&svm_asidlock, MUTEX_DEFAULT, IPL_NONE);
                   2075:
                    2076:        /* Arbitrarily limit, to keep the ASID bitmap small. */
                   2077:        maxasid = uimin(maxasid, 8192);
                   2078:
                   2079:        svm_maxasid = maxasid;
                   2080:        allocsz = roundup(maxasid, 8) / 8;
                   2081:        svm_asidmap = kmem_zalloc(allocsz, KM_SLEEP);
                   2082:
                   2083:        /* ASID 0 is reserved for the host. */
                   2084:        svm_asidmap[0] |= __BIT(0);
                   2085:
                    2086:        /* ASID n-1 is special: reserve it as a shared fallback ASID. */
                   2087:        i = (maxasid - 1) / 8;
                   2088:        j = (maxasid - 1) % 8;
                   2089:        svm_asidmap[i] |= __BIT(j);
                   2090: }
                   2091:
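                          /*
                           * Enable or disable SVM on the local CPU, broadcast by xcall from
                           * svm_init/svm_fini: clear VM_CR.SVMDIS if it is set, toggle
                           * EFER.SVME, and program VM_HSAVE_PA with the per-CPU host save
                           * area when enabling (it gets cleared when disabling).
                           */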
                   2092: static void
                   2093: svm_change_cpu(void *arg1, void *arg2)
                   2094: {
                   2095:        bool enable = (bool)arg1;
                   2096:        uint64_t msr;
                   2097:
                   2098:        msr = rdmsr(MSR_VMCR);
                   2099:        if (msr & VMCR_SVMED) {
                   2100:                wrmsr(MSR_VMCR, msr & ~VMCR_SVMED);
                   2101:        }
                   2102:
                   2103:        if (!enable) {
                   2104:                wrmsr(MSR_VM_HSAVE_PA, 0);
                   2105:        }
                   2106:
                   2107:        msr = rdmsr(MSR_EFER);
                   2108:        if (enable) {
                   2109:                msr |= EFER_SVME;
                   2110:        } else {
                   2111:                msr &= ~EFER_SVME;
                   2112:        }
                   2113:        wrmsr(MSR_EFER, msr);
                   2114:
                   2115:        if (enable) {
                   2116:                wrmsr(MSR_VM_HSAVE_PA, hsave[cpu_index(curcpu())].pa);
                   2117:        }
                   2118: }
                   2119:
                   2120: static void
                   2121: svm_init(void)
                   2122: {
                   2123:        CPU_INFO_ITERATOR cii;
                   2124:        struct cpu_info *ci;
                   2125:        struct vm_page *pg;
                   2126:        u_int descs[4];
                   2127:        uint64_t xc;
                   2128:
                   2129:        x86_cpuid(0x8000000a, descs);
                   2130:
                    2131:        /* Pick the guest TLB flush command: by-ASID if supported, else full. */
                   2132:        if (descs[3] & CPUID_AMD_SVM_FlushByASID) {
                   2133:                svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_GUEST;
                   2134:        } else {
                   2135:                svm_ctrl_tlb_flush = VMCB_CTRL_TLB_CTRL_FLUSH_ALL;
                   2136:        }
                   2137:
                   2138:        /* Init the ASID. */
                   2139:        svm_init_asid(descs[1]);
                   2140:
                   2141:        /* Init the XCR0 mask. */
                   2142:        svm_xcr0_mask = SVM_XCR0_MASK_DEFAULT & x86_xsave_features;
                   2143:
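                                  /*
                                   * Allocate one host state-save page per CPU: VMRUN saves part
                                   * of the host state at the physical address loaded in the
                                   * VM_HSAVE_PA MSR, and #VMEXIT restores it from there.
                                   */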
                   2144:        memset(hsave, 0, sizeof(hsave));
                   2145:        for (CPU_INFO_FOREACH(cii, ci)) {
                   2146:                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                   2147:                hsave[cpu_index(ci)].pa = VM_PAGE_TO_PHYS(pg);
                   2148:        }
                   2149:
                   2150:        xc = xc_broadcast(0, svm_change_cpu, (void *)true, NULL);
                   2151:        xc_wait(xc);
                   2152: }
                   2153:
                   2154: static void
                   2155: svm_fini_asid(void)
                   2156: {
                   2157:        size_t allocsz;
                   2158:
                   2159:        allocsz = roundup(svm_maxasid, 8) / 8;
                   2160:        kmem_free(svm_asidmap, allocsz);
                   2161:
                   2162:        mutex_destroy(&svm_asidlock);
                   2163: }
                   2164:
                   2165: static void
                   2166: svm_fini(void)
                   2167: {
                   2168:        uint64_t xc;
                   2169:        size_t i;
                   2170:
                   2171:        xc = xc_broadcast(0, svm_change_cpu, (void *)false, NULL);
                   2172:        xc_wait(xc);
                   2173:
                   2174:        for (i = 0; i < MAXCPUS; i++) {
                   2175:                if (hsave[i].pa != 0)
                   2176:                        uvm_pagefree(PHYS_TO_VM_PAGE(hsave[i].pa));
                   2177:        }
                   2178:
                   2179:        svm_fini_asid();
                   2180: }
                   2181:
                   2182: static void
                   2183: svm_capability(struct nvmm_capability *cap)
                   2184: {
                   2185:        cap->u.x86.xcr0_mask = svm_xcr0_mask;
                   2186:        cap->u.x86.mxcsr_mask = x86_fpu_mxcsr_mask;
                   2187:        cap->u.x86.conf_cpuid_maxops = SVM_NCPUIDS;
                   2188: }
                   2189:
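                          /*
                           * The SVM implementation vector, through which the machine-independent
                           * NVMM core drives this backend.
                           */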
                   2190: const struct nvmm_impl nvmm_x86_svm = {
                   2191:        .ident = svm_ident,
                   2192:        .init = svm_init,
                   2193:        .fini = svm_fini,
                   2194:        .capability = svm_capability,
                   2195:        .conf_max = NVMM_X86_NCONF,
                   2196:        .conf_sizes = svm_conf_sizes,
                   2197:        .state_size = sizeof(struct nvmm_x64_state),
                   2198:        .machine_create = svm_machine_create,
                   2199:        .machine_destroy = svm_machine_destroy,
                   2200:        .machine_configure = svm_machine_configure,
                   2201:        .vcpu_create = svm_vcpu_create,
                   2202:        .vcpu_destroy = svm_vcpu_destroy,
                   2203:        .vcpu_setstate = svm_vcpu_setstate,
                   2204:        .vcpu_getstate = svm_vcpu_getstate,
                   2205:        .vcpu_inject = svm_vcpu_inject,
                   2206:        .vcpu_run = svm_vcpu_run
                   2207: };
